about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2015-08-27 23:20:09 -0400
committerDave Airlie <airlied@redhat.com>2015-08-27 23:20:09 -0400
commit3439633a85891626abf124a52f2c3e3e83cca9d0 (patch)
treee44511a47f633d45e7bb456e0fea9fa7974febd7
parentd7b273685fedba5359a4ba0ae4f542e3ece28153 (diff)
parenta3c1ff87cfe27f99de58c153eb9d42dcfdbfa59b (diff)
Merge branch 'linux-4.3' of git://anongit.freedesktop.org/git/nouveau/linux-2.6 into drm-next
Rather large pull request this time around, due to the long-pending cleanup of the kernel driver being here. There's a stupidly large number of commits for that, as I wanted to have the series be bisectable at a fairly fine-grained level. That said, a very large portion of the churn in the rework was automated, and a very large number of boards from right across the whole range we support have been tested. I'm fairly confident there shouldn't be (too many) issues from this. Beyond correcting some not-so-great design decisions and making the code a lot easier to work with, there's not much exciting (lower memory usage, GPU VM should be a lot faster, etc) to be gained by the end-user as a result of the cleanup, it mostly lays the groundwork for future improvements. A big thanks goes to Alexandre Courbot for testing/debugging the GK20A codepaths for me :) Highlights: - A heap of perfmon work, providing a more useful userspace interface and specifying counters for a bunch of boards - Support for GT200 reclocking + other misc pm improvements - Initial patches towards supporting GM20B (Tegra X1) - Maxwell DisplayPort fixes - Cleanup of the kernel driver - The usual collection of random fixes * 'linux-4.3' of git://anongit.freedesktop.org/git/nouveau/linux-2.6: (312 commits) drm/nouveau: bump driver version for release drm/nouveau/tegra: merge platform setup from nouveau drm drm/nouveau/pci: merge agp handling from nouveau drm drm/nouveau/device: remove pci/platform_device from common struct drm/nouveau/device: import pciid list and integrate quirks with it drm/nouveau/device: cleaner abstraction for device resource functions drm/nouveau/mc: move device irq handling to platform-specific code drm/nouveau/mc/gf100-: handle second interrupt tree drm/nouveau/mc: abstract interface to master intr registers drm/nouveau/pci: new subdev drm/nouveau/object: merge with handle drm/nouveau/core: remove the remainder of the previous style drm/nouveau/mpeg: convert to new-style nvkm_engine drm/nouveau/sw: convert to new-style nvkm_engine drm/nouveau/pm: convert to new-style nvkm_engine drm/nouveau/gr: convert to new-style nvkm_engine drm/nouveau/fifo: convert to new-style nvkm_engine drm/nouveau/disp: convert to new-style nvkm_engine drm/nouveau/dma: convert to new-style nvkm_engine drm/nouveau/cipher: convert to new-style nvkm_engine ...
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c45
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c21
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c29
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.h26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c13
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c16
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c30
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h199
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/client.h27
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/device.h73
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ioctl.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/notify.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h70
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/os.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/client.h65
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/debug.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h274
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/engine.h81
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/enum.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/handle.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/memory.h53
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/mm.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h53
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/object.h261
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h22
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/option.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/parent.h58
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/pci.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/printk.h29
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h139
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/device.h30
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h39
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h26
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h75
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h160
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h118
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h63
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h50
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h29
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h15
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h24
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h44
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h70
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h43
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h139
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h26
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h151
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h30
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h54
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h37
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h78
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h30
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h106
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h83
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h30
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h48
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c221
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.c195
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c44
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c84
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c123
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c145
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h33
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c58
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_nvif.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c227
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.h47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c64
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c197
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c68
-rw-r--r--drivers/gpu/drm/nouveau/nvif/device.c55
-rw-r--r--drivers/gpu/drm/nouveau/nvif/notify.c49
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c200
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/Kbuild7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/client.c188
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/engctx.c239
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/engine.c154
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/enum.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c379
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/handle.c221
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ioctl.c395
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/memory.c64
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/mm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/namedb.c199
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/object.c400
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/oproxy.c200
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/option.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/parent.c159
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/printk.c103
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ramht.c144
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c208
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c79
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c180
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c174
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c167
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c144
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c2923
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c82
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c358
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c326
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c190
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c89
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c204
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c131
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c153
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c427
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c478
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c1685
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c295
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c371
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild86
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c325
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c114
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c301
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h127
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c118
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h61
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c117
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c244
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c132
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c242
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c)26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c)27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c68
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c247
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h91
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c86
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c275
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c139
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c1310
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c536
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c265
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c109
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c147
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c105
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c)34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c)41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c186
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c1667
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h231
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c)29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c68
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c127
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h82
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c202
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c101
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c103
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c111
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c81
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c165
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h78
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c)59
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c171
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c139
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c399
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c95
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c)83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c74
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c138
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c157
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c)28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c)96
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c131
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c133
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c156
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c176
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c165
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c163
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c195
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/falcon.c292
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c345
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c415
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c285
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c270
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c220
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c96
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c243
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c91
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c481
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c924
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c1037
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h89
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c94
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c293
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c323
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c92
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c638
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h170
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c153
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c208
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c335
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c533
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h132
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild48
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c136
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c327
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c88
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c143
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c135
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c119
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c103
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c196
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c1556
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h128
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c227
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c349
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c215
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c223
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c48
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c48
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c1213
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c824
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c59
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c59
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c567
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c220
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c180
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c331
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c218
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c218
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c590
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c877
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c84
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c406
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c248
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c228
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c101
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c911
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c126
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c214
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c154
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c157
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c113
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c152
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c138
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c111
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c188
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c151
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c106
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c224
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c79
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c192
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c133
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c56
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c205
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c287
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c147
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c72
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c81
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c592
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c92
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c174
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c187
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c116
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c64
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c71
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c78
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c81
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c176
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c318
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c326
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c356
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c344
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c282
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c56
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c173
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c294
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c128
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c82
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c125
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c242
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c71
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c151
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c197
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c121
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c351
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c342
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c263
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c304
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c104
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c176
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c507
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c147
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c)50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c374
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c151
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c181
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c181
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c742
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c245
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c95
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c96
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c86
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c113
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c241
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c106
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c199
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c104
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c96
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c109
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c119
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h67
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c124
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c301
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c394
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c240
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c247
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c266
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c124
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c202
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c146
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h76
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c178
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c234
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c138
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c128
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c136
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c195
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c174
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c171
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c182
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c65
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c230
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4 (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c)29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c102
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c59
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c69
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c305
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c117
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c67
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c190
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c174
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c153
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c129
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c106
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h86
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c122
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c158
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c253
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c88
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c128
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h20
728 files changed, 43062 insertions, 37930 deletions
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 2b765663c1a3..a34b437dbc8f 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -18,7 +18,6 @@ nouveau-y += $(nvkm-y)
18ifdef CONFIG_X86 18ifdef CONFIG_X86
19nouveau-$(CONFIG_ACPI) += nouveau_acpi.o 19nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
20endif 20endif
21nouveau-y += nouveau_agp.o
22nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o 21nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
23nouveau-y += nouveau_drm.o 22nouveau-y += nouveau_drm.o
24nouveau-y += nouveau_hwmon.o 23nouveau-y += nouveau_hwmon.o
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index c6361422a0b2..82bd4658aa58 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -198,7 +198,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
198 int *burst, int *lwm) 198 int *burst, int *lwm)
199{ 199{
200 struct nouveau_drm *drm = nouveau_drm(dev); 200 struct nouveau_drm *drm = nouveau_drm(dev);
201 struct nvif_device *device = &nouveau_drm(dev)->device; 201 struct nvif_object *device = &nouveau_drm(dev)->device.object;
202 struct nv_fifo_info fifo_data; 202 struct nv_fifo_info fifo_data;
203 struct nv_sim_state sim_data; 203 struct nv_sim_state sim_data;
204 int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY); 204 int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index af7249ca0f4b..78cb033bc015 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -65,8 +65,8 @@ int nv04_dac_output_offset(struct drm_encoder *encoder)
65 65
66static int sample_load_twice(struct drm_device *dev, bool sense[2]) 66static int sample_load_twice(struct drm_device *dev, bool sense[2])
67{ 67{
68 struct nvif_device *device = &nouveau_drm(dev)->device; 68 struct nouveau_drm *drm = nouveau_drm(dev);
69 struct nvkm_timer *ptimer = nvxx_timer(device); 69 struct nvif_object *device = &drm->device.object;
70 int i; 70 int i;
71 71
72 for (i = 0; i < 2; i++) { 72 for (i = 0; i < 2; i++) {
@@ -80,17 +80,22 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
80 * use a 10ms timeout (guards against crtc being inactive, in 80 * use a 10ms timeout (guards against crtc being inactive, in
81 * which case blank state would never change) 81 * which case blank state would never change)
82 */ 82 */
83 if (!nvkm_timer_wait_eq(ptimer, 10000000, 83 if (nvif_msec(&drm->device, 10,
84 NV_PRMCIO_INP0__COLOR, 84 if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
85 0x00000001, 0x00000000)) 85 break;
86 ) < 0)
86 return -EBUSY; 87 return -EBUSY;
87 if (!nvkm_timer_wait_eq(ptimer, 10000000, 88
88 NV_PRMCIO_INP0__COLOR, 89 if (nvif_msec(&drm->device, 10,
89 0x00000001, 0x00000001)) 90 if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
91 break;
92 ) < 0)
90 return -EBUSY; 93 return -EBUSY;
91 if (!nvkm_timer_wait_eq(ptimer, 10000000, 94
92 NV_PRMCIO_INP0__COLOR, 95 if (nvif_msec(&drm->device, 10,
93 0x00000001, 0x00000000)) 96 if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
97 break;
98 ) < 0)
94 return -EBUSY; 99 return -EBUSY;
95 100
96 udelay(100); 101 udelay(100);
@@ -128,7 +133,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
128 struct drm_connector *connector) 133 struct drm_connector *connector)
129{ 134{
130 struct drm_device *dev = encoder->dev; 135 struct drm_device *dev = encoder->dev;
131 struct nvif_device *device = &nouveau_drm(dev)->device; 136 struct nvif_object *device = &nouveau_drm(dev)->device.object;
132 struct nouveau_drm *drm = nouveau_drm(dev); 137 struct nouveau_drm *drm = nouveau_drm(dev);
133 uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; 138 uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
134 uint8_t saved_palette0[3], saved_palette_mask; 139 uint8_t saved_palette0[3], saved_palette_mask;
@@ -231,8 +236,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
231{ 236{
232 struct drm_device *dev = encoder->dev; 237 struct drm_device *dev = encoder->dev;
233 struct nouveau_drm *drm = nouveau_drm(dev); 238 struct nouveau_drm *drm = nouveau_drm(dev);
234 struct nvif_device *device = &nouveau_drm(dev)->device; 239 struct nvif_object *device = &nouveau_drm(dev)->device.object;
235 struct nvkm_gpio *gpio = nvxx_gpio(device); 240 struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
236 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; 241 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
237 uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); 242 uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
238 uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, 243 uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -265,10 +270,10 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
265 } 270 }
266 271
267 if (gpio) { 272 if (gpio) {
268 saved_gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff); 273 saved_gpio1 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
269 saved_gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff); 274 saved_gpio0 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
270 gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, dcb->type == DCB_OUTPUT_TV); 275 nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, dcb->type == DCB_OUTPUT_TV);
271 gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, dcb->type == DCB_OUTPUT_TV); 276 nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, dcb->type == DCB_OUTPUT_TV);
272 } 277 }
273 278
274 msleep(4); 279 msleep(4);
@@ -320,8 +325,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
320 nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2); 325 nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
321 326
322 if (gpio) { 327 if (gpio) {
323 gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1); 328 nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
324 gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, saved_gpio0); 329 nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, saved_gpio0);
325 } 330 }
326 331
327 return sample; 332 return sample;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 7cfb0cbc9b6e..522e91ab5360 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -281,7 +281,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
281 struct drm_display_mode *adjusted_mode) 281 struct drm_display_mode *adjusted_mode)
282{ 282{
283 struct drm_device *dev = encoder->dev; 283 struct drm_device *dev = encoder->dev;
284 struct nvif_device *device = &nouveau_drm(dev)->device; 284 struct nvif_object *device = &nouveau_drm(dev)->device.object;
285 struct nouveau_drm *drm = nouveau_drm(dev); 285 struct nouveau_drm *drm = nouveau_drm(dev);
286 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 286 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
287 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; 287 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
@@ -493,11 +493,11 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
493 if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 || 493 if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
494 dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) { 494 dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
495 if (mode == DRM_MODE_DPMS_ON) { 495 if (mode == DRM_MODE_DPMS_ON) {
496 nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31); 496 nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
497 nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1); 497 nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
498 } else { 498 } else {
499 nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0); 499 nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
500 nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 0); 500 nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 0);
501 } 501 }
502 } 502 }
503#endif 503#endif
@@ -624,8 +624,8 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
624 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; 624 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
625 struct nouveau_drm *drm = nouveau_drm(dev); 625 struct nouveau_drm *drm = nouveau_drm(dev);
626 struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); 626 struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
627 struct nvkm_i2c_port *port = i2c->find(i2c, 2); 627 struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
628 struct nvkm_i2c_board_info info[] = { 628 struct nvkm_i2c_bus_probe info[] = {
629 { 629 {
630 { 630 {
631 .type = "sil164", 631 .type = "sil164",
@@ -639,16 +639,15 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
639 }; 639 };
640 int type; 640 int type;
641 641
642 if (!nv_gf4_disp_arch(dev) || !port || 642 if (!nv_gf4_disp_arch(dev) || !bus || get_tmds_slave(encoder))
643 get_tmds_slave(encoder))
644 return; 643 return;
645 644
646 type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL, NULL); 645 type = nvkm_i2c_bus_probe(bus, "TMDS transmitter", info, NULL, NULL);
647 if (type < 0) 646 if (type < 0)
648 return; 647 return;
649 648
650 drm_i2c_encoder_init(dev, to_encoder_slave(encoder), 649 drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
651 &port->adapter, &info[type].dev); 650 &bus->i2c, &info[type].dev);
652} 651}
653 652
654static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = { 653static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 4131be5507ab..9e650081c357 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -47,7 +47,7 @@ nv04_display_create(struct drm_device *dev)
47 if (!disp) 47 if (!disp)
48 return -ENOMEM; 48 return -ENOMEM;
49 49
50 nvif_object_map(nvif_object(&drm->device)); 50 nvif_object_map(&drm->device.object);
51 51
52 nouveau_display(dev)->priv = disp; 52 nouveau_display(dev)->priv = disp;
53 nouveau_display(dev)->dtor = nv04_display_destroy; 53 nouveau_display(dev)->dtor = nv04_display_destroy;
@@ -101,7 +101,9 @@ nv04_display_create(struct drm_device *dev)
101 101
102 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 102 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
103 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 103 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
104 nv_encoder->i2c = i2c->find(i2c, nv_encoder->dcb->i2c_index); 104 struct nvkm_i2c_bus *bus =
105 nvkm_i2c_bus_find(i2c, nv_encoder->dcb->i2c_index);
106 nv_encoder->i2c = bus ? &bus->i2c : NULL;
105 } 107 }
106 108
107 /* Save previous state */ 109 /* Save previous state */
@@ -151,7 +153,7 @@ nv04_display_destroy(struct drm_device *dev)
151 nouveau_display(dev)->priv = NULL; 153 nouveau_display(dev)->priv = NULL;
152 kfree(disp); 154 kfree(disp);
153 155
154 nvif_object_unmap(nvif_object(&drm->device)); 156 nvif_object_unmap(&drm->device.object);
155} 157}
156 158
157int 159int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index c910c5d5c662..6c9a1e89810f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -172,7 +172,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
172 struct nouveau_drm *drm = nouveau_drm(dev); 172 struct nouveau_drm *drm = nouveau_drm(dev);
173 struct nvkm_bios *bios = nvxx_bios(&drm->device); 173 struct nvkm_bios *bios = nvxx_bios(&drm->device);
174 struct nvbios_init init = { 174 struct nvbios_init init = {
175 .subdev = nv_subdev(bios), 175 .subdev = &bios->subdev,
176 .bios = bios, 176 .bios = bios,
177 .offset = table, 177 .offset = table,
178 .outp = outp, 178 .outp = outp,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 42e07afc4c2b..956a833b8200 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -165,8 +165,8 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
165 struct nvkm_pll_vals *pllvals) 165 struct nvkm_pll_vals *pllvals)
166{ 166{
167 struct nouveau_drm *drm = nouveau_drm(dev); 167 struct nouveau_drm *drm = nouveau_drm(dev);
168 struct nvif_device *device = &drm->device; 168 struct nvif_object *device = &drm->device.object;
169 struct nvkm_bios *bios = nvxx_bios(device); 169 struct nvkm_bios *bios = nvxx_bios(&drm->device);
170 uint32_t reg1, pll1, pll2 = 0; 170 uint32_t reg1, pll1, pll2 = 0;
171 struct nvbios_pll pll_lim; 171 struct nvbios_pll pll_lim;
172 int ret; 172 int ret;
@@ -660,8 +660,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
660 struct nv04_mode_state *state) 660 struct nv04_mode_state *state)
661{ 661{
662 struct nouveau_drm *drm = nouveau_drm(dev); 662 struct nouveau_drm *drm = nouveau_drm(dev);
663 struct nvif_device *device = &drm->device; 663 struct nvif_object *device = &drm->device.object;
664 struct nvkm_timer *ptimer = nvxx_timer(device);
665 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 664 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
666 uint32_t reg900; 665 uint32_t reg900;
667 int i; 666 int i;
@@ -678,10 +677,10 @@ nv_load_state_ext(struct drm_device *dev, int head,
678 nvif_wr32(device, NV_PVIDEO_INTR_EN, 0); 677 nvif_wr32(device, NV_PVIDEO_INTR_EN, 0);
679 nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0); 678 nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
680 nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0); 679 nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
681 nvif_wr32(device, NV_PVIDEO_LIMIT(0), device->info.ram_size - 1); 680 nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->device.info.ram_size - 1);
682 nvif_wr32(device, NV_PVIDEO_LIMIT(1), device->info.ram_size - 1); 681 nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->device.info.ram_size - 1);
683 nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), device->info.ram_size - 1); 682 nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->device.info.ram_size - 1);
684 nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), device->info.ram_size - 1); 683 nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->device.info.ram_size - 1);
685 nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0); 684 nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0);
686 685
687 NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); 686 NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
@@ -741,8 +740,14 @@ nv_load_state_ext(struct drm_device *dev, int head,
741 if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) { 740 if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) {
742 /* Not waiting for vertical retrace before modifying 741 /* Not waiting for vertical retrace before modifying
743 CRE_53/CRE_54 causes lockups. */ 742 CRE_53/CRE_54 causes lockups. */
744 nvkm_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); 743 nvif_msec(&drm->device, 650,
745 nvkm_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0); 744 if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
745 break;
746 );
747 nvif_msec(&drm->device, 650,
748 if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
749 break;
750 );
746 } 751 }
747 752
748 wr_cio_state(dev, head, regp, NV_CIO_CRE_42); 753 wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
@@ -765,7 +770,7 @@ static void
765nv_save_state_palette(struct drm_device *dev, int head, 770nv_save_state_palette(struct drm_device *dev, int head,
766 struct nv04_mode_state *state) 771 struct nv04_mode_state *state)
767{ 772{
768 struct nvif_device *device = &nouveau_drm(dev)->device; 773 struct nvif_object *device = &nouveau_drm(dev)->device.object;
769 int head_offset = head * NV_PRMDIO_SIZE, i; 774 int head_offset = head * NV_PRMDIO_SIZE, i;
770 775
771 nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, 776 nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
@@ -784,7 +789,7 @@ void
784nouveau_hw_load_state_palette(struct drm_device *dev, int head, 789nouveau_hw_load_state_palette(struct drm_device *dev, int head,
785 struct nv04_mode_state *state) 790 struct nv04_mode_state *state)
786{ 791{
787 struct nvif_device *device = &nouveau_drm(dev)->device; 792 struct nvif_object *device = &nouveau_drm(dev)->device.object;
788 int head_offset = head * NV_PRMDIO_SIZE, i; 793 int head_offset = head * NV_PRMDIO_SIZE, i;
789 794
790 nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, 795 nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h
index 6c796178bf0c..3bded60c5596 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -60,7 +60,7 @@ extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
60static inline uint32_t NVReadCRTC(struct drm_device *dev, 60static inline uint32_t NVReadCRTC(struct drm_device *dev,
61 int head, uint32_t reg) 61 int head, uint32_t reg)
62{ 62{
63 struct nvif_device *device = &nouveau_drm(dev)->device; 63 struct nvif_object *device = &nouveau_drm(dev)->device.object;
64 uint32_t val; 64 uint32_t val;
65 if (head) 65 if (head)
66 reg += NV_PCRTC0_SIZE; 66 reg += NV_PCRTC0_SIZE;
@@ -71,7 +71,7 @@ static inline uint32_t NVReadCRTC(struct drm_device *dev,
71static inline void NVWriteCRTC(struct drm_device *dev, 71static inline void NVWriteCRTC(struct drm_device *dev,
72 int head, uint32_t reg, uint32_t val) 72 int head, uint32_t reg, uint32_t val)
73{ 73{
74 struct nvif_device *device = &nouveau_drm(dev)->device; 74 struct nvif_object *device = &nouveau_drm(dev)->device.object;
75 if (head) 75 if (head)
76 reg += NV_PCRTC0_SIZE; 76 reg += NV_PCRTC0_SIZE;
77 nvif_wr32(device, reg, val); 77 nvif_wr32(device, reg, val);
@@ -80,7 +80,7 @@ static inline void NVWriteCRTC(struct drm_device *dev,
80static inline uint32_t NVReadRAMDAC(struct drm_device *dev, 80static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
81 int head, uint32_t reg) 81 int head, uint32_t reg)
82{ 82{
83 struct nvif_device *device = &nouveau_drm(dev)->device; 83 struct nvif_object *device = &nouveau_drm(dev)->device.object;
84 uint32_t val; 84 uint32_t val;
85 if (head) 85 if (head)
86 reg += NV_PRAMDAC0_SIZE; 86 reg += NV_PRAMDAC0_SIZE;
@@ -91,7 +91,7 @@ static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
91static inline void NVWriteRAMDAC(struct drm_device *dev, 91static inline void NVWriteRAMDAC(struct drm_device *dev,
92 int head, uint32_t reg, uint32_t val) 92 int head, uint32_t reg, uint32_t val)
93{ 93{
94 struct nvif_device *device = &nouveau_drm(dev)->device; 94 struct nvif_object *device = &nouveau_drm(dev)->device.object;
95 if (head) 95 if (head)
96 reg += NV_PRAMDAC0_SIZE; 96 reg += NV_PRAMDAC0_SIZE;
97 nvif_wr32(device, reg, val); 97 nvif_wr32(device, reg, val);
@@ -120,7 +120,7 @@ static inline void nv_write_tmds(struct drm_device *dev,
120static inline void NVWriteVgaCrtc(struct drm_device *dev, 120static inline void NVWriteVgaCrtc(struct drm_device *dev,
121 int head, uint8_t index, uint8_t value) 121 int head, uint8_t index, uint8_t value)
122{ 122{
123 struct nvif_device *device = &nouveau_drm(dev)->device; 123 struct nvif_object *device = &nouveau_drm(dev)->device.object;
124 nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); 124 nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
125 nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value); 125 nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
126} 126}
@@ -128,7 +128,7 @@ static inline void NVWriteVgaCrtc(struct drm_device *dev,
128static inline uint8_t NVReadVgaCrtc(struct drm_device *dev, 128static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
129 int head, uint8_t index) 129 int head, uint8_t index)
130{ 130{
131 struct nvif_device *device = &nouveau_drm(dev)->device; 131 struct nvif_object *device = &nouveau_drm(dev)->device.object;
132 uint8_t val; 132 uint8_t val;
133 nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); 133 nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
134 val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); 134 val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
@@ -165,7 +165,7 @@ static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_
165static inline uint8_t NVReadPRMVIO(struct drm_device *dev, 165static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
166 int head, uint32_t reg) 166 int head, uint32_t reg)
167{ 167{
168 struct nvif_device *device = &nouveau_drm(dev)->device; 168 struct nvif_object *device = &nouveau_drm(dev)->device.object;
169 struct nouveau_drm *drm = nouveau_drm(dev); 169 struct nouveau_drm *drm = nouveau_drm(dev);
170 uint8_t val; 170 uint8_t val;
171 171
@@ -181,7 +181,7 @@ static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
181static inline void NVWritePRMVIO(struct drm_device *dev, 181static inline void NVWritePRMVIO(struct drm_device *dev,
182 int head, uint32_t reg, uint8_t value) 182 int head, uint32_t reg, uint8_t value)
183{ 183{
184 struct nvif_device *device = &nouveau_drm(dev)->device; 184 struct nvif_object *device = &nouveau_drm(dev)->device.object;
185 struct nouveau_drm *drm = nouveau_drm(dev); 185 struct nouveau_drm *drm = nouveau_drm(dev);
186 186
187 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call 187 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
@@ -194,14 +194,14 @@ static inline void NVWritePRMVIO(struct drm_device *dev,
194 194
195static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable) 195static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
196{ 196{
197 struct nvif_device *device = &nouveau_drm(dev)->device; 197 struct nvif_object *device = &nouveau_drm(dev)->device.object;
198 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 198 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
199 nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20); 199 nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
200} 200}
201 201
202static inline bool NVGetEnablePalette(struct drm_device *dev, int head) 202static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
203{ 203{
204 struct nvif_device *device = &nouveau_drm(dev)->device; 204 struct nvif_object *device = &nouveau_drm(dev)->device.object;
205 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 205 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
206 return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); 206 return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
207} 207}
@@ -209,7 +209,7 @@ static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
209static inline void NVWriteVgaAttr(struct drm_device *dev, 209static inline void NVWriteVgaAttr(struct drm_device *dev,
210 int head, uint8_t index, uint8_t value) 210 int head, uint8_t index, uint8_t value)
211{ 211{
212 struct nvif_device *device = &nouveau_drm(dev)->device; 212 struct nvif_object *device = &nouveau_drm(dev)->device.object;
213 if (NVGetEnablePalette(dev, head)) 213 if (NVGetEnablePalette(dev, head))
214 index &= ~0x20; 214 index &= ~0x20;
215 else 215 else
@@ -223,7 +223,7 @@ static inline void NVWriteVgaAttr(struct drm_device *dev,
223static inline uint8_t NVReadVgaAttr(struct drm_device *dev, 223static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
224 int head, uint8_t index) 224 int head, uint8_t index)
225{ 225{
226 struct nvif_device *device = &nouveau_drm(dev)->device; 226 struct nvif_object *device = &nouveau_drm(dev)->device.object;
227 uint8_t val; 227 uint8_t val;
228 if (NVGetEnablePalette(dev, head)) 228 if (NVGetEnablePalette(dev, head))
229 index &= ~0x20; 229 index &= ~0x20;
@@ -259,7 +259,7 @@ static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
259static inline bool 259static inline bool
260nv_heads_tied(struct drm_device *dev) 260nv_heads_tied(struct drm_device *dev)
261{ 261{
262 struct nvif_device *device = &nouveau_drm(dev)->device; 262 struct nvif_object *device = &nouveau_drm(dev)->device.object;
263 struct nouveau_drm *drm = nouveau_drm(dev); 263 struct nouveau_drm *drm = nouveau_drm(dev);
264 264
265 if (drm->device.info.chipset == 0x11) 265 if (drm->device.info.chipset == 0x11)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 5f6ea1873f51..aeebdd402478 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -96,7 +96,8 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
96 uint32_t src_x, uint32_t src_y, 96 uint32_t src_x, uint32_t src_y,
97 uint32_t src_w, uint32_t src_h) 97 uint32_t src_w, uint32_t src_h)
98{ 98{
99 struct nvif_device *dev = &nouveau_drm(plane->dev)->device; 99 struct nouveau_drm *drm = nouveau_drm(plane->dev);
100 struct nvif_object *dev = &drm->device.object;
100 struct nouveau_plane *nv_plane = 101 struct nouveau_plane *nv_plane =
101 container_of(plane, struct nouveau_plane, base); 102 container_of(plane, struct nouveau_plane, base);
102 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); 103 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
@@ -118,7 +119,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
118 if (format > 0xffff) 119 if (format > 0xffff)
119 return -ERANGE; 120 return -ERANGE;
120 121
121 if (dev->info.chipset >= 0x30) { 122 if (drm->device.info.chipset >= 0x30) {
122 if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1)) 123 if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
123 return -ERANGE; 124 return -ERANGE;
124 } else { 125 } else {
@@ -173,7 +174,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
173static int 174static int
174nv10_disable_plane(struct drm_plane *plane) 175nv10_disable_plane(struct drm_plane *plane)
175{ 176{
176 struct nvif_device *dev = &nouveau_drm(plane->dev)->device; 177 struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
177 struct nouveau_plane *nv_plane = 178 struct nouveau_plane *nv_plane =
178 container_of(plane, struct nouveau_plane, base); 179 container_of(plane, struct nouveau_plane, base);
179 180
@@ -197,7 +198,7 @@ nv_destroy_plane(struct drm_plane *plane)
197static void 198static void
198nv10_set_params(struct nouveau_plane *plane) 199nv10_set_params(struct nouveau_plane *plane)
199{ 200{
200 struct nvif_device *dev = &nouveau_drm(plane->base.dev)->device; 201 struct nvif_object *dev = &nouveau_drm(plane->base.dev)->device.object;
201 u32 luma = (plane->brightness - 512) << 16 | plane->contrast; 202 u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
202 u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) | 203 u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
203 (cos_mul(plane->hue, plane->saturation) & 0xffff); 204 (cos_mul(plane->hue, plane->saturation) & 0xffff);
@@ -346,7 +347,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
346 uint32_t src_x, uint32_t src_y, 347 uint32_t src_x, uint32_t src_y,
347 uint32_t src_w, uint32_t src_h) 348 uint32_t src_w, uint32_t src_h)
348{ 349{
349 struct nvif_device *dev = &nouveau_drm(plane->dev)->device; 350 struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
350 struct nouveau_plane *nv_plane = 351 struct nouveau_plane *nv_plane =
351 container_of(plane, struct nouveau_plane, base); 352 container_of(plane, struct nouveau_plane, base);
352 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); 353 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
@@ -426,7 +427,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
426static int 427static int
427nv04_disable_plane(struct drm_plane *plane) 428nv04_disable_plane(struct drm_plane *plane)
428{ 429{
429 struct nvif_device *dev = &nouveau_drm(plane->dev)->device; 430 struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
430 struct nouveau_plane *nv_plane = 431 struct nouveau_plane *nv_plane =
431 container_of(plane, struct nouveau_plane, base); 432 container_of(plane, struct nouveau_plane, base);
432 433
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 70e95cf6fd19..5345eb5378a8 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -35,7 +35,7 @@
35 35
36#include <drm/i2c/ch7006.h> 36#include <drm/i2c/ch7006.h>
37 37
38static struct nvkm_i2c_board_info nv04_tv_encoder_info[] = { 38static struct nvkm_i2c_bus_probe nv04_tv_encoder_info[] = {
39 { 39 {
40 { 40 {
41 I2C_BOARD_INFO("ch7006", 0x75), 41 I2C_BOARD_INFO("ch7006", 0x75),
@@ -55,9 +55,13 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index)
55{ 55{
56 struct nouveau_drm *drm = nouveau_drm(dev); 56 struct nouveau_drm *drm = nouveau_drm(dev);
57 struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); 57 struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
58 58 struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, i2c_index);
59 return i2c->identify(i2c, i2c_index, "TV encoder", 59 if (bus) {
60 nv04_tv_encoder_info, NULL, NULL); 60 return nvkm_i2c_bus_probe(bus, "TV encoder",
61 nv04_tv_encoder_info,
62 NULL, NULL);
63 }
64 return -ENODEV;
61} 65}
62 66
63 67
@@ -205,7 +209,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
205 struct drm_device *dev = connector->dev; 209 struct drm_device *dev = connector->dev;
206 struct nouveau_drm *drm = nouveau_drm(dev); 210 struct nouveau_drm *drm = nouveau_drm(dev);
207 struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); 211 struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
208 struct nvkm_i2c_port *port = i2c->find(i2c, entry->i2c_index); 212 struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, entry->i2c_index);
209 int type, ret; 213 int type, ret;
210 214
211 /* Ensure that we can talk to this encoder */ 215 /* Ensure that we can talk to this encoder */
@@ -231,7 +235,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
231 235
232 /* Run the slave-specific initialization */ 236 /* Run the slave-specific initialization */
233 ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder), 237 ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
234 &port->adapter, 238 &bus->i2c,
235 &nv04_tv_encoder_info[type].dev); 239 &nv04_tv_encoder_info[type].dev);
236 if (ret < 0) 240 if (ret < 0)
237 goto fail_cleanup; 241 goto fail_cleanup;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index d9720dda8385..b734195d80a0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -62,8 +62,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
62 head = (dacclk & 0x100) >> 8; 62 head = (dacclk & 0x100) >> 8;
63 63
64 /* Save the previous state. */ 64 /* Save the previous state. */
65 gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff); 65 gpio1 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
66 gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff); 66 gpio0 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
67 fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL); 67 fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
68 fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START); 68 fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
69 fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END); 69 fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -74,8 +74,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
74 ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c); 74 ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
75 75
76 /* Prepare the DAC for load detection. */ 76 /* Prepare the DAC for load detection. */
77 gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, true); 77 nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, true);
78 gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, true); 78 nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, true);
79 79
80 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343); 80 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
81 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047); 81 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -120,8 +120,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
120 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end); 120 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
121 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start); 121 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
122 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal); 122 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
123 gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, gpio1); 123 nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, gpio1);
124 gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, gpio0); 124 nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, gpio0);
125 125
126 return sample; 126 return sample;
127} 127}
@@ -130,18 +130,10 @@ static bool
130get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) 130get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
131{ 131{
132 struct nouveau_drm *drm = nouveau_drm(dev); 132 struct nouveau_drm *drm = nouveau_drm(dev);
133 struct nvif_device *device = &drm->device; 133 struct nvkm_device *device = nvxx_device(&drm->device);
134 134
135 /* Zotac FX5200 */ 135 if (device->quirk && device->quirk->tv_pin_mask) {
136 if (nv_device_match(nvxx_object(device), 0x0322, 0x19da, 0x1035) || 136 *pin_mask = device->quirk->tv_pin_mask;
137 nv_device_match(nvxx_object(device), 0x0322, 0x19da, 0x2035)) {
138 *pin_mask = 0xc;
139 return false;
140 }
141
142 /* MSI nForce2 IGP */
143 if (nv_device_match(nvxx_object(device), 0x01f0, 0x1462, 0x5710)) {
144 *pin_mask = 0xc;
145 return false; 137 return false;
146 } 138 }
147 139
@@ -395,8 +387,8 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
395 387
396 nv_load_ptv(dev, regs, 200); 388 nv_load_ptv(dev, regs, 200);
397 389
398 gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, mode == DRM_MODE_DPMS_ON); 390 nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, mode == DRM_MODE_DPMS_ON);
399 gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, mode == DRM_MODE_DPMS_ON); 391 nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, mode == DRM_MODE_DPMS_ON);
400 392
401 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON); 393 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
402} 394}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
index 225894cdcac2..459910b6bb32 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
@@ -131,13 +131,13 @@ static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
131 uint32_t val) 131 uint32_t val)
132{ 132{
133 struct nvif_device *device = &nouveau_drm(dev)->device; 133 struct nvif_device *device = &nouveau_drm(dev)->device;
134 nvif_wr32(device, reg, val); 134 nvif_wr32(&device->object, reg, val);
135} 135}
136 136
137static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg) 137static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
138{ 138{
139 struct nvif_device *device = &nouveau_drm(dev)->device; 139 struct nvif_device *device = &nouveau_drm(dev)->device;
140 return nvif_rd32(device, reg); 140 return nvif_rd32(&device->object, reg);
141} 141}
142 142
143static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, 143static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 64f8b2f687d2..95a64d89547c 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -45,6 +45,11 @@
45#define GM107_DISP 0x00009470 45#define GM107_DISP 0x00009470
46#define GM204_DISP 0x00009570 46#define GM204_DISP 0x00009570
47 47
48#define NV31_MPEG 0x00003174
49#define G82_MPEG 0x00008274
50
51#define NV74_VP2 0x00007476
52
48#define NV50_DISP_CURSOR 0x0000507a 53#define NV50_DISP_CURSOR 0x0000507a
49#define G82_DISP_CURSOR 0x0000827a 54#define G82_DISP_CURSOR 0x0000827a
50#define GT214_DISP_CURSOR 0x0000857a 55#define GT214_DISP_CURSOR 0x0000857a
@@ -94,15 +99,40 @@
94#define MAXWELL_A 0x0000b097 99#define MAXWELL_A 0x0000b097
95#define MAXWELL_B 0x0000b197 100#define MAXWELL_B 0x0000b197
96 101
102#define NV74_BSP 0x000074b0
103
104#define GT212_MSVLD 0x000085b1
105#define IGT21A_MSVLD 0x000086b1
106#define G98_MSVLD 0x000088b1
107#define GF100_MSVLD 0x000090b1
108#define GK104_MSVLD 0x000095b1
109
110#define GT212_MSPDEC 0x000085b2
111#define G98_MSPDEC 0x000088b2
112#define GF100_MSPDEC 0x000090b2
113#define GK104_MSPDEC 0x000095b2
114
115#define GT212_MSPPP 0x000085b3
116#define G98_MSPPP 0x000088b3
117#define GF100_MSPPP 0x000090b3
118
119#define G98_SEC 0x000088b4
120
121#define GT212_DMA 0x000085b5
122#define FERMI_DMA 0x000090b5
123#define KEPLER_DMA_COPY_A 0x0000a0b5
124#define MAXWELL_DMA_COPY_A 0x0000b0b5
125
126#define FERMI_DECOMPRESS 0x000090b8
127
97#define FERMI_COMPUTE_A 0x000090c0 128#define FERMI_COMPUTE_A 0x000090c0
98#define FERMI_COMPUTE_B 0x000091c0 129#define FERMI_COMPUTE_B 0x000091c0
99
100#define KEPLER_COMPUTE_A 0x0000a0c0 130#define KEPLER_COMPUTE_A 0x0000a0c0
101#define KEPLER_COMPUTE_B 0x0000a1c0 131#define KEPLER_COMPUTE_B 0x0000a1c0
102
103#define MAXWELL_COMPUTE_A 0x0000b0c0 132#define MAXWELL_COMPUTE_A 0x0000b0c0
104#define MAXWELL_COMPUTE_B 0x0000b1c0 133#define MAXWELL_COMPUTE_B 0x0000b1c0
105 134
135#define NV74_CIPHER 0x000074c1
106 136
107/******************************************************************************* 137/*******************************************************************************
108 * client 138 * client
@@ -126,32 +156,10 @@ struct nv_device_v0 {
126 __u8 version; 156 __u8 version;
127 __u8 pad01[7]; 157 __u8 pad01[7];
128 __u64 device; /* device identifier, ~0 for client default */ 158 __u64 device; /* device identifier, ~0 for client default */
129#define NV_DEVICE_V0_DISABLE_IDENTIFY 0x0000000000000001ULL
130#define NV_DEVICE_V0_DISABLE_MMIO 0x0000000000000002ULL
131#define NV_DEVICE_V0_DISABLE_VBIOS 0x0000000000000004ULL
132#define NV_DEVICE_V0_DISABLE_CORE 0x0000000000000008ULL
133#define NV_DEVICE_V0_DISABLE_DISP 0x0000000000010000ULL
134#define NV_DEVICE_V0_DISABLE_FIFO 0x0000000000020000ULL
135#define NV_DEVICE_V0_DISABLE_GR 0x0000000100000000ULL
136#define NV_DEVICE_V0_DISABLE_MPEG 0x0000000200000000ULL
137#define NV_DEVICE_V0_DISABLE_ME 0x0000000400000000ULL
138#define NV_DEVICE_V0_DISABLE_VP 0x0000000800000000ULL
139#define NV_DEVICE_V0_DISABLE_CIPHER 0x0000001000000000ULL
140#define NV_DEVICE_V0_DISABLE_BSP 0x0000002000000000ULL
141#define NV_DEVICE_V0_DISABLE_MSPPP 0x0000004000000000ULL
142#define NV_DEVICE_V0_DISABLE_CE0 0x0000008000000000ULL
143#define NV_DEVICE_V0_DISABLE_CE1 0x0000010000000000ULL
144#define NV_DEVICE_V0_DISABLE_VIC 0x0000020000000000ULL
145#define NV_DEVICE_V0_DISABLE_MSENC 0x0000040000000000ULL
146#define NV_DEVICE_V0_DISABLE_CE2 0x0000080000000000ULL
147#define NV_DEVICE_V0_DISABLE_MSVLD 0x0000100000000000ULL
148#define NV_DEVICE_V0_DISABLE_SEC 0x0000200000000000ULL
149#define NV_DEVICE_V0_DISABLE_MSPDEC 0x0000400000000000ULL
150 __u64 disable; /* disable particular subsystems */
151 __u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */
152}; 159};
153 160
154#define NV_DEVICE_V0_INFO 0x00 161#define NV_DEVICE_V0_INFO 0x00
162#define NV_DEVICE_V0_TIME 0x01
155 163
156struct nv_device_info_v0 { 164struct nv_device_info_v0 {
157 __u8 version; 165 __u8 version;
@@ -176,6 +184,14 @@ struct nv_device_info_v0 {
176 __u8 pad06[2]; 184 __u8 pad06[2];
177 __u64 ram_size; 185 __u64 ram_size;
178 __u64 ram_user; 186 __u64 ram_user;
187 char chip[16];
188 char name[64];
189};
190
191struct nv_device_time_v0 {
192 __u8 version;
193 __u8 pad01[7];
194 __u64 time;
179}; 195};
180 196
181 197
@@ -235,13 +251,13 @@ struct gf100_dma_v0 {
235 __u8 pad03[5]; 251 __u8 pad03[5];
236}; 252};
237 253
238struct gf110_dma_v0 { 254struct gf119_dma_v0 {
239 __u8 version; 255 __u8 version;
240#define GF110_DMA_V0_PAGE_LP 0x00 256#define GF119_DMA_V0_PAGE_LP 0x00
241#define GF110_DMA_V0_PAGE_SP 0x01 257#define GF119_DMA_V0_PAGE_SP 0x01
242 __u8 page; 258 __u8 page;
243#define GF110_DMA_V0_KIND_PITCH 0x00 259#define GF119_DMA_V0_KIND_PITCH 0x00
244#define GF110_DMA_V0_KIND_VM 0xff 260#define GF119_DMA_V0_KIND_VM 0xff
245 __u8 kind; 261 __u8 kind;
246 __u8 pad03[5]; 262 __u8 pad03[5];
247}; 263};
@@ -251,33 +267,74 @@ struct gf110_dma_v0 {
251 * perfmon 267 * perfmon
252 ******************************************************************************/ 268 ******************************************************************************/
253 269
254struct nvif_perfctr_v0 { 270#define NVIF_PERFMON_V0_QUERY_DOMAIN 0x00
271#define NVIF_PERFMON_V0_QUERY_SIGNAL 0x01
272#define NVIF_PERFMON_V0_QUERY_SOURCE 0x02
273
274struct nvif_perfmon_query_domain_v0 {
255 __u8 version; 275 __u8 version;
256 __u8 pad01[1]; 276 __u8 id;
257 __u16 logic_op; 277 __u8 counter_nr;
258 __u8 pad04[4]; 278 __u8 iter;
259 char name[4][64]; 279 __u16 signal_nr;
280 __u8 pad05[2];
281 char name[64];
260}; 282};
261 283
262#define NVIF_PERFCTR_V0_QUERY 0x00 284struct nvif_perfmon_query_signal_v0 {
263#define NVIF_PERFCTR_V0_SAMPLE 0x01 285 __u8 version;
264#define NVIF_PERFCTR_V0_READ 0x02 286 __u8 domain;
287 __u16 iter;
288 __u8 signal;
289 __u8 source_nr;
290 __u8 pad05[2];
291 char name[64];
292};
265 293
266struct nvif_perfctr_query_v0 { 294struct nvif_perfmon_query_source_v0 {
267 __u8 version; 295 __u8 version;
268 __u8 pad01[3]; 296 __u8 domain;
269 __u32 iter; 297 __u8 signal;
298 __u8 iter;
299 __u8 pad04[4];
300 __u32 source;
301 __u32 mask;
270 char name[64]; 302 char name[64];
271}; 303};
272 304
273struct nvif_perfctr_sample { 305
306/*******************************************************************************
307 * perfdom
308 ******************************************************************************/
309
310struct nvif_perfdom_v0 {
311 __u8 version;
312 __u8 domain;
313 __u8 mode;
314 __u8 pad03[1];
315 struct {
316 __u8 signal[4];
317 __u64 source[4][8];
318 __u16 logic_op;
319 } ctr[4];
274}; 320};
275 321
276struct nvif_perfctr_read_v0 { 322#define NVIF_PERFDOM_V0_INIT 0x00
323#define NVIF_PERFDOM_V0_SAMPLE 0x01
324#define NVIF_PERFDOM_V0_READ 0x02
325
326struct nvif_perfdom_init {
327};
328
329struct nvif_perfdom_sample {
330};
331
332struct nvif_perfdom_read_v0 {
277 __u8 version; 333 __u8 version;
278 __u8 pad01[7]; 334 __u8 pad01[7];
279 __u32 ctr; 335 __u32 ctr[4];
280 __u32 clk; 336 __u32 clk;
337 __u8 pad04[4];
281}; 338};
282 339
283 340
@@ -337,7 +394,16 @@ struct nv03_channel_dma_v0 {
337 __u8 version; 394 __u8 version;
338 __u8 chid; 395 __u8 chid;
339 __u8 pad02[2]; 396 __u8 pad02[2];
340 __u32 pushbuf; 397 __u32 offset;
398 __u64 pushbuf;
399};
400
401struct nv50_channel_dma_v0 {
402 __u8 version;
403 __u8 chid;
404 __u8 pad02[6];
405 __u64 vm;
406 __u64 pushbuf;
341 __u64 offset; 407 __u64 offset;
342}; 408};
343 409
@@ -350,10 +416,20 @@ struct nv03_channel_dma_v0 {
350struct nv50_channel_gpfifo_v0 { 416struct nv50_channel_gpfifo_v0 {
351 __u8 version; 417 __u8 version;
352 __u8 chid; 418 __u8 chid;
353 __u8 pad01[6]; 419 __u8 pad02[2];
354 __u32 pushbuf;
355 __u32 ilength; 420 __u32 ilength;
356 __u64 ioffset; 421 __u64 ioffset;
422 __u64 pushbuf;
423 __u64 vm;
424};
425
426struct fermi_channel_gpfifo_v0 {
427 __u8 version;
428 __u8 chid;
429 __u8 pad02[2];
430 __u32 ilength;
431 __u64 ioffset;
432 __u64 vm;
357}; 433};
358 434
359struct kepler_channel_gpfifo_a_v0 { 435struct kepler_channel_gpfifo_a_v0 {
@@ -367,10 +443,9 @@ struct kepler_channel_gpfifo_a_v0 {
367#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_ENC 0x40 443#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_ENC 0x40
368 __u8 engine; 444 __u8 engine;
369 __u16 chid; 445 __u16 chid;
370 __u8 pad04[4];
371 __u32 pushbuf;
372 __u32 ilength; 446 __u32 ilength;
373 __u64 ioffset; 447 __u64 ioffset;
448 __u64 vm;
374}; 449};
375 450
376/******************************************************************************* 451/*******************************************************************************
@@ -491,8 +566,8 @@ struct nv50_disp_pior_pwr_v0 {
491/* core */ 566/* core */
492struct nv50_disp_core_channel_dma_v0 { 567struct nv50_disp_core_channel_dma_v0 {
493 __u8 version; 568 __u8 version;
494 __u8 pad01[3]; 569 __u8 pad01[7];
495 __u32 pushbuf; 570 __u64 pushbuf;
496}; 571};
497 572
498#define NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT 0x00 573#define NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
@@ -509,9 +584,9 @@ struct nv50_disp_cursor_v0 {
509/* base */ 584/* base */
510struct nv50_disp_base_channel_dma_v0 { 585struct nv50_disp_base_channel_dma_v0 {
511 __u8 version; 586 __u8 version;
512 __u8 pad01[2];
513 __u8 head; 587 __u8 head;
514 __u32 pushbuf; 588 __u8 pad02[6];
589 __u64 pushbuf;
515}; 590};
516 591
517#define NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT 0x00 592#define NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
@@ -519,9 +594,9 @@ struct nv50_disp_base_channel_dma_v0 {
519/* overlay */ 594/* overlay */
520struct nv50_disp_overlay_channel_dma_v0 { 595struct nv50_disp_overlay_channel_dma_v0 {
521 __u8 version; 596 __u8 version;
522 __u8 pad01[2];
523 __u8 head; 597 __u8 head;
524 __u32 pushbuf; 598 __u8 pad02[6];
599 __u64 pushbuf;
525}; 600};
526 601
527#define NV50_DISP_OVERLAY_CHANNEL_DMA_V0_NTFY_UEVENT 0x00 602#define NV50_DISP_OVERLAY_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
@@ -536,6 +611,20 @@ struct nv50_disp_overlay_v0 {
536#define NV50_DISP_OVERLAY_V0_NTFY_UEVENT 0x00 611#define NV50_DISP_OVERLAY_V0_NTFY_UEVENT 0x00
537 612
538/******************************************************************************* 613/*******************************************************************************
614 * software
615 ******************************************************************************/
616
617#define NVSW_NTFY_UEVENT 0x00
618
619#define NV04_NVSW_GET_REF 0x00
620
621struct nv04_nvsw_get_ref_v0 {
622 __u8 version;
623 __u8 pad01[3];
624 __u32 ref;
625};
626
627/*******************************************************************************
539 * fermi 628 * fermi
540 ******************************************************************************/ 629 ******************************************************************************/
541 630
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index eca648ef0f7a..4a7f6f7b836d 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -4,36 +4,25 @@
4#include <nvif/object.h> 4#include <nvif/object.h>
5 5
6struct nvif_client { 6struct nvif_client {
7 struct nvif_object base; 7 struct nvif_object object;
8 struct nvif_object *object; /*XXX: hack for nvif_object() */
9 const struct nvif_driver *driver; 8 const struct nvif_driver *driver;
9 u64 version;
10 u8 route;
10 bool super; 11 bool super;
11}; 12};
12 13
13static inline struct nvif_client * 14int nvif_client_init(const char *drv, const char *name, u64 device,
14nvif_client(struct nvif_object *object) 15 const char *cfg, const char *dbg,
15{
16 while (object && object->parent != object)
17 object = object->parent;
18 return (void *)object;
19}
20
21int nvif_client_init(void (*dtor)(struct nvif_client *), const char *,
22 const char *, u64, const char *, const char *,
23 struct nvif_client *); 16 struct nvif_client *);
24void nvif_client_fini(struct nvif_client *); 17void nvif_client_fini(struct nvif_client *);
25int nvif_client_new(const char *, const char *, u64, const char *,
26 const char *, struct nvif_client **);
27void nvif_client_ref(struct nvif_client *, struct nvif_client **);
28int nvif_client_ioctl(struct nvif_client *, void *, u32); 18int nvif_client_ioctl(struct nvif_client *, void *, u32);
29int nvif_client_suspend(struct nvif_client *); 19int nvif_client_suspend(struct nvif_client *);
30int nvif_client_resume(struct nvif_client *); 20int nvif_client_resume(struct nvif_client *);
31 21
32/*XXX*/ 22/*XXX*/
33#include <core/client.h> 23#include <core/client.h>
34#define nvxx_client(a) ({ \ 24#define nvxx_client(a) ({ \
35 struct nvif_client *_client = nvif_client(nvif_object(a)); \ 25 struct nvif_client *_client = (a); \
36 nvkm_client(_client->base.priv); \ 26 (struct nvkm_client *)_client->object.priv; \
37}) 27})
38
39#endif 28#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index 88553a741ab7..700a9b206726 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -5,26 +5,35 @@
5#include <nvif/class.h> 5#include <nvif/class.h>
6 6
7struct nvif_device { 7struct nvif_device {
8 struct nvif_object base; 8 struct nvif_object object;
9 struct nvif_object *object; /*XXX: hack for nvif_object() */
10 struct nv_device_info_v0 info; 9 struct nv_device_info_v0 info;
11}; 10};
12 11
13static inline struct nvif_device * 12int nvif_device_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
14nvif_device(struct nvif_object *object)
15{
16 while (object && object->oclass != 0x0080 /*XXX: NV_DEVICE_CLASS*/ )
17 object = object->parent;
18 return (void *)object;
19}
20
21int nvif_device_init(struct nvif_object *, void (*dtor)(struct nvif_device *),
22 u32 handle, u32 oclass, void *, u32,
23 struct nvif_device *); 13 struct nvif_device *);
24void nvif_device_fini(struct nvif_device *); 14void nvif_device_fini(struct nvif_device *);
25int nvif_device_new(struct nvif_object *, u32 handle, u32 oclass, 15u64 nvif_device_time(struct nvif_device *);
26 void *, u32, struct nvif_device **); 16
27void nvif_device_ref(struct nvif_device *, struct nvif_device **); 17/* Delay based on GPU time (ie. PTIMER).
18 *
19 * Will return -ETIMEDOUT unless the loop was terminated with 'break',
20 * where it will return the number of nanoseconds taken instead.
21 */
22#define nvif_nsec(d,n,cond...) ({ \
23 struct nvif_device *_device = (d); \
24 u64 _nsecs = (n), _time0 = nvif_device_time(_device); \
25 s64 _taken = 0; \
26 \
27 do { \
28 cond \
29 } while (_taken = nvif_device_time(_device) - _time0, _taken < _nsecs);\
30 \
31 if (_taken >= _nsecs) \
32 _taken = -ETIMEDOUT; \
33 _taken; \
34})
35#define nvif_usec(d,u,cond...) nvif_nsec((d), (u) * 1000, ##cond)
36#define nvif_msec(d,m,cond...) nvif_usec((d), (m) * 1000, ##cond)
28 37
29/*XXX*/ 38/*XXX*/
30#include <subdev/bios.h> 39#include <subdev/bios.h>
@@ -36,26 +45,30 @@ void nvif_device_ref(struct nvif_device *, struct nvif_device **);
36#include <subdev/i2c.h> 45#include <subdev/i2c.h>
37#include <subdev/timer.h> 46#include <subdev/timer.h>
38#include <subdev/therm.h> 47#include <subdev/therm.h>
48#include <subdev/pci.h>
39 49
40#define nvxx_device(a) nv_device(nvxx_object((a))) 50#define nvxx_device(a) ({ \
41#define nvxx_bios(a) nvkm_bios(nvxx_device(a)) 51 struct nvif_device *_device = (a); \
42#define nvxx_fb(a) nvkm_fb(nvxx_device(a)) 52 struct { \
43#define nvxx_mmu(a) nvkm_mmu(nvxx_device(a)) 53 struct nvkm_object object; \
44#define nvxx_bar(a) nvkm_bar(nvxx_device(a)) 54 struct nvkm_device *device; \
45#define nvxx_gpio(a) nvkm_gpio(nvxx_device(a)) 55 } *_udevice = _device->object.priv; \
46#define nvxx_clk(a) nvkm_clk(nvxx_device(a)) 56 _udevice->device; \
47#define nvxx_i2c(a) nvkm_i2c(nvxx_device(a)) 57})
48#define nvxx_timer(a) nvkm_timer(nvxx_device(a)) 58#define nvxx_bios(a) nvxx_device(a)->bios
49#define nvxx_wait(a,b,c,d) nv_wait(nvxx_timer(a), (b), (c), (d)) 59#define nvxx_fb(a) nvxx_device(a)->fb
50#define nvxx_wait_cb(a,b,c) nv_wait_cb(nvxx_timer(a), (b), (c)) 60#define nvxx_mmu(a) nvxx_device(a)->mmu
51#define nvxx_therm(a) nvkm_therm(nvxx_device(a)) 61#define nvxx_bar(a) nvxx_device(a)->bar
62#define nvxx_gpio(a) nvxx_device(a)->gpio
63#define nvxx_clk(a) nvxx_device(a)->clk
64#define nvxx_i2c(a) nvxx_device(a)->i2c
65#define nvxx_therm(a) nvxx_device(a)->therm
52 66
53#include <core/device.h> 67#include <core/device.h>
54#include <engine/fifo.h> 68#include <engine/fifo.h>
55#include <engine/gr.h> 69#include <engine/gr.h>
56#include <engine/sw.h> 70#include <engine/sw.h>
57 71
58#define nvxx_fifo(a) nvkm_fifo(nvxx_device(a)) 72#define nvxx_fifo(a) nvxx_device(a)->fifo
59#define nvxx_fifo_chan(a) ((struct nvkm_fifo_chan *)nvxx_object(a)) 73#define nvxx_gr(a) nvxx_device(a)->gr
60#define nvxx_gr(a) ((struct nvkm_gr *)nvkm_engine(nvxx_object(a), NVDEV_ENGINE_GR))
61#endif 74#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index 4cd8e323b23d..b0ac0215ebf9 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -1,11 +1,10 @@
1#ifndef __NVIF_IOCTL_H__ 1#ifndef __NVIF_IOCTL_H__
2#define __NVIF_IOCTL_H__ 2#define __NVIF_IOCTL_H__
3 3
4#define NVIF_VERSION_LATEST 0x0000000000000000ULL
5
4struct nvif_ioctl_v0 { 6struct nvif_ioctl_v0 {
5 __u8 version; 7 __u8 version;
6#define NVIF_IOCTL_V0_OWNER_NVIF 0x00
7#define NVIF_IOCTL_V0_OWNER_ANY 0xff
8 __u8 owner;
9#define NVIF_IOCTL_V0_NOP 0x00 8#define NVIF_IOCTL_V0_NOP 0x00
10#define NVIF_IOCTL_V0_SCLASS 0x01 9#define NVIF_IOCTL_V0_SCLASS 0x01
11#define NVIF_IOCTL_V0_NEW 0x02 10#define NVIF_IOCTL_V0_NEW 0x02
@@ -20,17 +19,20 @@ struct nvif_ioctl_v0 {
20#define NVIF_IOCTL_V0_NTFY_GET 0x0b 19#define NVIF_IOCTL_V0_NTFY_GET 0x0b
21#define NVIF_IOCTL_V0_NTFY_PUT 0x0c 20#define NVIF_IOCTL_V0_NTFY_PUT 0x0c
22 __u8 type; 21 __u8 type;
23 __u8 path_nr; 22 __u8 pad02[4];
23#define NVIF_IOCTL_V0_OWNER_NVIF 0x00
24#define NVIF_IOCTL_V0_OWNER_ANY 0xff
25 __u8 owner;
24#define NVIF_IOCTL_V0_ROUTE_NVIF 0x00 26#define NVIF_IOCTL_V0_ROUTE_NVIF 0x00
25#define NVIF_IOCTL_V0_ROUTE_HIDDEN 0xff 27#define NVIF_IOCTL_V0_ROUTE_HIDDEN 0xff
26 __u8 pad04[3];
27 __u8 route; 28 __u8 route;
28 __u64 token; 29 __u64 token;
29 __u32 path[8]; /* in reverse */ 30 __u64 object;
30 __u8 data[]; /* ioctl data (below) */ 31 __u8 data[]; /* ioctl data (below) */
31}; 32};
32 33
33struct nvif_ioctl_nop { 34struct nvif_ioctl_nop_v0 {
35 __u64 version;
34}; 36};
35 37
36struct nvif_ioctl_sclass_v0 { 38struct nvif_ioctl_sclass_v0 {
@@ -38,7 +40,11 @@ struct nvif_ioctl_sclass_v0 {
38 __u8 version; 40 __u8 version;
39 __u8 count; 41 __u8 count;
40 __u8 pad02[6]; 42 __u8 pad02[6];
41 __u32 oclass[]; 43 struct nvif_ioctl_sclass_oclass_v0 {
44 __s32 oclass;
45 __s16 minver;
46 __s16 maxver;
47 } oclass[];
42}; 48};
43 49
44struct nvif_ioctl_new_v0 { 50struct nvif_ioctl_new_v0 {
@@ -47,11 +53,17 @@ struct nvif_ioctl_new_v0 {
47 __u8 pad01[6]; 53 __u8 pad01[6];
48 __u8 route; 54 __u8 route;
49 __u64 token; 55 __u64 token;
56 __u64 object;
50 __u32 handle; 57 __u32 handle;
51/* these class numbers are made up by us, and not nvidia-assigned */ 58/* these class numbers are made up by us, and not nvidia-assigned */
52#define NVIF_IOCTL_NEW_V0_PERFCTR 0x0000ffff 59#define NVIF_IOCTL_NEW_V0_CONTROL -1
53#define NVIF_IOCTL_NEW_V0_CONTROL 0x0000fffe 60#define NVIF_IOCTL_NEW_V0_PERFMON -2
54 __u32 oclass; 61#define NVIF_IOCTL_NEW_V0_PERFDOM -3
62#define NVIF_IOCTL_NEW_V0_SW_NV04 -4
63#define NVIF_IOCTL_NEW_V0_SW_NV10 -5
64#define NVIF_IOCTL_NEW_V0_SW_NV50 -6
65#define NVIF_IOCTL_NEW_V0_SW_GF100 -7
66 __s32 oclass;
55 __u8 data[]; /* class data (class.h) */ 67 __u8 data[]; /* class data (class.h) */
56}; 68};
57 69
diff --git a/drivers/gpu/drm/nouveau/include/nvif/notify.h b/drivers/gpu/drm/nouveau/include/nvif/notify.h
index 9ebfa3b45e76..51e2eb580809 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/notify.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/notify.h
@@ -23,17 +23,11 @@ struct nvif_notify {
23 struct work_struct work; 23 struct work_struct work;
24}; 24};
25 25
26int nvif_notify_init(struct nvif_object *, void (*dtor)(struct nvif_notify *), 26int nvif_notify_init(struct nvif_object *, int (*func)(struct nvif_notify *),
27 int (*func)(struct nvif_notify *), bool work, u8 type, 27 bool work, u8 type, void *data, u32 size, u32 reply,
28 void *data, u32 size, u32 reply, struct nvif_notify *); 28 struct nvif_notify *);
29int nvif_notify_fini(struct nvif_notify *); 29int nvif_notify_fini(struct nvif_notify *);
30int nvif_notify_get(struct nvif_notify *); 30int nvif_notify_get(struct nvif_notify *);
31int nvif_notify_put(struct nvif_notify *); 31int nvif_notify_put(struct nvif_notify *);
32int nvif_notify(const void *, u32, const void *, u32); 32int nvif_notify(const void *, u32, const void *, u32);
33
34int nvif_notify_new(struct nvif_object *, int (*func)(struct nvif_notify *),
35 bool work, u8 type, void *data, u32 size, u32 reply,
36 struct nvif_notify **);
37void nvif_notify_ref(struct nvif_notify *, struct nvif_notify **);
38
39#endif 33#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 04c874707b96..8d815967767f 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -3,73 +3,73 @@
3 3
4#include <nvif/os.h> 4#include <nvif/os.h>
5 5
6struct nvif_sclass {
7 s32 oclass;
8 int minver;
9 int maxver;
10};
11
6struct nvif_object { 12struct nvif_object {
7 struct nvif_object *parent; 13 struct nvif_client *client;
8 struct nvif_object *object; /*XXX: hack for nvif_object() */
9 struct kref refcount;
10 u32 handle; 14 u32 handle;
11 u32 oclass; 15 s32 oclass;
12 void *data;
13 u32 size;
14 void *priv; /*XXX: hack */ 16 void *priv; /*XXX: hack */
15 void (*dtor)(struct nvif_object *);
16 struct { 17 struct {
17 void __iomem *ptr; 18 void __iomem *ptr;
18 u32 size; 19 u32 size;
19 } map; 20 } map;
20}; 21};
21 22
22int nvif_object_init(struct nvif_object *, void (*dtor)(struct nvif_object *), 23int nvif_object_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
23 u32 handle, u32 oclass, void *, u32,
24 struct nvif_object *); 24 struct nvif_object *);
25void nvif_object_fini(struct nvif_object *); 25void nvif_object_fini(struct nvif_object *);
26int nvif_object_new(struct nvif_object *, u32 handle, u32 oclass,
27 void *, u32, struct nvif_object **);
28void nvif_object_ref(struct nvif_object *, struct nvif_object **);
29int nvif_object_ioctl(struct nvif_object *, void *, u32, void **); 26int nvif_object_ioctl(struct nvif_object *, void *, u32, void **);
30int nvif_object_sclass(struct nvif_object *, u32 *, int); 27int nvif_object_sclass_get(struct nvif_object *, struct nvif_sclass **);
28void nvif_object_sclass_put(struct nvif_sclass **);
31u32 nvif_object_rd(struct nvif_object *, int, u64); 29u32 nvif_object_rd(struct nvif_object *, int, u64);
32void nvif_object_wr(struct nvif_object *, int, u64, u32); 30void nvif_object_wr(struct nvif_object *, int, u64, u32);
33int nvif_object_mthd(struct nvif_object *, u32, void *, u32); 31int nvif_object_mthd(struct nvif_object *, u32, void *, u32);
34int nvif_object_map(struct nvif_object *); 32int nvif_object_map(struct nvif_object *);
35void nvif_object_unmap(struct nvif_object *); 33void nvif_object_unmap(struct nvif_object *);
36 34
35#define nvif_handle(a) (unsigned long)(void *)(a)
37#define nvif_object(a) (a)->object 36#define nvif_object(a) (a)->object
38 37
39#define ioread8_native ioread8 38#define nvif_rd(a,f,b,c) ({ \
40#define iowrite8_native iowrite8 39 struct nvif_object *_object = (a); \
41#define nvif_rd(a,b,c) ({ \
42 struct nvif_object *_object = nvif_object(a); \
43 u32 _data; \ 40 u32 _data; \
44 if (likely(_object->map.ptr)) \ 41 if (likely(_object->map.ptr)) \
45 _data = ioread##b##_native((u8 __iomem *)_object->map.ptr + (c)); \ 42 _data = f((u8 __iomem *)_object->map.ptr + (c)); \
46 else \ 43 else \
47 _data = nvif_object_rd(_object, (b) / 8, (c)); \ 44 _data = nvif_object_rd(_object, (b), (c)); \
48 _data; \ 45 _data; \
49}) 46})
50#define nvif_wr(a,b,c,d) ({ \ 47#define nvif_wr(a,f,b,c,d) ({ \
51 struct nvif_object *_object = nvif_object(a); \ 48 struct nvif_object *_object = (a); \
52 if (likely(_object->map.ptr)) \ 49 if (likely(_object->map.ptr)) \
53 iowrite##b##_native((d), (u8 __iomem *)_object->map.ptr + (c)); \ 50 f((d), (u8 __iomem *)_object->map.ptr + (c)); \
54 else \ 51 else \
55 nvif_object_wr(_object, (b) / 8, (c), (d)); \ 52 nvif_object_wr(_object, (b), (c), (d)); \
56}) 53})
57#define nvif_rd08(a,b) ({ u8 _v = nvif_rd((a), 8, (b)); _v; }) 54#define nvif_rd08(a,b) ({ ((u8)nvif_rd((a), ioread8, 1, (b))); })
58#define nvif_rd16(a,b) ({ u16 _v = nvif_rd((a), 16, (b)); _v; }) 55#define nvif_rd16(a,b) ({ ((u16)nvif_rd((a), ioread16_native, 2, (b))); })
59#define nvif_rd32(a,b) ({ u32 _v = nvif_rd((a), 32, (b)); _v; }) 56#define nvif_rd32(a,b) ({ ((u32)nvif_rd((a), ioread32_native, 4, (b))); })
60#define nvif_wr08(a,b,c) nvif_wr((a), 8, (b), (u8)(c)) 57#define nvif_wr08(a,b,c) nvif_wr((a), iowrite8, 1, (b), (u8)(c))
61#define nvif_wr16(a,b,c) nvif_wr((a), 16, (b), (u16)(c)) 58#define nvif_wr16(a,b,c) nvif_wr((a), iowrite16_native, 2, (b), (u16)(c))
62#define nvif_wr32(a,b,c) nvif_wr((a), 32, (b), (u32)(c)) 59#define nvif_wr32(a,b,c) nvif_wr((a), iowrite32_native, 4, (b), (u32)(c))
63#define nvif_mask(a,b,c,d) ({ \ 60#define nvif_mask(a,b,c,d) ({ \
64 u32 _v = nvif_rd32(nvif_object(a), (b)); \ 61 struct nvif_object *__object = (a); \
65 nvif_wr32(nvif_object(a), (b), (_v & ~(c)) | (d)); \ 62 u32 _addr = (b), _data = nvif_rd32(__object, _addr); \
66 _v; \ 63 nvif_wr32(__object, _addr, (_data & ~(c)) | (d)); \
64 _data; \
67}) 65})
68 66
69#define nvif_mthd(a,b,c,d) nvif_object_mthd(nvif_object(a), (b), (c), (d)) 67#define nvif_mthd(a,b,c,d) nvif_object_mthd((a), (b), (c), (d))
70 68
71/*XXX*/ 69/*XXX*/
72#include <core/object.h> 70#include <core/object.h>
73#define nvxx_object(a) ((struct nvkm_object *)nvif_object(a)->priv) 71#define nvxx_object(a) ({ \
74 72 struct nvif_object *_object = (a); \
73 (struct nvkm_object *)_object->priv; \
74})
75#endif 75#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index bdd05ee7ec72..3accc99d8e0b 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
@@ -24,9 +24,15 @@
24#include <linux/power_supply.h> 24#include <linux/power_supply.h>
25#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/regulator/consumer.h> 26#include <linux/regulator/consumer.h>
27#include <linux/agp_backend.h>
28#include <linux/reset.h>
29#include <linux/iommu.h>
27 30
28#include <asm/unaligned.h> 31#include <asm/unaligned.h>
29 32
33#include <soc/tegra/fuse.h>
34#include <soc/tegra/pmc.h>
35
30#ifndef ioread32_native 36#ifndef ioread32_native
31#ifdef __BIG_ENDIAN 37#ifdef __BIG_ENDIAN
32#define ioread16_native ioread16be 38#define ioread16_native ioread16be
@@ -40,5 +46,4 @@
40#define iowrite32_native iowrite32 46#define iowrite32_native iowrite32
41#endif /* def __BIG_ENDIAN else */ 47#endif /* def __BIG_ENDIAN else */
42#endif /* !ioread32_native */ 48#endif /* !ioread32_native */
43
44#endif 49#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index a35b38244502..eaf5905a87a3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -1,55 +1,52 @@
1#ifndef __NVKM_CLIENT_H__ 1#ifndef __NVKM_CLIENT_H__
2#define __NVKM_CLIENT_H__ 2#define __NVKM_CLIENT_H__
3#include <core/namedb.h> 3#include <core/object.h>
4 4
5struct nvkm_client { 5struct nvkm_client {
6 struct nvkm_namedb namedb; 6 struct nvkm_object object;
7 struct nvkm_handle *root;
8 struct nvkm_object *device;
9 char name[32]; 7 char name[32];
8 u64 device;
10 u32 debug; 9 u32 debug;
11 struct nvkm_vm *vm; 10
11 struct nvkm_client_notify *notify[16];
12 struct rb_root objroot;
13 struct rb_root dmaroot;
14
12 bool super; 15 bool super;
13 void *data; 16 void *data;
14
15 int (*ntfy)(const void *, u32, const void *, u32); 17 int (*ntfy)(const void *, u32, const void *, u32);
16 struct nvkm_client_notify *notify[16]; 18
19 struct nvkm_vm *vm;
17}; 20};
18 21
19static inline struct nvkm_client * 22bool nvkm_client_insert(struct nvkm_client *, struct nvkm_object *);
20nv_client(void *obj) 23void nvkm_client_remove(struct nvkm_client *, struct nvkm_object *);
21{ 24struct nvkm_object *nvkm_client_search(struct nvkm_client *, u64 object);
22#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
23 if (unlikely(!nv_iclass(obj, NV_CLIENT_CLASS)))
24 nv_assert("BAD CAST -> NvClient, %08x", nv_hclass(obj));
25#endif
26 return obj;
27}
28
29static inline struct nvkm_client *
30nvkm_client(void *obj)
31{
32 struct nvkm_object *client = nv_object(obj);
33 while (client && !(nv_iclass(client, NV_CLIENT_CLASS)))
34 client = client->parent;
35 return (void *)client;
36}
37
38#define nvkm_client_create(n,c,oc,od,d) \
39 nvkm_client_create_((n), (c), (oc), (od), sizeof(**d), (void **)d)
40
41int nvkm_client_create_(const char *name, u64 device, const char *cfg,
42 const char *dbg, int, void **);
43#define nvkm_client_destroy(p) \
44 nvkm_namedb_destroy(&(p)->base)
45 25
26int nvkm_client_new(const char *name, u64 device, const char *cfg,
27 const char *dbg, struct nvkm_client **);
28void nvkm_client_del(struct nvkm_client **);
46int nvkm_client_init(struct nvkm_client *); 29int nvkm_client_init(struct nvkm_client *);
47int nvkm_client_fini(struct nvkm_client *, bool suspend); 30int nvkm_client_fini(struct nvkm_client *, bool suspend);
48const char *nvkm_client_name(void *obj);
49 31
50int nvkm_client_notify_new(struct nvkm_object *, struct nvkm_event *, 32int nvkm_client_notify_new(struct nvkm_object *, struct nvkm_event *,
51 void *data, u32 size); 33 void *data, u32 size);
52int nvkm_client_notify_del(struct nvkm_client *, int index); 34int nvkm_client_notify_del(struct nvkm_client *, int index);
53int nvkm_client_notify_get(struct nvkm_client *, int index); 35int nvkm_client_notify_get(struct nvkm_client *, int index);
54int nvkm_client_notify_put(struct nvkm_client *, int index); 36int nvkm_client_notify_put(struct nvkm_client *, int index);
37
38/* logging for client-facing objects */
39#define nvif_printk(o,l,p,f,a...) do { \
40 struct nvkm_object *_object = (o); \
41 struct nvkm_client *_client = _object->client; \
42 if (_client->debug >= NV_DBG_##l) \
43 printk(KERN_##p "nouveau: %s:%08x:%08x: "f, _client->name, \
44 _object->handle, _object->oclass, ##a); \
45} while(0)
46#define nvif_fatal(o,f,a...) nvif_printk((o), FATAL, CRIT, f, ##a)
47#define nvif_error(o,f,a...) nvif_printk((o), ERROR, ERR, f, ##a)
48#define nvif_debug(o,f,a...) nvif_printk((o), DEBUG, INFO, f, ##a)
49#define nvif_trace(o,f,a...) nvif_printk((o), TRACE, INFO, f, ##a)
50#define nvif_info(o,f,a...) nvif_printk((o), INFO, INFO, f, ##a)
51#define nvif_ioctl(o,f,a...) nvif_trace((o), "ioctl: "f, ##a)
55#endif 52#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
index d07cb860b56c..c59fd4e2ad5e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
@@ -1,18 +1,11 @@
1#ifndef __NVKM_DEBUG_H__ 1#ifndef __NVKM_DEBUG_H__
2#define __NVKM_DEBUG_H__ 2#define __NVKM_DEBUG_H__
3extern int nv_info_debug_level;
4
5#define NV_DBG_FATAL 0 3#define NV_DBG_FATAL 0
6#define NV_DBG_ERROR 1 4#define NV_DBG_ERROR 1
7#define NV_DBG_WARN 2 5#define NV_DBG_WARN 2
8#define NV_DBG_INFO nv_info_debug_level 6#define NV_DBG_INFO 3
9#define NV_DBG_DEBUG 4 7#define NV_DBG_DEBUG 4
10#define NV_DBG_TRACE 5 8#define NV_DBG_TRACE 5
11#define NV_DBG_PARANOIA 6 9#define NV_DBG_PARANOIA 6
12#define NV_DBG_SPAM 7 10#define NV_DBG_SPAM 7
13
14#define NV_DBG_INFO_NORMAL 3
15#define NV_DBG_INFO_SILENT NV_DBG_DEBUG
16
17#define nv_debug_level(a) nv_info_debug_level = NV_DBG_INFO_##a
18#endif 11#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 333db33a162c..8f760002e401 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -1,24 +1,84 @@
1#ifndef __NVKM_DEVICE_H__ 1#ifndef __NVKM_DEVICE_H__
2#define __NVKM_DEVICE_H__ 2#define __NVKM_DEVICE_H__
3#include <core/engine.h>
4#include <core/event.h> 3#include <core/event.h>
4#include <core/object.h>
5
6enum nvkm_devidx {
7 NVKM_SUBDEV_PCI,
8 NVKM_SUBDEV_VBIOS,
9 NVKM_SUBDEV_DEVINIT,
10 NVKM_SUBDEV_IBUS,
11 NVKM_SUBDEV_GPIO,
12 NVKM_SUBDEV_I2C,
13 NVKM_SUBDEV_FUSE,
14 NVKM_SUBDEV_MXM,
15 NVKM_SUBDEV_MC,
16 NVKM_SUBDEV_BUS,
17 NVKM_SUBDEV_TIMER,
18 NVKM_SUBDEV_FB,
19 NVKM_SUBDEV_LTC,
20 NVKM_SUBDEV_INSTMEM,
21 NVKM_SUBDEV_MMU,
22 NVKM_SUBDEV_BAR,
23 NVKM_SUBDEV_PMU,
24 NVKM_SUBDEV_VOLT,
25 NVKM_SUBDEV_THERM,
26 NVKM_SUBDEV_CLK,
27
28 NVKM_ENGINE_DMAOBJ,
29 NVKM_ENGINE_IFB,
30 NVKM_ENGINE_FIFO,
31 NVKM_ENGINE_SW,
32 NVKM_ENGINE_GR,
33 NVKM_ENGINE_MPEG,
34 NVKM_ENGINE_ME,
35 NVKM_ENGINE_VP,
36 NVKM_ENGINE_CIPHER,
37 NVKM_ENGINE_BSP,
38 NVKM_ENGINE_MSPPP,
39 NVKM_ENGINE_CE0,
40 NVKM_ENGINE_CE1,
41 NVKM_ENGINE_CE2,
42 NVKM_ENGINE_VIC,
43 NVKM_ENGINE_MSENC,
44 NVKM_ENGINE_DISP,
45 NVKM_ENGINE_PM,
46 NVKM_ENGINE_MSVLD,
47 NVKM_ENGINE_SEC,
48 NVKM_ENGINE_MSPDEC,
49
50 NVKM_SUBDEV_NR
51};
52
53enum nvkm_device_type {
54 NVKM_DEVICE_PCI,
55 NVKM_DEVICE_AGP,
56 NVKM_DEVICE_PCIE,
57 NVKM_DEVICE_TEGRA,
58};
5 59
6struct nvkm_device { 60struct nvkm_device {
7 struct nvkm_engine engine; 61 const struct nvkm_device_func *func;
62 const struct nvkm_device_quirk *quirk;
63 struct device *dev;
64 enum nvkm_device_type type;
65 u64 handle;
66 const char *name;
67 const char *cfgopt;
68 const char *dbgopt;
69
8 struct list_head head; 70 struct list_head head;
71 struct mutex mutex;
72 int refcount;
9 73
10 struct pci_dev *pdev; 74 void __iomem *pri;
11 struct platform_device *platformdev;
12 u64 handle;
13 75
14 struct nvkm_event event; 76 struct nvkm_event event;
15 77
16 const char *cfgopt;
17 const char *dbgopt;
18 const char *name;
19 const char *cname;
20 u64 disable_mask; 78 u64 disable_mask;
79 u32 debug;
21 80
81 const struct nvkm_device_chip *chip;
22 enum { 82 enum {
23 NV_04 = 0x04, 83 NV_04 = 0x04,
24 NV_10 = 0x10, 84 NV_10 = 0x10,
@@ -35,67 +95,157 @@ struct nvkm_device {
35 u8 chiprev; 95 u8 chiprev;
36 u32 crystal; 96 u32 crystal;
37 97
38 struct nvkm_oclass *oclass[NVDEV_SUBDEV_NR];
39 struct nvkm_object *subdev[NVDEV_SUBDEV_NR];
40
41 struct { 98 struct {
42 struct notifier_block nb; 99 struct notifier_block nb;
43 } acpi; 100 } acpi;
101
102 struct nvkm_bar *bar;
103 struct nvkm_bios *bios;
104 struct nvkm_bus *bus;
105 struct nvkm_clk *clk;
106 struct nvkm_devinit *devinit;
107 struct nvkm_fb *fb;
108 struct nvkm_fuse *fuse;
109 struct nvkm_gpio *gpio;
110 struct nvkm_i2c *i2c;
111 struct nvkm_subdev *ibus;
112 struct nvkm_instmem *imem;
113 struct nvkm_ltc *ltc;
114 struct nvkm_mc *mc;
115 struct nvkm_mmu *mmu;
116 struct nvkm_subdev *mxm;
117 struct nvkm_pci *pci;
118 struct nvkm_pmu *pmu;
119 struct nvkm_therm *therm;
120 struct nvkm_timer *timer;
121 struct nvkm_volt *volt;
122
123 struct nvkm_engine *bsp;
124 struct nvkm_engine *ce[3];
125 struct nvkm_engine *cipher;
126 struct nvkm_disp *disp;
127 struct nvkm_dma *dma;
128 struct nvkm_fifo *fifo;
129 struct nvkm_gr *gr;
130 struct nvkm_engine *ifb;
131 struct nvkm_engine *me;
132 struct nvkm_engine *mpeg;
133 struct nvkm_engine *msenc;
134 struct nvkm_engine *mspdec;
135 struct nvkm_engine *msppp;
136 struct nvkm_engine *msvld;
137 struct nvkm_pm *pm;
138 struct nvkm_engine *sec;
139 struct nvkm_sw *sw;
140 struct nvkm_engine *vic;
141 struct nvkm_engine *vp;
142};
143
144struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int index);
145struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int index);
146
147struct nvkm_device_func {
148 struct nvkm_device_pci *(*pci)(struct nvkm_device *);
149 struct nvkm_device_tegra *(*tegra)(struct nvkm_device *);
150 void *(*dtor)(struct nvkm_device *);
151 int (*preinit)(struct nvkm_device *);
152 int (*init)(struct nvkm_device *);
153 void (*fini)(struct nvkm_device *, bool suspend);
154 resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
155 resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
156 bool cpu_coherent;
157};
158
159struct nvkm_device_quirk {
160 u8 tv_pin_mask;
161 u8 tv_gpio;
162 bool War00C800_0;
163};
164
165struct nvkm_device_chip {
166 const char *name;
167
168 int (*bar )(struct nvkm_device *, int idx, struct nvkm_bar **);
169 int (*bios )(struct nvkm_device *, int idx, struct nvkm_bios **);
170 int (*bus )(struct nvkm_device *, int idx, struct nvkm_bus **);
171 int (*clk )(struct nvkm_device *, int idx, struct nvkm_clk **);
172 int (*devinit)(struct nvkm_device *, int idx, struct nvkm_devinit **);
173 int (*fb )(struct nvkm_device *, int idx, struct nvkm_fb **);
174 int (*fuse )(struct nvkm_device *, int idx, struct nvkm_fuse **);
175 int (*gpio )(struct nvkm_device *, int idx, struct nvkm_gpio **);
176 int (*i2c )(struct nvkm_device *, int idx, struct nvkm_i2c **);
177 int (*ibus )(struct nvkm_device *, int idx, struct nvkm_subdev **);
178 int (*imem )(struct nvkm_device *, int idx, struct nvkm_instmem **);
179 int (*ltc )(struct nvkm_device *, int idx, struct nvkm_ltc **);
180 int (*mc )(struct nvkm_device *, int idx, struct nvkm_mc **);
181 int (*mmu )(struct nvkm_device *, int idx, struct nvkm_mmu **);
182 int (*mxm )(struct nvkm_device *, int idx, struct nvkm_subdev **);
183 int (*pci )(struct nvkm_device *, int idx, struct nvkm_pci **);
184 int (*pmu )(struct nvkm_device *, int idx, struct nvkm_pmu **);
185 int (*therm )(struct nvkm_device *, int idx, struct nvkm_therm **);
186 int (*timer )(struct nvkm_device *, int idx, struct nvkm_timer **);
187 int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **);
188
189 int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **);
190 int (*ce[3] )(struct nvkm_device *, int idx, struct nvkm_engine **);
191 int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **);
192 int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **);
193 int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **);
194 int (*fifo )(struct nvkm_device *, int idx, struct nvkm_fifo **);
195 int (*gr )(struct nvkm_device *, int idx, struct nvkm_gr **);
196 int (*ifb )(struct nvkm_device *, int idx, struct nvkm_engine **);
197 int (*me )(struct nvkm_device *, int idx, struct nvkm_engine **);
198 int (*mpeg )(struct nvkm_device *, int idx, struct nvkm_engine **);
199 int (*msenc )(struct nvkm_device *, int idx, struct nvkm_engine **);
200 int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
201 int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
202 int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
203 int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
204 int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
205 int (*sw )(struct nvkm_device *, int idx, struct nvkm_sw **);
206 int (*vic )(struct nvkm_device *, int idx, struct nvkm_engine **);
207 int (*vp )(struct nvkm_device *, int idx, struct nvkm_engine **);
44}; 208};
45 209
46struct nvkm_device *nvkm_device_find(u64 name); 210struct nvkm_device *nvkm_device_find(u64 name);
47int nvkm_device_list(u64 *name, int size); 211int nvkm_device_list(u64 *name, int size);
48 212
49struct nvkm_device *nv_device(void *obj); 213/* privileged register interface accessor macros */
50 214#define nvkm_rd08(d,a) ioread8((d)->pri + (a))
51static inline bool 215#define nvkm_rd16(d,a) ioread16_native((d)->pri + (a))
52nv_device_match(struct nvkm_object *object, u16 dev, u16 ven, u16 sub) 216#define nvkm_rd32(d,a) ioread32_native((d)->pri + (a))
53{ 217#define nvkm_wr08(d,a,v) iowrite8((v), (d)->pri + (a))
54 struct nvkm_device *device = nv_device(object); 218#define nvkm_wr16(d,a,v) iowrite16_native((v), (d)->pri + (a))
55 return device->pdev->device == dev && 219#define nvkm_wr32(d,a,v) iowrite32_native((v), (d)->pri + (a))
56 device->pdev->subsystem_vendor == ven && 220#define nvkm_mask(d,a,m,v) ({ \
57 device->pdev->subsystem_device == sub; 221 struct nvkm_device *_device = (d); \
58} 222 u32 _addr = (a), _temp = nvkm_rd32(_device, _addr); \
59 223 nvkm_wr32(_device, _addr, (_temp & ~(m)) | (v)); \
60static inline bool 224 _temp; \
61nv_device_is_pci(struct nvkm_device *device) 225})
62{ 226
63 return device->pdev != NULL; 227void nvkm_device_del(struct nvkm_device **);
64} 228
65 229struct nvkm_device_oclass {
66static inline bool 230 int (*ctor)(struct nvkm_device *, const struct nvkm_oclass *,
67nv_device_is_cpu_coherent(struct nvkm_device *device) 231 void *data, u32 size, struct nvkm_object **);
68{ 232 struct nvkm_sclass base;
69 return (!IS_ENABLED(CONFIG_ARM) && nv_device_is_pci(device));
70}
71
72static inline struct device *
73nv_device_base(struct nvkm_device *device)
74{
75 return nv_device_is_pci(device) ? &device->pdev->dev :
76 &device->platformdev->dev;
77}
78
79resource_size_t
80nv_device_resource_start(struct nvkm_device *device, unsigned int bar);
81
82resource_size_t
83nv_device_resource_len(struct nvkm_device *device, unsigned int bar);
84
85int
86nv_device_get_irq(struct nvkm_device *device, bool stall);
87
88struct platform_device;
89
90enum nv_bus_type {
91 NVKM_BUS_PCI,
92 NVKM_BUS_PLATFORM,
93}; 233};
94 234
95#define nvkm_device_create(p,t,n,s,c,d,u) \ 235extern const struct nvkm_sclass nvkm_udevice_sclass;
96 nvkm_device_create_((void *)(p), (t), (n), (s), (c), (d), \ 236
97 sizeof(**u), (void **)u) 237/* device logging */
98int nvkm_device_create_(void *, enum nv_bus_type type, u64 name, 238#define nvdev_printk_(d,l,p,f,a...) do { \
99 const char *sname, const char *cfg, const char *dbg, 239 struct nvkm_device *_device = (d); \
100 int, void **); 240 if (_device->debug >= (l)) \
241 dev_##p(_device->dev, f, ##a); \
242} while(0)
243#define nvdev_printk(d,l,p,f,a...) nvdev_printk_((d), NV_DBG_##l, p, f, ##a)
244#define nvdev_fatal(d,f,a...) nvdev_printk((d), FATAL, crit, f, ##a)
245#define nvdev_error(d,f,a...) nvdev_printk((d), ERROR, err, f, ##a)
246#define nvdev_warn(d,f,a...) nvdev_printk((d), WARN, notice, f, ##a)
247#define nvdev_info(d,f,a...) nvdev_printk((d), INFO, info, f, ##a)
248#define nvdev_debug(d,f,a...) nvdev_printk((d), DEBUG, info, f, ##a)
249#define nvdev_trace(d,f,a...) nvdev_printk((d), TRACE, info, f, ##a)
250#define nvdev_spam(d,f,a...) nvdev_printk((d), SPAM, dbg, f, ##a)
101#endif 251#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h b/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h
deleted file mode 100644
index 60c5888b5df3..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h
+++ /dev/null
@@ -1,62 +0,0 @@
1#ifndef __NVKM_DEVIDX_H__
2#define __NVKM_DEVIDX_H__
3enum nvkm_devidx {
4 NVDEV_ENGINE_DEVICE,
5 NVDEV_SUBDEV_VBIOS,
6
7 /* All subdevs from DEVINIT to DEVINIT_LAST will be created before
8 * *any* of them are initialised. This subdev category is used
9 * for any subdevs that the VBIOS init table parsing may call out
10 * to during POST.
11 */
12 NVDEV_SUBDEV_DEVINIT,
13 NVDEV_SUBDEV_IBUS,
14 NVDEV_SUBDEV_GPIO,
15 NVDEV_SUBDEV_I2C,
16 NVDEV_SUBDEV_DEVINIT_LAST = NVDEV_SUBDEV_I2C,
17
18 /* This grouping of subdevs are initialised right after they've
19 * been created, and are allowed to assume any subdevs in the
20 * list above them exist and have been initialised.
21 */
22 NVDEV_SUBDEV_FUSE,
23 NVDEV_SUBDEV_MXM,
24 NVDEV_SUBDEV_MC,
25 NVDEV_SUBDEV_BUS,
26 NVDEV_SUBDEV_TIMER,
27 NVDEV_SUBDEV_FB,
28 NVDEV_SUBDEV_LTC,
29 NVDEV_SUBDEV_INSTMEM,
30 NVDEV_SUBDEV_MMU,
31 NVDEV_SUBDEV_BAR,
32 NVDEV_SUBDEV_PMU,
33 NVDEV_SUBDEV_VOLT,
34 NVDEV_SUBDEV_THERM,
35 NVDEV_SUBDEV_CLK,
36
37 NVDEV_ENGINE_FIRST,
38 NVDEV_ENGINE_DMAOBJ = NVDEV_ENGINE_FIRST,
39 NVDEV_ENGINE_IFB,
40 NVDEV_ENGINE_FIFO,
41 NVDEV_ENGINE_SW,
42 NVDEV_ENGINE_GR,
43 NVDEV_ENGINE_MPEG,
44 NVDEV_ENGINE_ME,
45 NVDEV_ENGINE_VP,
46 NVDEV_ENGINE_CIPHER,
47 NVDEV_ENGINE_BSP,
48 NVDEV_ENGINE_MSPPP,
49 NVDEV_ENGINE_CE0,
50 NVDEV_ENGINE_CE1,
51 NVDEV_ENGINE_CE2,
52 NVDEV_ENGINE_VIC,
53 NVDEV_ENGINE_MSENC,
54 NVDEV_ENGINE_DISP,
55 NVDEV_ENGINE_PM,
56 NVDEV_ENGINE_MSVLD,
57 NVDEV_ENGINE_SEC,
58 NVDEV_ENGINE_MSPDEC,
59
60 NVDEV_SUBDEV_NR,
61};
62#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
deleted file mode 100644
index 1bf2e8eb4268..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
+++ /dev/null
@@ -1,51 +0,0 @@
1#ifndef __NVKM_ENGCTX_H__
2#define __NVKM_ENGCTX_H__
3#include <core/gpuobj.h>
4
5#include <subdev/mmu.h>
6
7#define NV_ENGCTX_(eng,var) (NV_ENGCTX_CLASS | ((var) << 8) | (eng))
8#define NV_ENGCTX(name,var) NV_ENGCTX_(NVDEV_ENGINE_##name, (var))
9
10struct nvkm_engctx {
11 struct nvkm_gpuobj gpuobj;
12 struct nvkm_vma vma;
13 struct list_head head;
14 unsigned long save;
15 u64 addr;
16};
17
18static inline struct nvkm_engctx *
19nv_engctx(void *obj)
20{
21#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
22 if (unlikely(!nv_iclass(obj, NV_ENGCTX_CLASS)))
23 nv_assert("BAD CAST -> NvEngCtx, %08x", nv_hclass(obj));
24#endif
25 return obj;
26}
27
28#define nvkm_engctx_create(p,e,c,g,s,a,f,d) \
29 nvkm_engctx_create_((p), (e), (c), (g), (s), (a), (f), \
30 sizeof(**d), (void **)d)
31
32int nvkm_engctx_create_(struct nvkm_object *, struct nvkm_object *,
33 struct nvkm_oclass *, struct nvkm_object *,
34 u32 size, u32 align, u32 flags,
35 int length, void **data);
36void nvkm_engctx_destroy(struct nvkm_engctx *);
37int nvkm_engctx_init(struct nvkm_engctx *);
38int nvkm_engctx_fini(struct nvkm_engctx *, bool suspend);
39
40int _nvkm_engctx_ctor(struct nvkm_object *, struct nvkm_object *,
41 struct nvkm_oclass *, void *, u32,
42 struct nvkm_object **);
43void _nvkm_engctx_dtor(struct nvkm_object *);
44int _nvkm_engctx_init(struct nvkm_object *);
45int _nvkm_engctx_fini(struct nvkm_object *, bool suspend);
46#define _nvkm_engctx_rd32 _nvkm_gpuobj_rd32
47#define _nvkm_engctx_wr32 _nvkm_gpuobj_wr32
48
49struct nvkm_object *nvkm_engctx_get(struct nvkm_engine *, u64 addr);
50void nvkm_engctx_put(struct nvkm_object *);
51#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index faf0fd2f0638..48bf128456a1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -1,56 +1,49 @@
1#ifndef __NVKM_ENGINE_H__ 1#ifndef __NVKM_ENGINE_H__
2#define __NVKM_ENGINE_H__ 2#define __NVKM_ENGINE_H__
3#define nvkm_engine(p) container_of((p), struct nvkm_engine, subdev)
3#include <core/subdev.h> 4#include <core/subdev.h>
4 5struct nvkm_fifo_chan;
5#define NV_ENGINE_(eng,var) (NV_ENGINE_CLASS | ((var) << 8) | (eng)) 6struct nvkm_fb_tile;
6#define NV_ENGINE(name,var) NV_ENGINE_(NVDEV_ENGINE_##name, (var))
7 7
8struct nvkm_engine { 8struct nvkm_engine {
9 const struct nvkm_engine_func *func;
9 struct nvkm_subdev subdev; 10 struct nvkm_subdev subdev;
10 struct nvkm_oclass *cclass;
11 struct nvkm_oclass *sclass;
12
13 struct list_head contexts;
14 spinlock_t lock; 11 spinlock_t lock;
15 12
16 void (*tile_prog)(struct nvkm_engine *, int region); 13 int usecount;
17 int (*tlb_flush)(struct nvkm_engine *);
18}; 14};
19 15
20static inline struct nvkm_engine * 16struct nvkm_engine_func {
21nv_engine(void *obj) 17 void *(*dtor)(struct nvkm_engine *);
22{ 18 int (*oneinit)(struct nvkm_engine *);
23#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA 19 int (*init)(struct nvkm_engine *);
24 if (unlikely(!nv_iclass(obj, NV_ENGINE_CLASS))) 20 int (*fini)(struct nvkm_engine *, bool suspend);
25 nv_assert("BAD CAST -> NvEngine, %08x", nv_hclass(obj)); 21 void (*intr)(struct nvkm_engine *);
26#endif 22 void (*tile)(struct nvkm_engine *, int region, struct nvkm_fb_tile *);
27 return obj; 23
28} 24 struct {
29 25 int (*sclass)(struct nvkm_oclass *, int index,
30static inline int 26 const struct nvkm_device_oclass **);
31nv_engidx(struct nvkm_engine *engine) 27 } base;
32{ 28
33 return nv_subidx(&engine->subdev); 29 struct {
34} 30 int (*cclass)(struct nvkm_fifo_chan *,
35 31 const struct nvkm_oclass *,
36struct nvkm_engine *nvkm_engine(void *obj, int idx); 32 struct nvkm_object **);
37 33 int (*sclass)(struct nvkm_oclass *, int index);
38#define nvkm_engine_create(p,e,c,d,i,f,r) \ 34 } fifo;
39 nvkm_engine_create_((p), (e), (c), (d), (i), (f), \ 35
40 sizeof(**r),(void **)r) 36 const struct nvkm_object_func *cclass;
41 37 struct nvkm_sclass sclass[];
42#define nvkm_engine_destroy(p) \ 38};
43 nvkm_subdev_destroy(&(p)->subdev)
44#define nvkm_engine_init(p) \
45 nvkm_subdev_init(&(p)->subdev)
46#define nvkm_engine_fini(p,s) \
47 nvkm_subdev_fini(&(p)->subdev, (s))
48
49int nvkm_engine_create_(struct nvkm_object *, struct nvkm_object *,
50 struct nvkm_oclass *, bool, const char *,
51 const char *, int, void **);
52 39
53#define _nvkm_engine_dtor _nvkm_subdev_dtor 40int nvkm_engine_ctor(const struct nvkm_engine_func *, struct nvkm_device *,
54#define _nvkm_engine_init _nvkm_subdev_init 41 int index, u32 pmc_enable, bool enable,
55#define _nvkm_engine_fini _nvkm_subdev_fini 42 struct nvkm_engine *);
43int nvkm_engine_new_(const struct nvkm_engine_func *, struct nvkm_device *,
44 int index, u32 pmc_enable, bool enable,
45 struct nvkm_engine **);
46struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *);
47void nvkm_engine_unref(struct nvkm_engine **);
48void nvkm_engine_tile(struct nvkm_engine *, int region);
56#endif 49#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
index e76f76f115e9..40429a82f792 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
@@ -10,12 +10,11 @@ struct nvkm_enum {
10}; 10};
11 11
12const struct nvkm_enum *nvkm_enum_find(const struct nvkm_enum *, u32 value); 12const struct nvkm_enum *nvkm_enum_find(const struct nvkm_enum *, u32 value);
13const struct nvkm_enum *nvkm_enum_print(const struct nvkm_enum *, u32 value);
14 13
15struct nvkm_bitfield { 14struct nvkm_bitfield {
16 u32 mask; 15 u32 mask;
17 const char *name; 16 const char *name;
18}; 17};
19 18
20void nvkm_bitfield_print(const struct nvkm_bitfield *, u32 value); 19void nvkm_snprintbf(char *, int, const struct nvkm_bitfield *, u32 value);
21#endif 20#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
index e0187e7abb6e..d4f56eafb073 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
@@ -1,64 +1,40 @@
1#ifndef __NVKM_GPUOBJ_H__ 1#ifndef __NVKM_GPUOBJ_H__
2#define __NVKM_GPUOBJ_H__ 2#define __NVKM_GPUOBJ_H__
3#include <core/object.h> 3#include <core/object.h>
4#include <core/memory.h>
4#include <core/mm.h> 5#include <core/mm.h>
5struct nvkm_vma; 6struct nvkm_vma;
6struct nvkm_vm; 7struct nvkm_vm;
7 8
8#define NVOBJ_FLAG_ZERO_ALLOC 0x00000001 9#define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
9#define NVOBJ_FLAG_ZERO_FREE 0x00000002
10#define NVOBJ_FLAG_HEAP 0x00000004 10#define NVOBJ_FLAG_HEAP 0x00000004
11 11
12struct nvkm_gpuobj { 12struct nvkm_gpuobj {
13 struct nvkm_object object; 13 struct nvkm_object object;
14 struct nvkm_object *parent; 14 const struct nvkm_gpuobj_func *func;
15 struct nvkm_gpuobj *parent;
16 struct nvkm_memory *memory;
15 struct nvkm_mm_node *node; 17 struct nvkm_mm_node *node;
16 struct nvkm_mm heap;
17 18
18 u32 flags;
19 u64 addr; 19 u64 addr;
20 u32 size; 20 u32 size;
21}; 21 struct nvkm_mm heap;
22 22
23static inline struct nvkm_gpuobj * 23 void __iomem *map;
24nv_gpuobj(void *obj) 24};
25{
26#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
27 if (unlikely(!nv_iclass(obj, NV_GPUOBJ_CLASS)))
28 nv_assert("BAD CAST -> NvGpuObj, %08x", nv_hclass(obj));
29#endif
30 return obj;
31}
32 25
33#define nvkm_gpuobj_create(p,e,c,v,g,s,a,f,d) \ 26struct nvkm_gpuobj_func {
34 nvkm_gpuobj_create_((p), (e), (c), (v), (g), (s), (a), (f), \ 27 void *(*acquire)(struct nvkm_gpuobj *);
35 sizeof(**d), (void **)d) 28 void (*release)(struct nvkm_gpuobj *);
36#define nvkm_gpuobj_init(p) nvkm_object_init(&(p)->object) 29 u32 (*rd32)(struct nvkm_gpuobj *, u32 offset);
37#define nvkm_gpuobj_fini(p,s) nvkm_object_fini(&(p)->object, (s)) 30 void (*wr32)(struct nvkm_gpuobj *, u32 offset, u32 data);
38int nvkm_gpuobj_create_(struct nvkm_object *, struct nvkm_object *, 31};
39 struct nvkm_oclass *, u32 pclass,
40 struct nvkm_object *, u32 size, u32 align,
41 u32 flags, int length, void **);
42void nvkm_gpuobj_destroy(struct nvkm_gpuobj *);
43 32
44int nvkm_gpuobj_new(struct nvkm_object *, struct nvkm_object *, u32 size, 33int nvkm_gpuobj_new(struct nvkm_device *, u32 size, int align, bool zero,
45 u32 align, u32 flags, struct nvkm_gpuobj **); 34 struct nvkm_gpuobj *parent, struct nvkm_gpuobj **);
46int nvkm_gpuobj_dup(struct nvkm_object *, struct nvkm_gpuobj *, 35void nvkm_gpuobj_del(struct nvkm_gpuobj **);
47 struct nvkm_gpuobj **); 36int nvkm_gpuobj_wrap(struct nvkm_memory *, struct nvkm_gpuobj **);
48int nvkm_gpuobj_map(struct nvkm_gpuobj *, u32 acc, struct nvkm_vma *); 37int nvkm_gpuobj_map(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
49int nvkm_gpuobj_map_vm(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access, 38 struct nvkm_vma *);
50 struct nvkm_vma *);
51void nvkm_gpuobj_unmap(struct nvkm_vma *); 39void nvkm_gpuobj_unmap(struct nvkm_vma *);
52
53static inline void
54nvkm_gpuobj_ref(struct nvkm_gpuobj *obj, struct nvkm_gpuobj **ref)
55{
56 nvkm_object_ref(&obj->object, (struct nvkm_object **)ref);
57}
58
59void _nvkm_gpuobj_dtor(struct nvkm_object *);
60int _nvkm_gpuobj_init(struct nvkm_object *);
61int _nvkm_gpuobj_fini(struct nvkm_object *, bool);
62u32 _nvkm_gpuobj_rd32(struct nvkm_object *, u64);
63void _nvkm_gpuobj_wr32(struct nvkm_object *, u64, u32);
64#endif 40#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h b/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
deleted file mode 100644
index 67f384d0916c..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
+++ /dev/null
@@ -1,34 +0,0 @@
1#ifndef __NVKM_HANDLE_H__
2#define __NVKM_HANDLE_H__
3#include <core/os.h>
4struct nvkm_object;
5
6struct nvkm_handle {
7 struct nvkm_namedb *namedb;
8 struct list_head node;
9
10 struct list_head head;
11 struct list_head tree;
12 u32 name;
13 u32 priv;
14
15 u8 route;
16 u64 token;
17
18 struct nvkm_handle *parent;
19 struct nvkm_object *object;
20};
21
22int nvkm_handle_create(struct nvkm_object *, u32 parent, u32 handle,
23 struct nvkm_object *, struct nvkm_handle **);
24void nvkm_handle_destroy(struct nvkm_handle *);
25int nvkm_handle_init(struct nvkm_handle *);
26int nvkm_handle_fini(struct nvkm_handle *, bool suspend);
27
28struct nvkm_object *nvkm_handle_ref(struct nvkm_object *, u32 name);
29
30struct nvkm_handle *nvkm_handle_get_class(struct nvkm_object *, u16);
31struct nvkm_handle *nvkm_handle_get_vinst(struct nvkm_object *, u64);
32struct nvkm_handle *nvkm_handle_get_cinst(struct nvkm_object *, u32);
33void nvkm_handle_put(struct nvkm_handle *);
34#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
new file mode 100644
index 000000000000..9363b839a9da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -0,0 +1,53 @@
1#ifndef __NVKM_MEMORY_H__
2#define __NVKM_MEMORY_H__
3#include <core/os.h>
4struct nvkm_device;
5struct nvkm_vma;
6struct nvkm_vm;
7
8enum nvkm_memory_target {
9 NVKM_MEM_TARGET_INST,
10 NVKM_MEM_TARGET_VRAM,
11 NVKM_MEM_TARGET_HOST,
12};
13
14struct nvkm_memory {
15 const struct nvkm_memory_func *func;
16};
17
18struct nvkm_memory_func {
19 void *(*dtor)(struct nvkm_memory *);
20 enum nvkm_memory_target (*target)(struct nvkm_memory *);
21 u64 (*addr)(struct nvkm_memory *);
22 u64 (*size)(struct nvkm_memory *);
23 void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
24 void __iomem *(*acquire)(struct nvkm_memory *);
25 void (*release)(struct nvkm_memory *);
26 u32 (*rd32)(struct nvkm_memory *, u64 offset);
27 void (*wr32)(struct nvkm_memory *, u64 offset, u32 data);
28 void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
29};
30
31void nvkm_memory_ctor(const struct nvkm_memory_func *, struct nvkm_memory *);
32int nvkm_memory_new(struct nvkm_device *, enum nvkm_memory_target,
33 u64 size, u32 align, bool zero, struct nvkm_memory **);
34void nvkm_memory_del(struct nvkm_memory **);
35#define nvkm_memory_target(p) (p)->func->target(p)
36#define nvkm_memory_addr(p) (p)->func->addr(p)
37#define nvkm_memory_size(p) (p)->func->size(p)
38#define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
39#define nvkm_memory_map(p,v,o) (p)->func->map((p),(v),(o))
40
41/* accessor macros - kmap()/done() must bracket use of the other accessor
42 * macros to guarantee correct behaviour across all chipsets
43 */
44#define nvkm_kmap(o) (o)->func->acquire(o)
45#define nvkm_ro32(o,a) (o)->func->rd32((o), (a))
46#define nvkm_wo32(o,a,d) (o)->func->wr32((o), (a), (d))
47#define nvkm_mo32(o,a,m,d) ({ \
48 u32 _addr = (a), _data = nvkm_ro32((o), _addr); \
49 nvkm_wo32((o), _addr, (_data & ~(m)) | (d)); \
50 _data; \
51})
52#define nvkm_done(o) (o)->func->release(o)
53#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
index 096eb1a623ee..d92fd41e4056 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -27,7 +27,7 @@ struct nvkm_mm {
27static inline bool 27static inline bool
28nvkm_mm_initialised(struct nvkm_mm *mm) 28nvkm_mm_initialised(struct nvkm_mm *mm)
29{ 29{
30 return mm->block_size != 0; 30 return mm->heap_nodes;
31} 31}
32 32
33int nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block); 33int nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block);
@@ -37,4 +37,5 @@ int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
37int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max, 37int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
38 u32 size_min, u32 align, struct nvkm_mm_node **); 38 u32 size_min, u32 align, struct nvkm_mm_node **);
39void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **); 39void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
40void nvkm_mm_dump(struct nvkm_mm *, const char *);
40#endif 41#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h b/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
deleted file mode 100644
index 4cfe16fcde9b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
+++ /dev/null
@@ -1,53 +0,0 @@
1#ifndef __NVKM_NAMEDB_H__
2#define __NVKM_NAMEDB_H__
3#include <core/parent.h>
4struct nvkm_handle;
5
6struct nvkm_namedb {
7 struct nvkm_parent parent;
8 rwlock_t lock;
9 struct list_head list;
10};
11
12static inline struct nvkm_namedb *
13nv_namedb(void *obj)
14{
15#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
16 if (unlikely(!nv_iclass(obj, NV_NAMEDB_CLASS)))
17 nv_assert("BAD CAST -> NvNameDB, %08x", nv_hclass(obj));
18#endif
19 return obj;
20}
21
22#define nvkm_namedb_create(p,e,c,v,s,m,d) \
23 nvkm_namedb_create_((p), (e), (c), (v), (s), (m), \
24 sizeof(**d), (void **)d)
25#define nvkm_namedb_init(p) \
26 nvkm_parent_init(&(p)->parent)
27#define nvkm_namedb_fini(p,s) \
28 nvkm_parent_fini(&(p)->parent, (s))
29#define nvkm_namedb_destroy(p) \
30 nvkm_parent_destroy(&(p)->parent)
31
32int nvkm_namedb_create_(struct nvkm_object *, struct nvkm_object *,
33 struct nvkm_oclass *, u32 pclass,
34 struct nvkm_oclass *, u64 engcls,
35 int size, void **);
36
37int _nvkm_namedb_ctor(struct nvkm_object *, struct nvkm_object *,
38 struct nvkm_oclass *, void *, u32,
39 struct nvkm_object **);
40#define _nvkm_namedb_dtor _nvkm_parent_dtor
41#define _nvkm_namedb_init _nvkm_parent_init
42#define _nvkm_namedb_fini _nvkm_parent_fini
43
44int nvkm_namedb_insert(struct nvkm_namedb *, u32 name, struct nvkm_object *,
45 struct nvkm_handle *);
46void nvkm_namedb_remove(struct nvkm_handle *);
47
48struct nvkm_handle *nvkm_namedb_get(struct nvkm_namedb *, u32);
49struct nvkm_handle *nvkm_namedb_get_class(struct nvkm_namedb *, u16);
50struct nvkm_handle *nvkm_namedb_get_vinst(struct nvkm_namedb *, u64);
51struct nvkm_handle *nvkm_namedb_get_cinst(struct nvkm_namedb *, u32);
52void nvkm_namedb_put(struct nvkm_handle *);
53#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index 6e3cd3908400..dcd048b91fac 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -1,203 +1,88 @@
1#ifndef __NVKM_OBJECT_H__ 1#ifndef __NVKM_OBJECT_H__
2#define __NVKM_OBJECT_H__ 2#define __NVKM_OBJECT_H__
3#include <core/os.h> 3#include <core/os.h>
4#include <core/printk.h> 4#include <core/debug.h>
5 5struct nvkm_event;
6#define NV_PARENT_CLASS 0x80000000 6struct nvkm_gpuobj;
7#define NV_NAMEDB_CLASS 0x40000000 7struct nvkm_oclass;
8#define NV_CLIENT_CLASS 0x20000000
9#define NV_SUBDEV_CLASS 0x10000000
10#define NV_ENGINE_CLASS 0x08000000
11#define NV_MEMOBJ_CLASS 0x04000000
12#define NV_GPUOBJ_CLASS 0x02000000
13#define NV_ENGCTX_CLASS 0x01000000
14#define NV_OBJECT_CLASS 0x0000ffff
15 8
16struct nvkm_object { 9struct nvkm_object {
17 struct nvkm_oclass *oclass; 10 const struct nvkm_object_func *func;
18 struct nvkm_object *parent; 11 struct nvkm_client *client;
19 struct nvkm_engine *engine; 12 struct nvkm_engine *engine;
20 atomic_t refcount; 13 s32 oclass;
21 atomic_t usecount;
22#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
23#define NVKM_OBJECT_MAGIC 0x75ef0bad
24 struct list_head list;
25 u32 _magic;
26#endif
27};
28
29static inline struct nvkm_object *
30nv_object(void *obj)
31{
32#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
33 if (likely(obj)) {
34 struct nvkm_object *object = obj;
35 if (unlikely(object->_magic != NVKM_OBJECT_MAGIC))
36 nv_assert("BAD CAST -> NvObject, invalid magic");
37 }
38#endif
39 return obj;
40}
41
42#define nvkm_object_create(p,e,c,s,d) \
43 nvkm_object_create_((p), (e), (c), (s), sizeof(**d), (void **)d)
44int nvkm_object_create_(struct nvkm_object *, struct nvkm_object *,
45 struct nvkm_oclass *, u32, int size, void **);
46void nvkm_object_destroy(struct nvkm_object *);
47int nvkm_object_init(struct nvkm_object *);
48int nvkm_object_fini(struct nvkm_object *, bool suspend);
49
50int _nvkm_object_ctor(struct nvkm_object *, struct nvkm_object *,
51 struct nvkm_oclass *, void *, u32,
52 struct nvkm_object **);
53
54extern struct nvkm_ofuncs nvkm_object_ofuncs;
55
56/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
57 * ".data". */
58struct nvkm_oclass {
59 u32 handle; 14 u32 handle;
60 struct nvkm_ofuncs * const ofuncs;
61 struct nvkm_omthds * const omthds;
62 struct lock_class_key lock_class_key;
63};
64
65#define nv_oclass(o) nv_object(o)->oclass
66#define nv_hclass(o) nv_oclass(o)->handle
67#define nv_iclass(o,i) (nv_hclass(o) & (i))
68#define nv_mclass(o) nv_iclass(o, NV_OBJECT_CLASS)
69 15
70static inline struct nvkm_object * 16 struct list_head head;
71nv_pclass(struct nvkm_object *parent, u32 oclass) 17 struct list_head tree;
72{ 18 u8 route;
73 while (parent && !nv_iclass(parent, oclass)) 19 u64 token;
74 parent = parent->parent; 20 u64 object;
75 return parent; 21 struct rb_node node;
76} 22};
77 23
78struct nvkm_omthds { 24struct nvkm_object_func {
79 u32 start; 25 void *(*dtor)(struct nvkm_object *);
80 u32 limit; 26 int (*init)(struct nvkm_object *);
81 int (*call)(struct nvkm_object *, u32, void *, u32); 27 int (*fini)(struct nvkm_object *, bool suspend);
28 int (*mthd)(struct nvkm_object *, u32 mthd, void *data, u32 size);
29 int (*ntfy)(struct nvkm_object *, u32 mthd, struct nvkm_event **);
30 int (*map)(struct nvkm_object *, u64 *addr, u32 *size);
31 int (*rd08)(struct nvkm_object *, u64 addr, u8 *data);
32 int (*rd16)(struct nvkm_object *, u64 addr, u16 *data);
33 int (*rd32)(struct nvkm_object *, u64 addr, u32 *data);
34 int (*wr08)(struct nvkm_object *, u64 addr, u8 data);
35 int (*wr16)(struct nvkm_object *, u64 addr, u16 data);
36 int (*wr32)(struct nvkm_object *, u64 addr, u32 data);
37 int (*bind)(struct nvkm_object *, struct nvkm_gpuobj *, int align,
38 struct nvkm_gpuobj **);
39 int (*sclass)(struct nvkm_object *, int index, struct nvkm_oclass *);
82}; 40};
83 41
84struct nvkm_event; 42void nvkm_object_ctor(const struct nvkm_object_func *,
85struct nvkm_ofuncs { 43 const struct nvkm_oclass *, struct nvkm_object *);
86 int (*ctor)(struct nvkm_object *, struct nvkm_object *, 44int nvkm_object_new_(const struct nvkm_object_func *,
87 struct nvkm_oclass *, void *data, u32 size, 45 const struct nvkm_oclass *, void *data, u32 size,
88 struct nvkm_object **); 46 struct nvkm_object **);
89 void (*dtor)(struct nvkm_object *); 47int nvkm_object_new(const struct nvkm_oclass *, void *data, u32 size,
90 int (*init)(struct nvkm_object *); 48 struct nvkm_object **);
91 int (*fini)(struct nvkm_object *, bool suspend); 49void nvkm_object_del(struct nvkm_object **);
92 int (*mthd)(struct nvkm_object *, u32, void *, u32); 50void *nvkm_object_dtor(struct nvkm_object *);
93 int (*ntfy)(struct nvkm_object *, u32, struct nvkm_event **); 51int nvkm_object_init(struct nvkm_object *);
94 int (* map)(struct nvkm_object *, u64 *, u32 *); 52int nvkm_object_fini(struct nvkm_object *, bool suspend);
95 u8 (*rd08)(struct nvkm_object *, u64 offset); 53int nvkm_object_mthd(struct nvkm_object *, u32 mthd, void *data, u32 size);
96 u16 (*rd16)(struct nvkm_object *, u64 offset); 54int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **);
97 u32 (*rd32)(struct nvkm_object *, u64 offset); 55int nvkm_object_map(struct nvkm_object *, u64 *addr, u32 *size);
98 void (*wr08)(struct nvkm_object *, u64 offset, u8 data); 56int nvkm_object_rd08(struct nvkm_object *, u64 addr, u8 *data);
99 void (*wr16)(struct nvkm_object *, u64 offset, u16 data); 57int nvkm_object_rd16(struct nvkm_object *, u64 addr, u16 *data);
100 void (*wr32)(struct nvkm_object *, u64 offset, u32 data); 58int nvkm_object_rd32(struct nvkm_object *, u64 addr, u32 *data);
59int nvkm_object_wr08(struct nvkm_object *, u64 addr, u8 data);
60int nvkm_object_wr16(struct nvkm_object *, u64 addr, u16 data);
61int nvkm_object_wr32(struct nvkm_object *, u64 addr, u32 data);
62int nvkm_object_bind(struct nvkm_object *, struct nvkm_gpuobj *, int align,
63 struct nvkm_gpuobj **);
64
65struct nvkm_sclass {
66 int minver;
67 int maxver;
68 s32 oclass;
69 const struct nvkm_object_func *func;
70 int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
71 struct nvkm_object **);
101}; 72};
102 73
103static inline struct nvkm_ofuncs * 74struct nvkm_oclass {
104nv_ofuncs(void *obj) 75 int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
105{ 76 struct nvkm_object **);
106 return nv_oclass(obj)->ofuncs; 77 struct nvkm_sclass base;
107} 78 const void *priv;
108 79 const void *engn;
109int nvkm_object_ctor(struct nvkm_object *, struct nvkm_object *, 80 u32 handle;
110 struct nvkm_oclass *, void *, u32, 81 u8 route;
111 struct nvkm_object **); 82 u64 token;
112void nvkm_object_ref(struct nvkm_object *, struct nvkm_object **); 83 u64 object;
113int nvkm_object_inc(struct nvkm_object *); 84 struct nvkm_client *client;
114int nvkm_object_dec(struct nvkm_object *, bool suspend); 85 struct nvkm_object *parent;
115void nvkm_object_debug(void); 86 struct nvkm_engine *engine;
116 87};
117static inline int
118nv_exec(void *obj, u32 mthd, void *data, u32 size)
119{
120 struct nvkm_omthds *method = nv_oclass(obj)->omthds;
121
122 while (method && method->call) {
123 if (mthd >= method->start && mthd <= method->limit)
124 return method->call(obj, mthd, data, size);
125 method++;
126 }
127
128 return -EINVAL;
129}
130
131static inline int
132nv_call(void *obj, u32 mthd, u32 data)
133{
134 return nv_exec(obj, mthd, &data, sizeof(data));
135}
136
137static inline u8
138nv_ro08(void *obj, u64 addr)
139{
140 u8 data = nv_ofuncs(obj)->rd08(obj, addr);
141 nv_spam(obj, "nv_ro08 0x%08llx 0x%02x\n", addr, data);
142 return data;
143}
144
145static inline u16
146nv_ro16(void *obj, u64 addr)
147{
148 u16 data = nv_ofuncs(obj)->rd16(obj, addr);
149 nv_spam(obj, "nv_ro16 0x%08llx 0x%04x\n", addr, data);
150 return data;
151}
152
153static inline u32
154nv_ro32(void *obj, u64 addr)
155{
156 u32 data = nv_ofuncs(obj)->rd32(obj, addr);
157 nv_spam(obj, "nv_ro32 0x%08llx 0x%08x\n", addr, data);
158 return data;
159}
160
161static inline void
162nv_wo08(void *obj, u64 addr, u8 data)
163{
164 nv_spam(obj, "nv_wo08 0x%08llx 0x%02x\n", addr, data);
165 nv_ofuncs(obj)->wr08(obj, addr, data);
166}
167
168static inline void
169nv_wo16(void *obj, u64 addr, u16 data)
170{
171 nv_spam(obj, "nv_wo16 0x%08llx 0x%04x\n", addr, data);
172 nv_ofuncs(obj)->wr16(obj, addr, data);
173}
174
175static inline void
176nv_wo32(void *obj, u64 addr, u32 data)
177{
178 nv_spam(obj, "nv_wo32 0x%08llx 0x%08x\n", addr, data);
179 nv_ofuncs(obj)->wr32(obj, addr, data);
180}
181
182static inline u32
183nv_mo32(void *obj, u64 addr, u32 mask, u32 data)
184{
185 u32 temp = nv_ro32(obj, addr);
186 nv_wo32(obj, addr, (temp & ~mask) | data);
187 return temp;
188}
189
190static inline int
191nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
192{
193 unsigned char c1, c2;
194
195 while (len--) {
196 c1 = nv_ro08(obj, addr++);
197 c2 = *(str++);
198 if (c1 != c2)
199 return c1 - c2;
200 }
201 return 0;
202}
203#endif 88#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
new file mode 100644
index 000000000000..bd52236cc2f4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
@@ -0,0 +1,22 @@
1#ifndef __NVKM_OPROXY_H__
2#define __NVKM_OPROXY_H__
3#define nvkm_oproxy(p) container_of((p), struct nvkm_oproxy, base)
4#include <core/object.h>
5
6struct nvkm_oproxy {
7 const struct nvkm_oproxy_func *func;
8 struct nvkm_object base;
9 struct nvkm_object *object;
10};
11
12struct nvkm_oproxy_func {
13 void (*dtor[2])(struct nvkm_oproxy *);
14 int (*init[2])(struct nvkm_oproxy *);
15 int (*fini[2])(struct nvkm_oproxy *, bool suspend);
16};
17
18void nvkm_oproxy_ctor(const struct nvkm_oproxy_func *,
19 const struct nvkm_oclass *, struct nvkm_oproxy *);
20int nvkm_oproxy_new_(const struct nvkm_oproxy_func *,
21 const struct nvkm_oclass *, struct nvkm_oproxy **);
22#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
index 532bfa8e3f72..80fdc146e816 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
@@ -4,6 +4,7 @@
4 4
5const char *nvkm_stropt(const char *optstr, const char *opt, int *len); 5const char *nvkm_stropt(const char *optstr, const char *opt, int *len);
6bool nvkm_boolopt(const char *optstr, const char *opt, bool value); 6bool nvkm_boolopt(const char *optstr, const char *opt, bool value);
7long nvkm_longopt(const char *optstr, const char *opt, long value);
7int nvkm_dbgopt(const char *optstr, const char *sub); 8int nvkm_dbgopt(const char *optstr, const char *sub);
8 9
9/* compares unterminated string 'str' with zero-terminated string 'cmp' */ 10/* compares unterminated string 'str' with zero-terminated string 'cmp' */
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h b/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h
deleted file mode 100644
index 837e4fe966a5..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h
+++ /dev/null
@@ -1,58 +0,0 @@
1#ifndef __NVKM_PARENT_H__
2#define __NVKM_PARENT_H__
3#include <core/object.h>
4
5struct nvkm_sclass {
6 struct nvkm_sclass *sclass;
7 struct nvkm_engine *engine;
8 struct nvkm_oclass *oclass;
9};
10
11struct nvkm_parent {
12 struct nvkm_object object;
13
14 struct nvkm_sclass *sclass;
15 u64 engine;
16
17 int (*context_attach)(struct nvkm_object *, struct nvkm_object *);
18 int (*context_detach)(struct nvkm_object *, bool suspend,
19 struct nvkm_object *);
20
21 int (*object_attach)(struct nvkm_object *parent,
22 struct nvkm_object *object, u32 name);
23 void (*object_detach)(struct nvkm_object *parent, int cookie);
24};
25
26static inline struct nvkm_parent *
27nv_parent(void *obj)
28{
29#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
30 if (unlikely(!(nv_iclass(obj, NV_PARENT_CLASS))))
31 nv_assert("BAD CAST -> NvParent, %08x", nv_hclass(obj));
32#endif
33 return obj;
34}
35
36#define nvkm_parent_create(p,e,c,v,s,m,d) \
37 nvkm_parent_create_((p), (e), (c), (v), (s), (m), \
38 sizeof(**d), (void **)d)
39#define nvkm_parent_init(p) \
40 nvkm_object_init(&(p)->object)
41#define nvkm_parent_fini(p,s) \
42 nvkm_object_fini(&(p)->object, (s))
43
44int nvkm_parent_create_(struct nvkm_object *, struct nvkm_object *,
45 struct nvkm_oclass *, u32 pclass,
46 struct nvkm_oclass *, u64 engcls,
47 int size, void **);
48void nvkm_parent_destroy(struct nvkm_parent *);
49
50void _nvkm_parent_dtor(struct nvkm_object *);
51#define _nvkm_parent_init nvkm_object_init
52#define _nvkm_parent_fini nvkm_object_fini
53
54int nvkm_parent_sclass(struct nvkm_object *, u16 handle,
55 struct nvkm_object **pengine,
56 struct nvkm_oclass **poclass);
57int nvkm_parent_lclass(struct nvkm_object *, u32 *, int);
58#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
new file mode 100644
index 000000000000..78d41be20b8c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
@@ -0,0 +1,14 @@
1#ifndef __NVKM_DEVICE_PCI_H__
2#define __NVKM_DEVICE_PCI_H__
3#include <core/device.h>
4
5struct nvkm_device_pci {
6 struct nvkm_device device;
7 struct pci_dev *pdev;
8 bool suspend;
9};
10
11int nvkm_device_pci_new(struct pci_dev *, const char *cfg, const char *dbg,
12 bool detect, bool mmio, u64 subdev_mask,
13 struct nvkm_device **);
14#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h b/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h
deleted file mode 100644
index 83648177059f..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __NVKM_PRINTK_H__
2#define __NVKM_PRINTK_H__
3#include <core/os.h>
4#include <core/debug.h>
5struct nvkm_object;
6
7void __printf(3, 4)
8nv_printk_(struct nvkm_object *, int, const char *, ...);
9
10#define nv_printk(o,l,f,a...) do { \
11 if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \
12 nv_printk_(nv_object(o), NV_DBG_##l, f, ##a); \
13} while(0)
14
15#define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a)
16#define nv_error(o,f,a...) nv_printk((o), ERROR, f, ##a)
17#define nv_warn(o,f,a...) nv_printk((o), WARN, f, ##a)
18#define nv_info(o,f,a...) nv_printk((o), INFO, f, ##a)
19#define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a)
20#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
21#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
22#define nv_ioctl(o,f,a...) nv_trace(nvkm_client(o), "ioctl: "f, ##a)
23
24#define nv_assert(f,a...) do { \
25 if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \
26 nv_printk_(NULL, NV_DBG_FATAL, f "\n", ##a); \
27 BUG_ON(1); \
28} while(0)
29#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
index cc132eaa10cc..5ee6298991e2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
@@ -2,19 +2,27 @@
2#define __NVKM_RAMHT_H__ 2#define __NVKM_RAMHT_H__
3#include <core/gpuobj.h> 3#include <core/gpuobj.h>
4 4
5struct nvkm_ramht_data {
6 struct nvkm_gpuobj *inst;
7 int chid;
8 u32 handle;
9};
10
5struct nvkm_ramht { 11struct nvkm_ramht {
6 struct nvkm_gpuobj gpuobj; 12 struct nvkm_device *device;
13 struct nvkm_gpuobj *parent;
14 struct nvkm_gpuobj *gpuobj;
15 int size;
7 int bits; 16 int bits;
17 struct nvkm_ramht_data data[];
8}; 18};
9 19
10int nvkm_ramht_insert(struct nvkm_ramht *, int chid, u32 handle, u32 context); 20int nvkm_ramht_new(struct nvkm_device *, u32 size, u32 align,
21 struct nvkm_gpuobj *, struct nvkm_ramht **);
22void nvkm_ramht_del(struct nvkm_ramht **);
23int nvkm_ramht_insert(struct nvkm_ramht *, struct nvkm_object *,
24 int chid, int addr, u32 handle, u32 context);
11void nvkm_ramht_remove(struct nvkm_ramht *, int cookie); 25void nvkm_ramht_remove(struct nvkm_ramht *, int cookie);
12int nvkm_ramht_new(struct nvkm_object *, struct nvkm_object *, u32 size, 26struct nvkm_gpuobj *
13 u32 align, struct nvkm_ramht **); 27nvkm_ramht_search(struct nvkm_ramht *, int chid, u32 handle);
14
15static inline void
16nvkm_ramht_ref(struct nvkm_ramht *obj, struct nvkm_ramht **ref)
17{
18 nvkm_gpuobj_ref(&obj->gpuobj, (struct nvkm_gpuobj **)ref);
19}
20#endif 28#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 6fdc39116aac..3b5dc9c63069 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -1,119 +1,50 @@
1#ifndef __NVKM_SUBDEV_H__ 1#ifndef __NVKM_SUBDEV_H__
2#define __NVKM_SUBDEV_H__ 2#define __NVKM_SUBDEV_H__
3#include <core/object.h> 3#include <core/device.h>
4#include <core/devidx.h>
5
6#define NV_SUBDEV_(sub,var) (NV_SUBDEV_CLASS | ((var) << 8) | (sub))
7#define NV_SUBDEV(name,var) NV_SUBDEV_(NVDEV_SUBDEV_##name, (var))
8 4
9struct nvkm_subdev { 5struct nvkm_subdev {
10 struct nvkm_object object; 6 const struct nvkm_subdev_func *func;
7 struct nvkm_device *device;
8 enum nvkm_devidx index;
9 u32 pmc_enable;
11 struct mutex mutex; 10 struct mutex mutex;
12 const char *name;
13 void __iomem *mmio;
14 u32 debug; 11 u32 debug;
15 u32 unit;
16 12
17 void (*intr)(struct nvkm_subdev *); 13 bool oneinit;
18}; 14};
19 15
20static inline struct nvkm_subdev * 16struct nvkm_subdev_func {
21nv_subdev(void *obj) 17 void *(*dtor)(struct nvkm_subdev *);
22{ 18 int (*preinit)(struct nvkm_subdev *);
23#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA 19 int (*oneinit)(struct nvkm_subdev *);
24 if (unlikely(!nv_iclass(obj, NV_SUBDEV_CLASS))) 20 int (*init)(struct nvkm_subdev *);
25 nv_assert("BAD CAST -> NvSubDev, %08x", nv_hclass(obj)); 21 int (*fini)(struct nvkm_subdev *, bool suspend);
26#endif 22 void (*intr)(struct nvkm_subdev *);
27 return obj; 23};
28}
29
30static inline int
31nv_subidx(struct nvkm_subdev *subdev)
32{
33 return nv_hclass(subdev) & 0xff;
34}
35
36struct nvkm_subdev *nvkm_subdev(void *obj, int idx);
37
38#define nvkm_subdev_create(p,e,o,v,s,f,d) \
39 nvkm_subdev_create_((p), (e), (o), (v), (s), (f), \
40 sizeof(**d),(void **)d)
41 24
42int nvkm_subdev_create_(struct nvkm_object *, struct nvkm_object *, 25extern const char *nvkm_subdev_name[NVKM_SUBDEV_NR];
43 struct nvkm_oclass *, u32 pclass, 26void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
44 const char *sname, const char *fname, 27 int index, u32 pmc_enable, struct nvkm_subdev *);
45 int size, void **); 28void nvkm_subdev_del(struct nvkm_subdev **);
46void nvkm_subdev_destroy(struct nvkm_subdev *); 29int nvkm_subdev_preinit(struct nvkm_subdev *);
47int nvkm_subdev_init(struct nvkm_subdev *); 30int nvkm_subdev_init(struct nvkm_subdev *);
48int nvkm_subdev_fini(struct nvkm_subdev *, bool suspend); 31int nvkm_subdev_fini(struct nvkm_subdev *, bool suspend);
49void nvkm_subdev_reset(struct nvkm_object *); 32void nvkm_subdev_intr(struct nvkm_subdev *);
50 33
51void _nvkm_subdev_dtor(struct nvkm_object *); 34/* subdev logging */
52int _nvkm_subdev_init(struct nvkm_object *); 35#define nvkm_printk_(s,l,p,f,a...) do { \
53int _nvkm_subdev_fini(struct nvkm_object *, bool suspend); 36 struct nvkm_subdev *_subdev = (s); \
54 37 if (_subdev->debug >= (l)) { \
55#define s_printk(s,l,f,a...) do { \ 38 dev_##p(_subdev->device->dev, "%s: "f, \
56 if ((s)->debug >= OS_DBG_##l) { \ 39 nvkm_subdev_name[_subdev->index], ##a); \
57 nv_printk((s)->base.parent, (s)->name, l, f, ##a); \
58 } \ 40 } \
59} while(0) 41} while(0)
60 42#define nvkm_printk(s,l,p,f,a...) nvkm_printk_((s), NV_DBG_##l, p, f, ##a)
61static inline u8 43#define nvkm_fatal(s,f,a...) nvkm_printk((s), FATAL, crit, f, ##a)
62nv_rd08(void *obj, u32 addr) 44#define nvkm_error(s,f,a...) nvkm_printk((s), ERROR, err, f, ##a)
63{ 45#define nvkm_warn(s,f,a...) nvkm_printk((s), WARN, notice, f, ##a)
64 struct nvkm_subdev *subdev = nv_subdev(obj); 46#define nvkm_info(s,f,a...) nvkm_printk((s), INFO, info, f, ##a)
65 u8 data = ioread8(subdev->mmio + addr); 47#define nvkm_debug(s,f,a...) nvkm_printk((s), DEBUG, info, f, ##a)
66 nv_spam(subdev, "nv_rd08 0x%06x 0x%02x\n", addr, data); 48#define nvkm_trace(s,f,a...) nvkm_printk((s), TRACE, info, f, ##a)
67 return data; 49#define nvkm_spam(s,f,a...) nvkm_printk((s), SPAM, dbg, f, ##a)
68}
69
70static inline u16
71nv_rd16(void *obj, u32 addr)
72{
73 struct nvkm_subdev *subdev = nv_subdev(obj);
74 u16 data = ioread16_native(subdev->mmio + addr);
75 nv_spam(subdev, "nv_rd16 0x%06x 0x%04x\n", addr, data);
76 return data;
77}
78
79static inline u32
80nv_rd32(void *obj, u32 addr)
81{
82 struct nvkm_subdev *subdev = nv_subdev(obj);
83 u32 data = ioread32_native(subdev->mmio + addr);
84 nv_spam(subdev, "nv_rd32 0x%06x 0x%08x\n", addr, data);
85 return data;
86}
87
88static inline void
89nv_wr08(void *obj, u32 addr, u8 data)
90{
91 struct nvkm_subdev *subdev = nv_subdev(obj);
92 nv_spam(subdev, "nv_wr08 0x%06x 0x%02x\n", addr, data);
93 iowrite8(data, subdev->mmio + addr);
94}
95
96static inline void
97nv_wr16(void *obj, u32 addr, u16 data)
98{
99 struct nvkm_subdev *subdev = nv_subdev(obj);
100 nv_spam(subdev, "nv_wr16 0x%06x 0x%04x\n", addr, data);
101 iowrite16_native(data, subdev->mmio + addr);
102}
103
104static inline void
105nv_wr32(void *obj, u32 addr, u32 data)
106{
107 struct nvkm_subdev *subdev = nv_subdev(obj);
108 nv_spam(subdev, "nv_wr32 0x%06x 0x%08x\n", addr, data);
109 iowrite32_native(data, subdev->mmio + addr);
110}
111
112static inline u32
113nv_mask(void *obj, u32 addr, u32 mask, u32 data)
114{
115 u32 temp = nv_rd32(obj, addr);
116 nv_wr32(obj, addr, (temp & ~mask) | data);
117 return temp;
118}
119#endif 50#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
new file mode 100644
index 000000000000..5aa2480da25f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -0,0 +1,35 @@
1#ifndef __NVKM_DEVICE_TEGRA_H__
2#define __NVKM_DEVICE_TEGRA_H__
3#include <core/device.h>
4#include <core/mm.h>
5
6struct nvkm_device_tegra {
7 struct nvkm_device device;
8 struct platform_device *pdev;
9 int irq;
10
11 struct reset_control *rst;
12 struct clk *clk;
13 struct clk *clk_pwr;
14
15 struct regulator *vdd;
16
17 struct {
18 /*
19 * Protects accesses to mm from subsystems
20 */
21 struct mutex mutex;
22
23 struct nvkm_mm mm;
24 struct iommu_domain *domain;
25 unsigned long pgshift;
26 } iommu;
27
28 int gpu_speedo;
29};
30
31int nvkm_device_tegra_new(struct platform_device *,
32 const char *cfg, const char *dbg,
33 bool detect, bool mmio, u64 subdev_mask,
34 struct nvkm_device **);
35#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
index e489beef2b92..904820558fc0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
@@ -1,5 +1,5 @@
1#ifndef __NVKM_BSP_H__ 1#ifndef __NVKM_BSP_H__
2#define __NVKM_BSP_H__ 2#define __NVKM_BSP_H__
3#include <core/engine.h> 3#include <engine/xtensa.h>
4extern struct nvkm_oclass g84_bsp_oclass; 4int g84_bsp_new(struct nvkm_device *, int, struct nvkm_engine **);
5#endif 5#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index e832f729e1b4..e2e22cd5305b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -1,16 +1,9 @@
1#ifndef __NVKM_CE_H__ 1#ifndef __NVKM_CE_H__
2#define __NVKM_CE_H__ 2#define __NVKM_CE_H__
3#include <core/engine.h> 3#include <engine/falcon.h>
4 4
5void gt215_ce_intr(struct nvkm_subdev *); 5int gt215_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
6 6int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
7extern struct nvkm_oclass gt215_ce_oclass; 7int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
8extern struct nvkm_oclass gf100_ce0_oclass; 8int gm204_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
9extern struct nvkm_oclass gf100_ce1_oclass;
10extern struct nvkm_oclass gk104_ce0_oclass;
11extern struct nvkm_oclass gk104_ce1_oclass;
12extern struct nvkm_oclass gk104_ce2_oclass;
13extern struct nvkm_oclass gm204_ce0_oclass;
14extern struct nvkm_oclass gm204_ce1_oclass;
15extern struct nvkm_oclass gm204_ce2_oclass;
16#endif 9#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
index 57c29e91bad5..03fa57a7c30a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
@@ -1,5 +1,5 @@
1#ifndef __NVKM_CIPHER_H__ 1#ifndef __NVKM_CIPHER_H__
2#define __NVKM_CIPHER_H__ 2#define __NVKM_CIPHER_H__
3#include <core/engine.h> 3#include <core/engine.h>
4extern struct nvkm_oclass g84_cipher_oclass; 4int g84_cipher_new(struct nvkm_device *, int, struct nvkm_engine **);
5#endif 5#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h
deleted file mode 100644
index 5d4805e67e76..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef __NOUVEAU_SUBDEV_DEVICE_H__
2#define __NOUVEAU_SUBDEV_DEVICE_H__
3
4#include <core/device.h>
5
6struct platform_device;
7
8enum nv_bus_type {
9 NOUVEAU_BUS_PCI,
10 NOUVEAU_BUS_PLATFORM,
11};
12
13#define nouveau_device_create(p,t,n,s,c,d,u) \
14 nouveau_device_create_((void *)(p), (t), (n), (s), (c), (d), \
15 sizeof(**u), (void **)u)
16
17int nouveau_device_create_(void *, enum nv_bus_type type, u64 name,
18 const char *sname, const char *cfg, const char *dbg,
19 int, void **);
20
21int nv04_identify(struct nouveau_device *);
22int nv10_identify(struct nouveau_device *);
23int nv20_identify(struct nouveau_device *);
24int nv30_identify(struct nouveau_device *);
25int nv40_identify(struct nouveau_device *);
26int nv50_identify(struct nouveau_device *);
27int nvc0_identify(struct nouveau_device *);
28int nve0_identify(struct nouveau_device *);
29int gm100_identify(struct nouveau_device *);
30#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index a5e1ed81312f..efc74d03346b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -1,32 +1,35 @@
1#ifndef __NVKM_DISP_H__ 1#ifndef __NVKM_DISP_H__
2#define __NVKM_DISP_H__ 2#define __NVKM_DISP_H__
3#define nvkm_disp(p) container_of((p), struct nvkm_disp, engine)
3#include <core/engine.h> 4#include <core/engine.h>
4#include <core/event.h> 5#include <core/event.h>
5 6
6struct nvkm_disp { 7struct nvkm_disp {
7 struct nvkm_engine base; 8 const struct nvkm_disp_func *func;
9 struct nvkm_engine engine;
10
11 struct nvkm_oproxy *client;
8 12
9 struct list_head outp; 13 struct list_head outp;
14 struct list_head conn;
10 15
11 struct nvkm_event hpd; 16 struct nvkm_event hpd;
12 struct nvkm_event vblank; 17 struct nvkm_event vblank;
13};
14 18
15static inline struct nvkm_disp * 19 struct {
16nvkm_disp(void *obj) 20 int nr;
17{ 21 } head;
18 return (void *)nvkm_engine(obj, NVDEV_ENGINE_DISP); 22};
19}
20 23
21extern struct nvkm_oclass *nv04_disp_oclass; 24int nv04_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
22extern struct nvkm_oclass *nv50_disp_oclass; 25int nv50_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
23extern struct nvkm_oclass *g84_disp_oclass; 26int g84_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
24extern struct nvkm_oclass *gt200_disp_oclass; 27int gt200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
25extern struct nvkm_oclass *g94_disp_oclass; 28int g94_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
26extern struct nvkm_oclass *gt215_disp_oclass; 29int gt215_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
27extern struct nvkm_oclass *gf110_disp_oclass; 30int gf119_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
28extern struct nvkm_oclass *gk104_disp_oclass; 31int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
29extern struct nvkm_oclass *gk110_disp_oclass; 32int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
30extern struct nvkm_oclass *gm107_disp_oclass; 33int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
31extern struct nvkm_oclass *gm204_disp_oclass; 34int gm204_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
32#endif 35#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
new file mode 100644
index 000000000000..114bfb737a81
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -0,0 +1,32 @@
1#ifndef __NVKM_DMA_H__
2#define __NVKM_DMA_H__
3#include <core/engine.h>
4struct nvkm_client;
5
6struct nvkm_dmaobj {
7 const struct nvkm_dmaobj_func *func;
8 struct nvkm_dma *dma;
9
10 struct nvkm_object object;
11 u32 target;
12 u32 access;
13 u64 start;
14 u64 limit;
15
16 struct rb_node rb;
17 u64 handle; /*XXX HANDLE MERGE */
18};
19
20struct nvkm_dma {
21 const struct nvkm_dma_func *func;
22 struct nvkm_engine engine;
23};
24
25struct nvkm_dmaobj *
26nvkm_dma_search(struct nvkm_dma *, struct nvkm_client *, u64 object);
27
28int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
29int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
30int gf100_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
31int gf119_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
32#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h
deleted file mode 100644
index c4fce8afcf83..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef __NVKM_DMAOBJ_H__
2#define __NVKM_DMAOBJ_H__
3#include <core/engine.h>
4struct nvkm_gpuobj;
5
6struct nvkm_dmaobj {
7 struct nvkm_object base;
8 u32 target;
9 u32 access;
10 u64 start;
11 u64 limit;
12};
13
14struct nvkm_dmaeng {
15 struct nvkm_engine base;
16
17 /* creates a "physical" dma object from a struct nvkm_dmaobj */
18 int (*bind)(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
19 struct nvkm_gpuobj **);
20};
21
22extern struct nvkm_oclass *nv04_dmaeng_oclass;
23extern struct nvkm_oclass *nv50_dmaeng_oclass;
24extern struct nvkm_oclass *gf100_dmaeng_oclass;
25extern struct nvkm_oclass *gf110_dmaeng_oclass;
26#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index bd38cf9130fc..81c0bc66a9f8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -1,41 +1,18 @@
1#ifndef __NVKM_FALCON_H__ 1#ifndef __NVKM_FALCON_H__
2#define __NVKM_FALCON_H__ 2#define __NVKM_FALCON_H__
3#include <core/engctx.h> 3#define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
4
5struct nvkm_falcon_chan {
6 struct nvkm_engctx base;
7};
8
9#define nvkm_falcon_context_create(p,e,c,g,s,a,f,d) \
10 nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
11#define nvkm_falcon_context_destroy(d) \
12 nvkm_engctx_destroy(&(d)->base)
13#define nvkm_falcon_context_init(d) \
14 nvkm_engctx_init(&(d)->base)
15#define nvkm_falcon_context_fini(d,s) \
16 nvkm_engctx_fini(&(d)->base, (s))
17
18#define _nvkm_falcon_context_ctor _nvkm_engctx_ctor
19#define _nvkm_falcon_context_dtor _nvkm_engctx_dtor
20#define _nvkm_falcon_context_init _nvkm_engctx_init
21#define _nvkm_falcon_context_fini _nvkm_engctx_fini
22#define _nvkm_falcon_context_rd32 _nvkm_engctx_rd32
23#define _nvkm_falcon_context_wr32 _nvkm_engctx_wr32
24
25struct nvkm_falcon_data {
26 bool external;
27};
28
29#include <core/engine.h> 4#include <core/engine.h>
5struct nvkm_fifo_chan;
30 6
31struct nvkm_falcon { 7struct nvkm_falcon {
32 struct nvkm_engine base; 8 const struct nvkm_falcon_func *func;
9 struct nvkm_engine engine;
33 10
34 u32 addr; 11 u32 addr;
35 u8 version; 12 u8 version;
36 u8 secret; 13 u8 secret;
37 14
38 struct nvkm_gpuobj *core; 15 struct nvkm_memory *core;
39 bool external; 16 bool external;
40 17
41 struct { 18 struct {
@@ -51,31 +28,21 @@ struct nvkm_falcon {
51 } data; 28 } data;
52}; 29};
53 30
54#define nv_falcon(priv) (&(priv)->base) 31int nvkm_falcon_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
55 32 int index, bool enable, u32 addr, struct nvkm_engine **);
56#define nvkm_falcon_create(p,e,c,b,d,i,f,r) \
57 nvkm_falcon_create_((p), (e), (c), (b), (d), (i), (f), \
58 sizeof(**r),(void **)r)
59#define nvkm_falcon_destroy(p) \
60 nvkm_engine_destroy(&(p)->base)
61#define nvkm_falcon_init(p) ({ \
62 struct nvkm_falcon *falcon = (p); \
63 _nvkm_falcon_init(nv_object(falcon)); \
64})
65#define nvkm_falcon_fini(p,s) ({ \
66 struct nvkm_falcon *falcon = (p); \
67 _nvkm_falcon_fini(nv_object(falcon), (s)); \
68})
69
70int nvkm_falcon_create_(struct nvkm_object *, struct nvkm_object *,
71 struct nvkm_oclass *, u32, bool, const char *,
72 const char *, int, void **);
73 33
74void nvkm_falcon_intr(struct nvkm_subdev *subdev); 34struct nvkm_falcon_func {
75 35 struct {
76#define _nvkm_falcon_dtor _nvkm_engine_dtor 36 u32 *data;
77int _nvkm_falcon_init(struct nvkm_object *); 37 u32 size;
78int _nvkm_falcon_fini(struct nvkm_object *, bool); 38 } code;
79u32 _nvkm_falcon_rd32(struct nvkm_object *, u64); 39 struct {
80void _nvkm_falcon_wr32(struct nvkm_object *, u64, u32); 40 u32 *data;
41 u32 size;
42 } data;
43 u32 pmc_enable;
44 void (*init)(struct nvkm_falcon *);
45 void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *);
46 struct nvkm_sclass sclass[];
47};
81#endif 48#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 97cdeab8e44c..9e6644955d19 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -1,127 +1,67 @@
1#ifndef __NVKM_FIFO_H__ 1#ifndef __NVKM_FIFO_H__
2#define __NVKM_FIFO_H__ 2#define __NVKM_FIFO_H__
3#include <core/namedb.h> 3#include <core/engine.h>
4#include <core/event.h>
5
6#define NVKM_FIFO_CHID_NR 4096
7
8struct nvkm_fifo_engn {
9 struct nvkm_object *object;
10 int refcount;
11 int usecount;
12};
4 13
5struct nvkm_fifo_chan { 14struct nvkm_fifo_chan {
6 struct nvkm_namedb namedb; 15 const struct nvkm_fifo_chan_func *func;
7 struct nvkm_dmaobj *pushdma; 16 struct nvkm_fifo *fifo;
8 struct nvkm_gpuobj *pushgpu; 17 u64 engines;
18 struct nvkm_object object;
19
20 struct list_head head;
21 u16 chid;
22 struct nvkm_gpuobj *inst;
23 struct nvkm_gpuobj *push;
24 struct nvkm_vm *vm;
9 void __iomem *user; 25 void __iomem *user;
10 u64 addr; 26 u64 addr;
11 u32 size; 27 u32 size;
12 u16 chid;
13 atomic_t refcnt; /* NV04_NVSW_SET_REF */
14};
15
16static inline struct nvkm_fifo_chan *
17nvkm_fifo_chan(void *obj)
18{
19 return (void *)nv_namedb(obj);
20}
21
22#define nvkm_fifo_channel_create(p,e,c,b,a,s,n,m,d) \
23 nvkm_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n), \
24 (m), sizeof(**d), (void **)d)
25#define nvkm_fifo_channel_init(p) \
26 nvkm_namedb_init(&(p)->namedb)
27#define nvkm_fifo_channel_fini(p,s) \
28 nvkm_namedb_fini(&(p)->namedb, (s))
29
30int nvkm_fifo_channel_create_(struct nvkm_object *,
31 struct nvkm_object *,
32 struct nvkm_oclass *,
33 int bar, u32 addr, u32 size, u32 push,
34 u64 engmask, int len, void **);
35void nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *);
36 28
37#define _nvkm_fifo_channel_init _nvkm_namedb_init 29 struct nvkm_fifo_engn engn[NVKM_SUBDEV_NR];
38#define _nvkm_fifo_channel_fini _nvkm_namedb_fini
39
40void _nvkm_fifo_channel_dtor(struct nvkm_object *);
41int _nvkm_fifo_channel_map(struct nvkm_object *, u64 *, u32 *);
42u32 _nvkm_fifo_channel_rd32(struct nvkm_object *, u64);
43void _nvkm_fifo_channel_wr32(struct nvkm_object *, u64, u32);
44int _nvkm_fifo_channel_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
45
46#include <core/gpuobj.h>
47
48struct nvkm_fifo_base {
49 struct nvkm_gpuobj gpuobj;
50}; 30};
51 31
52#define nvkm_fifo_context_create(p,e,c,g,s,a,f,d) \
53 nvkm_gpuobj_create((p), (e), (c), 0, (g), (s), (a), (f), (d))
54#define nvkm_fifo_context_destroy(p) \
55 nvkm_gpuobj_destroy(&(p)->gpuobj)
56#define nvkm_fifo_context_init(p) \
57 nvkm_gpuobj_init(&(p)->gpuobj)
58#define nvkm_fifo_context_fini(p,s) \
59 nvkm_gpuobj_fini(&(p)->gpuobj, (s))
60
61#define _nvkm_fifo_context_dtor _nvkm_gpuobj_dtor
62#define _nvkm_fifo_context_init _nvkm_gpuobj_init
63#define _nvkm_fifo_context_fini _nvkm_gpuobj_fini
64#define _nvkm_fifo_context_rd32 _nvkm_gpuobj_rd32
65#define _nvkm_fifo_context_wr32 _nvkm_gpuobj_wr32
66
67#include <core/engine.h>
68#include <core/event.h>
69
70struct nvkm_fifo { 32struct nvkm_fifo {
71 struct nvkm_engine base; 33 const struct nvkm_fifo_func *func;
34 struct nvkm_engine engine;
72 35
73 struct nvkm_event cevent; /* channel creation event */ 36 DECLARE_BITMAP(mask, NVKM_FIFO_CHID_NR);
74 struct nvkm_event uevent; /* async user trigger */ 37 int nr;
75 38 struct list_head chan;
76 struct nvkm_object **channel;
77 spinlock_t lock; 39 spinlock_t lock;
78 u16 min;
79 u16 max;
80 40
81 int (*chid)(struct nvkm_fifo *, struct nvkm_object *); 41 struct nvkm_event uevent; /* async user trigger */
82 void (*pause)(struct nvkm_fifo *, unsigned long *); 42 struct nvkm_event cevent; /* channel creation event */
83 void (*start)(struct nvkm_fifo *, unsigned long *);
84}; 43};
85 44
86static inline struct nvkm_fifo * 45void nvkm_fifo_pause(struct nvkm_fifo *, unsigned long *);
87nvkm_fifo(void *obj) 46void nvkm_fifo_start(struct nvkm_fifo *, unsigned long *);
88{ 47
89 return (void *)nvkm_engine(obj, NVDEV_ENGINE_FIFO); 48void nvkm_fifo_chan_put(struct nvkm_fifo *, unsigned long flags,
90} 49 struct nvkm_fifo_chan **);
91 50struct nvkm_fifo_chan *
92#define nvkm_fifo_create(o,e,c,fc,lc,d) \ 51nvkm_fifo_chan_inst(struct nvkm_fifo *, u64 inst, unsigned long *flags);
93 nvkm_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d) 52struct nvkm_fifo_chan *
94#define nvkm_fifo_init(p) \ 53nvkm_fifo_chan_chid(struct nvkm_fifo *, int chid, unsigned long *flags);
95 nvkm_engine_init(&(p)->base) 54
96#define nvkm_fifo_fini(p,s) \ 55int nv04_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
97 nvkm_engine_fini(&(p)->base, (s)) 56int nv10_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
98 57int nv17_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
99int nvkm_fifo_create_(struct nvkm_object *, struct nvkm_object *, 58int nv40_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
100 struct nvkm_oclass *, int min, int max, 59int nv50_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
101 int size, void **); 60int g84_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
102void nvkm_fifo_destroy(struct nvkm_fifo *); 61int gf100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
103const char * 62int gk104_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
104nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid); 63int gk208_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
105 64int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
106#define _nvkm_fifo_init _nvkm_engine_init 65int gm204_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
107#define _nvkm_fifo_fini _nvkm_engine_fini 66int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
108
109extern struct nvkm_oclass *nv04_fifo_oclass;
110extern struct nvkm_oclass *nv10_fifo_oclass;
111extern struct nvkm_oclass *nv17_fifo_oclass;
112extern struct nvkm_oclass *nv40_fifo_oclass;
113extern struct nvkm_oclass *nv50_fifo_oclass;
114extern struct nvkm_oclass *g84_fifo_oclass;
115extern struct nvkm_oclass *gf100_fifo_oclass;
116extern struct nvkm_oclass *gk104_fifo_oclass;
117extern struct nvkm_oclass *gk20a_fifo_oclass;
118extern struct nvkm_oclass *gk208_fifo_oclass;
119extern struct nvkm_oclass *gm204_fifo_oclass;
120
121int nvkm_fifo_uevent_ctor(struct nvkm_object *, void *, u32,
122 struct nvkm_notify *);
123void nvkm_fifo_uevent(struct nvkm_fifo *);
124
125void nv04_fifo_intr(struct nvkm_subdev *);
126int nv04_fifo_context_attach(struct nvkm_object *, struct nvkm_object *);
127#endif 67#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 7cbe20280760..f126e54d2e30 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -1,88 +1,46 @@
1#ifndef __NVKM_GR_H__ 1#ifndef __NVKM_GR_H__
2#define __NVKM_GR_H__ 2#define __NVKM_GR_H__
3#include <core/engctx.h>
4
5struct nvkm_gr_chan {
6 struct nvkm_engctx base;
7};
8
9#define nvkm_gr_context_create(p,e,c,g,s,a,f,d) \
10 nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
11#define nvkm_gr_context_destroy(d) \
12 nvkm_engctx_destroy(&(d)->base)
13#define nvkm_gr_context_init(d) \
14 nvkm_engctx_init(&(d)->base)
15#define nvkm_gr_context_fini(d,s) \
16 nvkm_engctx_fini(&(d)->base, (s))
17
18#define _nvkm_gr_context_dtor _nvkm_engctx_dtor
19#define _nvkm_gr_context_init _nvkm_engctx_init
20#define _nvkm_gr_context_fini _nvkm_engctx_fini
21#define _nvkm_gr_context_rd32 _nvkm_engctx_rd32
22#define _nvkm_gr_context_wr32 _nvkm_engctx_wr32
23
24#include <core/engine.h> 3#include <core/engine.h>
25 4
26struct nvkm_gr { 5struct nvkm_gr {
27 struct nvkm_engine base; 6 const struct nvkm_gr_func *func;
28 7 struct nvkm_engine engine;
29 /* Returns chipset-specific counts of units packed into an u64.
30 */
31 u64 (*units)(struct nvkm_gr *);
32}; 8};
33 9
34static inline struct nvkm_gr * 10u64 nvkm_gr_units(struct nvkm_gr *);
35nvkm_gr(void *obj) 11int nvkm_gr_tlb_flush(struct nvkm_gr *);
36{ 12
37 return (void *)nvkm_engine(obj, NVDEV_ENGINE_GR); 13int nv04_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
38} 14int nv10_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
39 15int nv15_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
40#define nvkm_gr_create(p,e,c,y,d) \ 16int nv17_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
41 nvkm_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d)) 17int nv20_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
42#define nvkm_gr_destroy(d) \ 18int nv25_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
43 nvkm_engine_destroy(&(d)->base) 19int nv2a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
44#define nvkm_gr_init(d) \ 20int nv30_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
45 nvkm_engine_init(&(d)->base) 21int nv34_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
46#define nvkm_gr_fini(d,s) \ 22int nv35_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
47 nvkm_engine_fini(&(d)->base, (s)) 23int nv40_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
48 24int nv44_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
49#define _nvkm_gr_dtor _nvkm_engine_dtor 25int nv50_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
50#define _nvkm_gr_init _nvkm_engine_init 26int g84_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
51#define _nvkm_gr_fini _nvkm_engine_fini 27int gt200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
52 28int mcp79_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
53extern struct nvkm_oclass nv04_gr_oclass; 29int gt215_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
54extern struct nvkm_oclass nv10_gr_oclass; 30int mcp89_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
55extern struct nvkm_oclass nv20_gr_oclass; 31int gf100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
56extern struct nvkm_oclass nv25_gr_oclass; 32int gf104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
57extern struct nvkm_oclass nv2a_gr_oclass; 33int gf108_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
58extern struct nvkm_oclass nv30_gr_oclass; 34int gf110_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
59extern struct nvkm_oclass nv34_gr_oclass; 35int gf117_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
60extern struct nvkm_oclass nv35_gr_oclass; 36int gf119_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
61extern struct nvkm_oclass nv40_gr_oclass; 37int gk104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
62extern struct nvkm_oclass nv50_gr_oclass; 38int gk110_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
63extern struct nvkm_oclass *gf100_gr_oclass; 39int gk110b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
64extern struct nvkm_oclass *gf108_gr_oclass; 40int gk208_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
65extern struct nvkm_oclass *gf104_gr_oclass; 41int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
66extern struct nvkm_oclass *gf110_gr_oclass; 42int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
67extern struct nvkm_oclass *gf117_gr_oclass; 43int gm204_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
68extern struct nvkm_oclass *gf119_gr_oclass; 44int gm206_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
69extern struct nvkm_oclass *gk104_gr_oclass; 45int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
70extern struct nvkm_oclass *gk20a_gr_oclass;
71extern struct nvkm_oclass *gk110_gr_oclass;
72extern struct nvkm_oclass *gk110b_gr_oclass;
73extern struct nvkm_oclass *gk208_gr_oclass;
74extern struct nvkm_oclass *gm107_gr_oclass;
75extern struct nvkm_oclass *gm204_gr_oclass;
76extern struct nvkm_oclass *gm206_gr_oclass;
77
78#include <core/enum.h>
79
80extern const struct nvkm_bitfield nv04_gr_nsource[];
81extern struct nvkm_ofuncs nv04_gr_ofuncs;
82bool nv04_gr_idle(void *obj);
83
84extern const struct nvkm_bitfield nv10_gr_intr_name[];
85extern const struct nvkm_bitfield nv10_gr_nstatus[];
86
87extern const struct nvkm_enum nv50_data_error_names[];
88#endif 46#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
index 4e500b398064..257738eff9f6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
@@ -1,62 +1,9 @@
1#ifndef __NVKM_MPEG_H__ 1#ifndef __NVKM_MPEG_H__
2#define __NVKM_MPEG_H__ 2#define __NVKM_MPEG_H__
3#include <core/engctx.h>
4
5struct nvkm_mpeg_chan {
6 struct nvkm_engctx base;
7};
8
9#define nvkm_mpeg_context_create(p,e,c,g,s,a,f,d) \
10 nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
11#define nvkm_mpeg_context_destroy(d) \
12 nvkm_engctx_destroy(&(d)->base)
13#define nvkm_mpeg_context_init(d) \
14 nvkm_engctx_init(&(d)->base)
15#define nvkm_mpeg_context_fini(d,s) \
16 nvkm_engctx_fini(&(d)->base, (s))
17
18#define _nvkm_mpeg_context_dtor _nvkm_engctx_dtor
19#define _nvkm_mpeg_context_init _nvkm_engctx_init
20#define _nvkm_mpeg_context_fini _nvkm_engctx_fini
21#define _nvkm_mpeg_context_rd32 _nvkm_engctx_rd32
22#define _nvkm_mpeg_context_wr32 _nvkm_engctx_wr32
23
24#include <core/engine.h> 3#include <core/engine.h>
25 4int nv31_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
26struct nvkm_mpeg { 5int nv40_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
27 struct nvkm_engine base; 6int nv44_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
28}; 7int nv50_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
29 8int g84_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
30#define nvkm_mpeg_create(p,e,c,d) \
31 nvkm_engine_create((p), (e), (c), true, "PMPEG", "mpeg", (d))
32#define nvkm_mpeg_destroy(d) \
33 nvkm_engine_destroy(&(d)->base)
34#define nvkm_mpeg_init(d) \
35 nvkm_engine_init(&(d)->base)
36#define nvkm_mpeg_fini(d,s) \
37 nvkm_engine_fini(&(d)->base, (s))
38
39#define _nvkm_mpeg_dtor _nvkm_engine_dtor
40#define _nvkm_mpeg_init _nvkm_engine_init
41#define _nvkm_mpeg_fini _nvkm_engine_fini
42
43extern struct nvkm_oclass nv31_mpeg_oclass;
44extern struct nvkm_oclass nv40_mpeg_oclass;
45extern struct nvkm_oclass nv44_mpeg_oclass;
46extern struct nvkm_oclass nv50_mpeg_oclass;
47extern struct nvkm_oclass g84_mpeg_oclass;
48extern struct nvkm_ofuncs nv31_mpeg_ofuncs;
49extern struct nvkm_oclass nv31_mpeg_cclass;
50extern struct nvkm_oclass nv31_mpeg_sclass[];
51extern struct nvkm_oclass nv40_mpeg_sclass[];
52void nv31_mpeg_intr(struct nvkm_subdev *);
53void nv31_mpeg_tile_prog(struct nvkm_engine *, int);
54int nv31_mpeg_init(struct nvkm_object *);
55
56extern struct nvkm_ofuncs nv50_mpeg_ofuncs;
57int nv50_mpeg_context_ctor(struct nvkm_object *, struct nvkm_object *,
58 struct nvkm_oclass *, void *, u32,
59 struct nvkm_object **);
60void nv50_mpeg_intr(struct nvkm_subdev *);
61int nv50_mpeg_init(struct nvkm_object *);
62#endif 9#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
index 54b7672eed9c..08516ca82e04 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
@@ -1,7 +1,8 @@
1#ifndef __NVKM_MSPDEC_H__ 1#ifndef __NVKM_MSPDEC_H__
2#define __NVKM_MSPDEC_H__ 2#define __NVKM_MSPDEC_H__
3#include <core/engine.h> 3#include <engine/falcon.h>
4extern struct nvkm_oclass g98_mspdec_oclass; 4int g98_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
5extern struct nvkm_oclass gf100_mspdec_oclass; 5int gt215_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
6extern struct nvkm_oclass gk104_mspdec_oclass; 6int gf100_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
7int gk104_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
7#endif 8#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
index c6c69d0a8d01..85fd306021ac 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
@@ -1,6 +1,7 @@
1#ifndef __NVKM_MSPPP_H__ 1#ifndef __NVKM_MSPPP_H__
2#define __NVKM_MSPPP_H__ 2#define __NVKM_MSPPP_H__
3#include <core/engine.h> 3#include <engine/falcon.h>
4extern struct nvkm_oclass g98_msppp_oclass; 4int g98_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
5extern struct nvkm_oclass gf100_msppp_oclass; 5int gt215_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
6int gf100_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
6#endif 7#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
index 1f193b7bd6c5..99757ed96f76 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
@@ -1,7 +1,9 @@
1#ifndef __NVKM_MSVLD_H__ 1#ifndef __NVKM_MSVLD_H__
2#define __NVKM_MSVLD_H__ 2#define __NVKM_MSVLD_H__
3#include <core/engine.h> 3#include <engine/falcon.h>
4extern struct nvkm_oclass g98_msvld_oclass; 4int g98_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
5extern struct nvkm_oclass gf100_msvld_oclass; 5int gt215_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
6extern struct nvkm_oclass gk104_msvld_oclass; 6int mcp89_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
7int gf100_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
8int gk104_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
7#endif 9#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
index 93181bbf0f63..240855ad8c8d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
@@ -2,33 +2,24 @@
2#define __NVKM_PM_H__ 2#define __NVKM_PM_H__
3#include <core/engine.h> 3#include <core/engine.h>
4 4
5struct nvkm_perfdom;
6struct nvkm_perfctr;
7struct nvkm_pm { 5struct nvkm_pm {
8 struct nvkm_engine base; 6 const struct nvkm_pm_func *func;
7 struct nvkm_engine engine;
9 8
10 struct nvkm_perfctx *context; 9 struct nvkm_object *perfmon;
11 void *profile_data;
12 10
13 struct list_head domains; 11 struct list_head domains;
12 struct list_head sources;
14 u32 sequence; 13 u32 sequence;
15
16 /*XXX: temp for daemon backend */
17 u32 pwr[8];
18 u32 last;
19}; 14};
20 15
21static inline struct nvkm_pm * 16int nv40_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
22nvkm_pm(void *obj) 17int nv50_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
23{ 18int g84_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
24 return (void *)nvkm_engine(obj, NVDEV_ENGINE_PM); 19int gt200_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
25} 20int gt215_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
26 21int gf100_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
27extern struct nvkm_oclass *nv40_pm_oclass; 22int gf108_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
28extern struct nvkm_oclass *nv50_pm_oclass; 23int gf117_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
29extern struct nvkm_oclass *g84_pm_oclass; 24int gk104_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
30extern struct nvkm_oclass *gt215_pm_oclass;
31extern struct nvkm_oclass gf100_pm_oclass;
32extern struct nvkm_oclass gk104_pm_oclass;
33extern struct nvkm_oclass gk110_pm_oclass;
34#endif 25#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
index 44590a2a479d..7317ef4c0207 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
@@ -1,5 +1,5 @@
1#ifndef __NVKM_SEC_H__ 1#ifndef __NVKM_SEC_H__
2#define __NVKM_SEC_H__ 2#define __NVKM_SEC_H__
3#include <core/engine.h> 3#include <engine/falcon.h>
4extern struct nvkm_oclass g98_sec_oclass; 4int g98_sec_new(struct nvkm_device *, int, struct nvkm_engine **);
5#endif 5#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
index a529013c92ab..096e7dbd1e65 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
@@ -1,50 +1,18 @@
1#ifndef __NVKM_SW_H__ 1#ifndef __NVKM_SW_H__
2#define __NVKM_SW_H__ 2#define __NVKM_SW_H__
3#include <core/engctx.h>
4
5struct nvkm_sw_chan {
6 struct nvkm_engctx base;
7
8 int (*flip)(void *);
9 void *flip_data;
10};
11
12#define nvkm_sw_context_create(p,e,c,d) \
13 nvkm_engctx_create((p), (e), (c), (p), 0, 0, 0, (d))
14#define nvkm_sw_context_destroy(d) \
15 nvkm_engctx_destroy(&(d)->base)
16#define nvkm_sw_context_init(d) \
17 nvkm_engctx_init(&(d)->base)
18#define nvkm_sw_context_fini(d,s) \
19 nvkm_engctx_fini(&(d)->base, (s))
20
21#define _nvkm_sw_context_dtor _nvkm_engctx_dtor
22#define _nvkm_sw_context_init _nvkm_engctx_init
23#define _nvkm_sw_context_fini _nvkm_engctx_fini
24
25#include <core/engine.h> 3#include <core/engine.h>
26 4
27struct nvkm_sw { 5struct nvkm_sw {
28 struct nvkm_engine base; 6 const struct nvkm_sw_func *func;
29}; 7 struct nvkm_engine engine;
30 8
31#define nvkm_sw_create(p,e,c,d) \ 9 struct list_head chan;
32 nvkm_engine_create((p), (e), (c), true, "SW", "software", (d)) 10};
33#define nvkm_sw_destroy(d) \
34 nvkm_engine_destroy(&(d)->base)
35#define nvkm_sw_init(d) \
36 nvkm_engine_init(&(d)->base)
37#define nvkm_sw_fini(d,s) \
38 nvkm_engine_fini(&(d)->base, (s))
39
40#define _nvkm_sw_dtor _nvkm_engine_dtor
41#define _nvkm_sw_init _nvkm_engine_init
42#define _nvkm_sw_fini _nvkm_engine_fini
43 11
44extern struct nvkm_oclass *nv04_sw_oclass; 12bool nvkm_sw_mthd(struct nvkm_sw *sw, int chid, int subc, u32 mthd, u32 data);
45extern struct nvkm_oclass *nv10_sw_oclass;
46extern struct nvkm_oclass *nv50_sw_oclass;
47extern struct nvkm_oclass *gf100_sw_oclass;
48 13
49void nv04_sw_intr(struct nvkm_subdev *); 14int nv04_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
15int nv10_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
16int nv50_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
17int gf100_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
50#endif 18#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
index 7851f18c5add..616ea91e03f8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
@@ -1,5 +1,5 @@
1#ifndef __NVKM_VP_H__ 1#ifndef __NVKM_VP_H__
2#define __NVKM_VP_H__ 2#define __NVKM_VP_H__
3#include <core/engine.h> 3#include <engine/xtensa.h>
4extern struct nvkm_oclass g84_vp_oclass; 4int g84_vp_new(struct nvkm_device *, int, struct nvkm_engine **);
5#endif 5#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
index 7a216cca2865..3128d21a5d1a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
@@ -1,35 +1,23 @@
1#ifndef __NVKM_XTENSA_H__ 1#ifndef __NVKM_XTENSA_H__
2#define __NVKM_XTENSA_H__ 2#define __NVKM_XTENSA_H__
3#define nvkm_xtensa(p) container_of((p), struct nvkm_xtensa, engine)
3#include <core/engine.h> 4#include <core/engine.h>
4struct nvkm_gpuobj;
5 5
6struct nvkm_xtensa { 6struct nvkm_xtensa {
7 struct nvkm_engine base; 7 const struct nvkm_xtensa_func *func;
8
9 u32 addr; 8 u32 addr;
10 struct nvkm_gpuobj *gpu_fw; 9 struct nvkm_engine engine;
11 u32 fifo_val;
12 u32 unkd28;
13};
14 10
15#define nvkm_xtensa_create(p,e,c,b,d,i,f,r) \ 11 struct nvkm_memory *gpu_fw;
16 nvkm_xtensa_create_((p), (e), (c), (b), (d), (i), (f), \ 12};
17 sizeof(**r),(void **)r)
18 13
19int _nvkm_xtensa_engctx_ctor(struct nvkm_object *, 14int nvkm_xtensa_new_(const struct nvkm_xtensa_func *, struct nvkm_device *,
20 struct nvkm_object *, 15 int index, bool enable, u32 addr, struct nvkm_engine **);
21 struct nvkm_oclass *, void *, u32,
22 struct nvkm_object **);
23 16
24void _nvkm_xtensa_intr(struct nvkm_subdev *); 17struct nvkm_xtensa_func {
25int nvkm_xtensa_create_(struct nvkm_object *, 18 u32 pmc_enable;
26 struct nvkm_object *, 19 u32 fifo_val;
27 struct nvkm_oclass *, u32, bool, 20 u32 unkd28;
28 const char *, const char *, 21 struct nvkm_sclass sclass[];
29 int, void **); 22};
30#define _nvkm_xtensa_dtor _nvkm_engine_dtor
31int _nvkm_xtensa_init(struct nvkm_object *);
32int _nvkm_xtensa_fini(struct nvkm_object *, bool);
33u32 _nvkm_xtensa_rd32(struct nvkm_object *, u64);
34void _nvkm_xtensa_wr32(struct nvkm_object *, u64, u32);
35#endif 23#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index c7a007b8bc10..d3071b5a4f98 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -1,33 +1,24 @@
1#ifndef __NVKM_BAR_H__ 1#ifndef __NVKM_BAR_H__
2#define __NVKM_BAR_H__ 2#define __NVKM_BAR_H__
3#include <core/subdev.h> 3#include <core/subdev.h>
4struct nvkm_mem;
5struct nvkm_vma; 4struct nvkm_vma;
6 5
7struct nvkm_bar { 6struct nvkm_bar {
8 struct nvkm_subdev base; 7 const struct nvkm_bar_func *func;
8 struct nvkm_subdev subdev;
9 9
10 int (*alloc)(struct nvkm_bar *, struct nvkm_object *, 10 spinlock_t lock;
11 struct nvkm_mem *, struct nvkm_object **);
12
13 int (*kmap)(struct nvkm_bar *, struct nvkm_mem *, u32 flags,
14 struct nvkm_vma *);
15 int (*umap)(struct nvkm_bar *, struct nvkm_mem *, u32 flags,
16 struct nvkm_vma *);
17 void (*unmap)(struct nvkm_bar *, struct nvkm_vma *);
18 void (*flush)(struct nvkm_bar *);
19 11
20 /* whether the BAR supports to be ioremapped WC or should be uncached */ 12 /* whether the BAR supports to be ioremapped WC or should be uncached */
21 bool iomap_uncached; 13 bool iomap_uncached;
22}; 14};
23 15
24static inline struct nvkm_bar * 16void nvkm_bar_flush(struct nvkm_bar *);
25nvkm_bar(void *obj) 17struct nvkm_vm *nvkm_bar_kmap(struct nvkm_bar *);
26{ 18int nvkm_bar_umap(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
27 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_BAR);
28}
29 19
30extern struct nvkm_oclass nv50_bar_oclass; 20int nv50_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
31extern struct nvkm_oclass gf100_bar_oclass; 21int g84_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
32extern struct nvkm_oclass gk20a_bar_oclass; 22int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
23int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
33#endif 24#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
index cef287e0bbf2..e39a1fea930b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
@@ -3,7 +3,7 @@
3#include <core/subdev.h> 3#include <core/subdev.h>
4 4
5struct nvkm_bios { 5struct nvkm_bios {
6 struct nvkm_subdev base; 6 struct nvkm_subdev subdev;
7 u32 size; 7 u32 size;
8 u8 *data; 8 u8 *data;
9 9
@@ -19,14 +19,13 @@ struct nvkm_bios {
19 } version; 19 } version;
20}; 20};
21 21
22static inline struct nvkm_bios *
23nvkm_bios(void *obj)
24{
25 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_VBIOS);
26}
27
28u8 nvbios_checksum(const u8 *data, int size); 22u8 nvbios_checksum(const u8 *data, int size);
29u16 nvbios_findstr(const u8 *data, int size, const char *str, int len); 23u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
24int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len);
25
26#define nvbios_rd08(b,o) (b)->data[(o)]
27#define nvbios_rd16(b,o) get_unaligned_le16(&(b)->data[(o)])
28#define nvbios_rd32(b,o) get_unaligned_le32(&(b)->data[(o)])
30 29
31extern struct nvkm_oclass nvkm_bios_oclass; 30int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **);
32#endif 31#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
index 4107aa546a21..3f0c7c414026 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
@@ -4,8 +4,8 @@ static inline u16
4bmp_version(struct nvkm_bios *bios) 4bmp_version(struct nvkm_bios *bios)
5{ 5{
6 if (bios->bmp_offset) { 6 if (bios->bmp_offset) {
7 return nv_ro08(bios, bios->bmp_offset + 5) << 8 | 7 return nvbios_rd08(bios, bios->bmp_offset + 5) << 8 |
8 nv_ro08(bios, bios->bmp_offset + 6); 8 nvbios_rd08(bios, bios->bmp_offset + 6);
9 } 9 }
10 10
11 return 0x0000; 11 return 0x0000;
@@ -15,7 +15,7 @@ static inline u16
15bmp_mem_init_table(struct nvkm_bios *bios) 15bmp_mem_init_table(struct nvkm_bios *bios)
16{ 16{
17 if (bmp_version(bios) >= 0x0300) 17 if (bmp_version(bios) >= 0x0300)
18 return nv_ro16(bios, bios->bmp_offset + 24); 18 return nvbios_rd16(bios, bios->bmp_offset + 24);
19 return 0x0000; 19 return 0x0000;
20} 20}
21 21
@@ -23,7 +23,7 @@ static inline u16
23bmp_sdr_seq_table(struct nvkm_bios *bios) 23bmp_sdr_seq_table(struct nvkm_bios *bios)
24{ 24{
25 if (bmp_version(bios) >= 0x0300) 25 if (bmp_version(bios) >= 0x0300)
26 return nv_ro16(bios, bios->bmp_offset + 26); 26 return nvbios_rd16(bios, bios->bmp_offset + 26);
27 return 0x0000; 27 return 0x0000;
28} 28}
29 29
@@ -31,7 +31,7 @@ static inline u16
31bmp_ddr_seq_table(struct nvkm_bios *bios) 31bmp_ddr_seq_table(struct nvkm_bios *bios)
32{ 32{
33 if (bmp_version(bios) >= 0x0300) 33 if (bmp_version(bios) >= 0x0300)
34 return nv_ro16(bios, bios->bmp_offset + 28); 34 return nvbios_rd16(bios, bios->bmp_offset + 28);
35 return 0x0000; 35 return 0x0000;
36} 36}
37#endif 37#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
index 578a667eed3b..4dc1c8af840c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
@@ -1,5 +1,6 @@
1#ifndef __NVBIOS_INIT_H__ 1#ifndef __NVBIOS_INIT_H__
2#define __NVBIOS_INIT_H__ 2#define __NVBIOS_INIT_H__
3
3struct nvbios_init { 4struct nvbios_init {
4 struct nvkm_subdev *subdev; 5 struct nvkm_subdev *subdev;
5 struct nvkm_bios *bios; 6 struct nvkm_bios *bios;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
index 420426793880..3a9abd38aca8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
@@ -7,6 +7,11 @@ struct nvbios_ramcfg {
7 unsigned rammap_max; 7 unsigned rammap_max;
8 union { 8 union {
9 struct { 9 struct {
10 unsigned rammap_00_16_20:1;
11 unsigned rammap_00_16_40:1;
12 unsigned rammap_00_17_02:1;
13 };
14 struct {
10 unsigned rammap_10_04_02:1; 15 unsigned rammap_10_04_02:1;
11 unsigned rammap_10_04_08:1; 16 unsigned rammap_10_04_08:1;
12 }; 17 };
@@ -32,15 +37,32 @@ struct nvbios_ramcfg {
32 unsigned ramcfg_ver; 37 unsigned ramcfg_ver;
33 unsigned ramcfg_hdr; 38 unsigned ramcfg_hdr;
34 unsigned ramcfg_timing; 39 unsigned ramcfg_timing;
40 unsigned ramcfg_DLLoff;
41 unsigned ramcfg_RON;
35 union { 42 union {
36 struct { 43 struct {
44 unsigned ramcfg_00_03_01:1;
45 unsigned ramcfg_00_03_02:1;
46 unsigned ramcfg_00_03_08:1;
47 unsigned ramcfg_00_03_10:1;
48 unsigned ramcfg_00_04_02:1;
49 unsigned ramcfg_00_04_04:1;
50 unsigned ramcfg_00_04_20:1;
51 unsigned ramcfg_00_05:8;
52 unsigned ramcfg_00_06:8;
53 unsigned ramcfg_00_07:8;
54 unsigned ramcfg_00_08:8;
55 unsigned ramcfg_00_09:8;
56 unsigned ramcfg_00_0a_0f:4;
57 unsigned ramcfg_00_0a_f0:4;
58 };
59 struct {
37 unsigned ramcfg_10_02_01:1; 60 unsigned ramcfg_10_02_01:1;
38 unsigned ramcfg_10_02_02:1; 61 unsigned ramcfg_10_02_02:1;
39 unsigned ramcfg_10_02_04:1; 62 unsigned ramcfg_10_02_04:1;
40 unsigned ramcfg_10_02_08:1; 63 unsigned ramcfg_10_02_08:1;
41 unsigned ramcfg_10_02_10:1; 64 unsigned ramcfg_10_02_10:1;
42 unsigned ramcfg_10_02_20:1; 65 unsigned ramcfg_10_02_20:1;
43 unsigned ramcfg_10_DLLoff:1;
44 unsigned ramcfg_10_03_0f:4; 66 unsigned ramcfg_10_03_0f:4;
45 unsigned ramcfg_10_04_01:1; 67 unsigned ramcfg_10_04_01:1;
46 unsigned ramcfg_10_05:8; 68 unsigned ramcfg_10_05:8;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
index 609a905ec780..8d8ee13721ec 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
@@ -7,6 +7,8 @@ u32 nvbios_rammapTe(struct nvkm_bios *, u8 *ver, u8 *hdr,
7 7
8u32 nvbios_rammapEe(struct nvkm_bios *, int idx, 8u32 nvbios_rammapEe(struct nvkm_bios *, int idx,
9 u8 *ver, u8 *hdr, u8 *cnt, u8 *len); 9 u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
10u32 nvbios_rammapEp_from_perf(struct nvkm_bios *bios, u32 data, u8 size,
11 struct nvbios_ramcfg *p);
10u32 nvbios_rammapEp(struct nvkm_bios *, int idx, 12u32 nvbios_rammapEp(struct nvkm_bios *, int idx,
11 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *); 13 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *);
12u32 nvbios_rammapEm(struct nvkm_bios *, u16 mhz, 14u32 nvbios_rammapEm(struct nvkm_bios *, u16 mhz,
@@ -15,6 +17,8 @@ u32 nvbios_rammapEm(struct nvkm_bios *, u16 mhz,
15u32 nvbios_rammapSe(struct nvkm_bios *, u32 data, 17u32 nvbios_rammapSe(struct nvkm_bios *, u32 data,
16 u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx, 18 u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
17 u8 *ver, u8 *hdr); 19 u8 *ver, u8 *hdr);
20u32 nvbios_rammapSp_from_perf(struct nvkm_bios *bios, u32 data, u8 size, int idx,
21 struct nvbios_ramcfg *p);
18u32 nvbios_rammapSp(struct nvkm_bios *, u32 data, 22u32 nvbios_rammapSp(struct nvkm_bios *, u32 data,
19 u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx, 23 u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
20 u8 *ver, u8 *hdr, struct nvbios_ramcfg *); 24 u8 *ver, u8 *hdr, struct nvbios_ramcfg *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
index fba83c04849e..6a04d9c07944 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
@@ -2,49 +2,23 @@
2#define __NVKM_BUS_H__ 2#define __NVKM_BUS_H__
3#include <core/subdev.h> 3#include <core/subdev.h>
4 4
5struct nvkm_bus_intr {
6 u32 stat;
7 u32 unit;
8};
9
10struct nvkm_bus { 5struct nvkm_bus {
11 struct nvkm_subdev base; 6 const struct nvkm_bus_func *func;
12 int (*hwsq_exec)(struct nvkm_bus *, u32 *, u32); 7 struct nvkm_subdev subdev;
13 u32 hwsq_size;
14}; 8};
15 9
16static inline struct nvkm_bus *
17nvkm_bus(void *obj)
18{
19 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_BUS);
20}
21
22#define nvkm_bus_create(p, e, o, d) \
23 nvkm_subdev_create_((p), (e), (o), 0, "PBUS", "master", \
24 sizeof(**d), (void **)d)
25#define nvkm_bus_destroy(p) \
26 nvkm_subdev_destroy(&(p)->base)
27#define nvkm_bus_init(p) \
28 nvkm_subdev_init(&(p)->base)
29#define nvkm_bus_fini(p, s) \
30 nvkm_subdev_fini(&(p)->base, (s))
31
32#define _nvkm_bus_dtor _nvkm_subdev_dtor
33#define _nvkm_bus_init _nvkm_subdev_init
34#define _nvkm_bus_fini _nvkm_subdev_fini
35
36extern struct nvkm_oclass *nv04_bus_oclass;
37extern struct nvkm_oclass *nv31_bus_oclass;
38extern struct nvkm_oclass *nv50_bus_oclass;
39extern struct nvkm_oclass *g94_bus_oclass;
40extern struct nvkm_oclass *gf100_bus_oclass;
41
42/* interface to sequencer */ 10/* interface to sequencer */
43struct nvkm_hwsq; 11struct nvkm_hwsq;
44int nvkm_hwsq_init(struct nvkm_bus *, struct nvkm_hwsq **); 12int nvkm_hwsq_init(struct nvkm_subdev *, struct nvkm_hwsq **);
45int nvkm_hwsq_fini(struct nvkm_hwsq **, bool exec); 13int nvkm_hwsq_fini(struct nvkm_hwsq **, bool exec);
46void nvkm_hwsq_wr32(struct nvkm_hwsq *, u32 addr, u32 data); 14void nvkm_hwsq_wr32(struct nvkm_hwsq *, u32 addr, u32 data);
47void nvkm_hwsq_setf(struct nvkm_hwsq *, u8 flag, int data); 15void nvkm_hwsq_setf(struct nvkm_hwsq *, u8 flag, int data);
48void nvkm_hwsq_wait(struct nvkm_hwsq *, u8 flag, u8 data); 16void nvkm_hwsq_wait(struct nvkm_hwsq *, u8 flag, u8 data);
49void nvkm_hwsq_nsec(struct nvkm_hwsq *, u32 nsec); 17void nvkm_hwsq_nsec(struct nvkm_hwsq *, u32 nsec);
18
19int nv04_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
20int nv31_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
21int nv50_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
22int g94_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
23int gf100_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
50#endif 24#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index f5d303850d8c..8708f0a4e188 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -71,9 +71,10 @@ struct nvkm_domain {
71}; 71};
72 72
73struct nvkm_clk { 73struct nvkm_clk {
74 struct nvkm_subdev base; 74 const struct nvkm_clk_func *func;
75 struct nvkm_subdev subdev;
75 76
76 struct nvkm_domain *domains; 77 const struct nvkm_domain *domains;
77 struct nvkm_pstate bstate; 78 struct nvkm_pstate bstate;
78 79
79 struct list_head states; 80 struct list_head states;
@@ -94,68 +95,27 @@ struct nvkm_clk {
94 95
95 bool allow_reclock; 96 bool allow_reclock;
96 97
97 int (*read)(struct nvkm_clk *, enum nv_clk_src);
98 int (*calc)(struct nvkm_clk *, struct nvkm_cstate *);
99 int (*prog)(struct nvkm_clk *);
100 void (*tidy)(struct nvkm_clk *);
101
102 /*XXX: die, these are here *only* to support the completely 98 /*XXX: die, these are here *only* to support the completely
103 * bat-shit insane what-was-nvkm_hw.c code 99 * bat-shit insane what-was-nouveau_hw.c code
104 */ 100 */
105 int (*pll_calc)(struct nvkm_clk *, struct nvbios_pll *, int clk, 101 int (*pll_calc)(struct nvkm_clk *, struct nvbios_pll *, int clk,
106 struct nvkm_pll_vals *pv); 102 struct nvkm_pll_vals *pv);
107 int (*pll_prog)(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *pv); 103 int (*pll_prog)(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *pv);
108}; 104};
109 105
110static inline struct nvkm_clk * 106int nvkm_clk_read(struct nvkm_clk *, enum nv_clk_src);
111nvkm_clk(void *obj)
112{
113 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_CLK);
114}
115
116#define nvkm_clk_create(p,e,o,i,r,s,n,d) \
117 nvkm_clk_create_((p), (e), (o), (i), (r), (s), (n), sizeof(**d), \
118 (void **)d)
119#define nvkm_clk_destroy(p) ({ \
120 struct nvkm_clk *clk = (p); \
121 _nvkm_clk_dtor(nv_object(clk)); \
122})
123#define nvkm_clk_init(p) ({ \
124 struct nvkm_clk *clk = (p); \
125 _nvkm_clk_init(nv_object(clk)); \
126})
127#define nvkm_clk_fini(p,s) ({ \
128 struct nvkm_clk *clk = (p); \
129 _nvkm_clk_fini(nv_object(clk), (s)); \
130})
131
132int nvkm_clk_create_(struct nvkm_object *, struct nvkm_object *,
133 struct nvkm_oclass *,
134 struct nvkm_domain *, struct nvkm_pstate *,
135 int, bool, int, void **);
136void _nvkm_clk_dtor(struct nvkm_object *);
137int _nvkm_clk_init(struct nvkm_object *);
138int _nvkm_clk_fini(struct nvkm_object *, bool);
139
140extern struct nvkm_oclass nv04_clk_oclass;
141extern struct nvkm_oclass nv40_clk_oclass;
142extern struct nvkm_oclass *nv50_clk_oclass;
143extern struct nvkm_oclass *g84_clk_oclass;
144extern struct nvkm_oclass *mcp77_clk_oclass;
145extern struct nvkm_oclass gt215_clk_oclass;
146extern struct nvkm_oclass gf100_clk_oclass;
147extern struct nvkm_oclass gk104_clk_oclass;
148extern struct nvkm_oclass gk20a_clk_oclass;
149
150int nv04_clk_pll_set(struct nvkm_clk *, u32 type, u32 freq);
151int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk,
152 struct nvkm_pll_vals *);
153int nv04_clk_pll_prog(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *);
154int gt215_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *,
155 int clk, struct nvkm_pll_vals *);
156
157int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr); 107int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr);
158int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait); 108int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
159int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel); 109int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
160int nvkm_clk_tstate(struct nvkm_clk *, int req, int rel); 110int nvkm_clk_tstate(struct nvkm_clk *, int req, int rel);
111
112int nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
113int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
114int nv50_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
115int g84_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
116int mcp77_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
117int gt215_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
118int gf100_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
119int gk104_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
120int gk20a_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
161#endif 121#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index d1bbe0d62b35..6c1407fd317b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -1,32 +1,31 @@
1#ifndef __NVKM_DEVINIT_H__ 1#ifndef __NVKM_DEVINIT_H__
2#define __NVKM_DEVINIT_H__ 2#define __NVKM_DEVINIT_H__
3#include <core/subdev.h> 3#include <core/subdev.h>
4struct nvkm_devinit;
4 5
5struct nvkm_devinit { 6struct nvkm_devinit {
6 struct nvkm_subdev base; 7 const struct nvkm_devinit_func *func;
8 struct nvkm_subdev subdev;
7 bool post; 9 bool post;
8 void (*meminit)(struct nvkm_devinit *);
9 int (*pll_set)(struct nvkm_devinit *, u32 type, u32 freq);
10 u32 (*mmio)(struct nvkm_devinit *, u32 addr);
11}; 10};
12 11
13static inline struct nvkm_devinit * 12u32 nvkm_devinit_mmio(struct nvkm_devinit *, u32 addr);
14nvkm_devinit(void *obj) 13int nvkm_devinit_pll_set(struct nvkm_devinit *, u32 type, u32 khz);
15{ 14void nvkm_devinit_meminit(struct nvkm_devinit *);
16 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_DEVINIT); 15u64 nvkm_devinit_disable(struct nvkm_devinit *);
17} 16int nvkm_devinit_post(struct nvkm_devinit *, u64 *disable);
18 17
19extern struct nvkm_oclass *nv04_devinit_oclass; 18int nv04_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
20extern struct nvkm_oclass *nv05_devinit_oclass; 19int nv05_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
21extern struct nvkm_oclass *nv10_devinit_oclass; 20int nv10_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
22extern struct nvkm_oclass *nv1a_devinit_oclass; 21int nv1a_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
23extern struct nvkm_oclass *nv20_devinit_oclass; 22int nv20_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
24extern struct nvkm_oclass *nv50_devinit_oclass; 23int nv50_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
25extern struct nvkm_oclass *g84_devinit_oclass; 24int g84_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
26extern struct nvkm_oclass *g98_devinit_oclass; 25int g98_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
27extern struct nvkm_oclass *gt215_devinit_oclass; 26int gt215_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
28extern struct nvkm_oclass *mcp89_devinit_oclass; 27int mcp89_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
29extern struct nvkm_oclass *gf100_devinit_oclass; 28int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
30extern struct nvkm_oclass *gm107_devinit_oclass; 29int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
31extern struct nvkm_oclass *gm204_devinit_oclass; 30int gm204_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
32#endif 31#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 16da56cf43b0..85ab72c7f821 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -18,7 +18,7 @@
18#define NV_MEM_TARGET_VM 3 18#define NV_MEM_TARGET_VM 3
19#define NV_MEM_TARGET_GART 4 19#define NV_MEM_TARGET_GART 4
20 20
21#define NV_MEM_TYPE_VM 0x7f 21#define NVKM_RAM_TYPE_VM 0x7f
22#define NV_MEM_COMP_VM 0x03 22#define NV_MEM_COMP_VM 0x03
23 23
24struct nvkm_mem { 24struct nvkm_mem {
@@ -46,62 +46,47 @@ struct nvkm_fb_tile {
46}; 46};
47 47
48struct nvkm_fb { 48struct nvkm_fb {
49 struct nvkm_subdev base; 49 const struct nvkm_fb_func *func;
50 50 struct nvkm_subdev subdev;
51 bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
52 51
53 struct nvkm_ram *ram; 52 struct nvkm_ram *ram;
54 53
55 struct nvkm_mm vram;
56 struct nvkm_mm tags;
57
58 struct { 54 struct {
59 struct nvkm_fb_tile region[16]; 55 struct nvkm_fb_tile region[16];
60 int regions; 56 int regions;
61 void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
62 u32 pitch, u32 flags, struct nvkm_fb_tile *);
63 void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
64 struct nvkm_fb_tile *);
65 void (*fini)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
66 void (*prog)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
67 } tile; 57 } tile;
68}; 58};
69 59
70static inline struct nvkm_fb * 60bool nvkm_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
71nvkm_fb(void *obj) 61void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
72{ 62 u32 pitch, u32 flags, struct nvkm_fb_tile *);
73 /* fbram uses this before device subdev pointer is valid */ 63void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
74 if (nv_iclass(obj, NV_SUBDEV_CLASS) && 64void nvkm_fb_tile_prog(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
75 nv_subidx(obj) == NVDEV_SUBDEV_FB) 65
76 return obj; 66int nv04_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
77 67int nv10_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
78 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_FB); 68int nv1a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
79} 69int nv20_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
80 70int nv25_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
81extern struct nvkm_oclass *nv04_fb_oclass; 71int nv30_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
82extern struct nvkm_oclass *nv10_fb_oclass; 72int nv35_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
83extern struct nvkm_oclass *nv1a_fb_oclass; 73int nv36_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
84extern struct nvkm_oclass *nv20_fb_oclass; 74int nv40_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
85extern struct nvkm_oclass *nv25_fb_oclass; 75int nv41_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
86extern struct nvkm_oclass *nv30_fb_oclass; 76int nv44_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
87extern struct nvkm_oclass *nv35_fb_oclass; 77int nv46_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
88extern struct nvkm_oclass *nv36_fb_oclass; 78int nv47_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
89extern struct nvkm_oclass *nv40_fb_oclass; 79int nv49_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
90extern struct nvkm_oclass *nv41_fb_oclass; 80int nv4e_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
91extern struct nvkm_oclass *nv44_fb_oclass; 81int nv50_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
92extern struct nvkm_oclass *nv46_fb_oclass; 82int g84_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
93extern struct nvkm_oclass *nv47_fb_oclass; 83int gt215_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
94extern struct nvkm_oclass *nv49_fb_oclass; 84int mcp77_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
95extern struct nvkm_oclass *nv4e_fb_oclass; 85int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
96extern struct nvkm_oclass *nv50_fb_oclass; 86int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
97extern struct nvkm_oclass *g84_fb_oclass; 87int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
98extern struct nvkm_oclass *gt215_fb_oclass; 88int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
99extern struct nvkm_oclass *mcp77_fb_oclass; 89int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
100extern struct nvkm_oclass *mcp89_fb_oclass;
101extern struct nvkm_oclass *gf100_fb_oclass;
102extern struct nvkm_oclass *gk104_fb_oclass;
103extern struct nvkm_oclass *gk20a_fb_oclass;
104extern struct nvkm_oclass *gm107_fb_oclass;
105 90
106#include <subdev/bios.h> 91#include <subdev/bios.h>
107#include <subdev/bios/ramcfg.h> 92#include <subdev/bios/ramcfg.h>
@@ -112,36 +97,35 @@ struct nvkm_ram_data {
112 u32 freq; 97 u32 freq;
113}; 98};
114 99
100enum nvkm_ram_type {
101 NVKM_RAM_TYPE_UNKNOWN = 0,
102 NVKM_RAM_TYPE_STOLEN,
103 NVKM_RAM_TYPE_SGRAM,
104 NVKM_RAM_TYPE_SDRAM,
105 NVKM_RAM_TYPE_DDR1,
106 NVKM_RAM_TYPE_DDR2,
107 NVKM_RAM_TYPE_DDR3,
108 NVKM_RAM_TYPE_GDDR2,
109 NVKM_RAM_TYPE_GDDR3,
110 NVKM_RAM_TYPE_GDDR4,
111 NVKM_RAM_TYPE_GDDR5
112};
113
115struct nvkm_ram { 114struct nvkm_ram {
116 struct nvkm_object base; 115 const struct nvkm_ram_func *func;
117 enum { 116 struct nvkm_fb *fb;
118 NV_MEM_TYPE_UNKNOWN = 0, 117 enum nvkm_ram_type type;
119 NV_MEM_TYPE_STOLEN,
120 NV_MEM_TYPE_SGRAM,
121 NV_MEM_TYPE_SDRAM,
122 NV_MEM_TYPE_DDR1,
123 NV_MEM_TYPE_DDR2,
124 NV_MEM_TYPE_DDR3,
125 NV_MEM_TYPE_GDDR2,
126 NV_MEM_TYPE_GDDR3,
127 NV_MEM_TYPE_GDDR4,
128 NV_MEM_TYPE_GDDR5
129 } type;
130 u64 stolen;
131 u64 size; 118 u64 size;
132 u32 tags; 119
120#define NVKM_RAM_MM_SHIFT 12
121 struct nvkm_mm vram;
122 struct nvkm_mm tags;
123 u64 stolen;
133 124
134 int ranks; 125 int ranks;
135 int parts; 126 int parts;
136 int part_mask; 127 int part_mask;
137 128
138 int (*get)(struct nvkm_fb *, u64 size, u32 align, u32 size_nc,
139 u32 type, struct nvkm_mem **);
140 void (*put)(struct nvkm_fb *, struct nvkm_mem **);
141
142 int (*calc)(struct nvkm_fb *, u32 freq);
143 int (*prog)(struct nvkm_fb *);
144 void (*tidy)(struct nvkm_fb *);
145 u32 freq; 129 u32 freq;
146 u32 mr[16]; 130 u32 mr[16];
147 u32 mr1_nuts; 131 u32 mr1_nuts;
@@ -151,4 +135,17 @@ struct nvkm_ram {
151 struct nvkm_ram_data xition; 135 struct nvkm_ram_data xition;
152 struct nvkm_ram_data target; 136 struct nvkm_ram_data target;
153}; 137};
138
139struct nvkm_ram_func {
140 void *(*dtor)(struct nvkm_ram *);
141 int (*init)(struct nvkm_ram *);
142
143 int (*get)(struct nvkm_ram *, u64 size, u32 align, u32 size_nc,
144 u32 type, struct nvkm_mem **);
145 void (*put)(struct nvkm_ram *, struct nvkm_mem **);
146
147 int (*calc)(struct nvkm_ram *, u32 freq);
148 int (*prog)(struct nvkm_ram *);
149 void (*tidy)(struct nvkm_ram *);
150};
154#endif 151#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
index a1384786adc9..ae201e388487 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
@@ -1,28 +1,16 @@
1#ifndef __NVKM_FUSE_H__ 1#ifndef __NVKM_FUSE_H__
2#define __NVKM_FUSE_H__ 2#define __NVKM_FUSE_H__
3#include <core/subdev.h> 3#include <core/subdev.h>
4#include <core/device.h>
5 4
6struct nvkm_fuse { 5struct nvkm_fuse {
7 struct nvkm_subdev base; 6 const struct nvkm_fuse_func *func;
7 struct nvkm_subdev subdev;
8 spinlock_t lock;
8}; 9};
9 10
10static inline struct nvkm_fuse * 11u32 nvkm_fuse_read(struct nvkm_fuse *, u32 addr);
11nvkm_fuse(void *obj)
12{
13 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_FUSE);
14}
15 12
16#define nvkm_fuse_create(p, e, o, d) \ 13int nv50_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
17 nvkm_fuse_create_((p), (e), (o), sizeof(**d), (void **)d) 14int gf100_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
18 15int gm107_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
19int nvkm_fuse_create_(struct nvkm_object *, struct nvkm_object *,
20 struct nvkm_oclass *, int, void **);
21void _nvkm_fuse_dtor(struct nvkm_object *);
22int _nvkm_fuse_init(struct nvkm_object *);
23#define _nvkm_fuse_fini _nvkm_subdev_fini
24
25extern struct nvkm_oclass nv50_fuse_oclass;
26extern struct nvkm_oclass gf100_fuse_oclass;
27extern struct nvkm_oclass gm107_fuse_oclass;
28#endif 16#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
index ca5099a81b5a..9b9c6d2f90b6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
@@ -19,26 +19,21 @@ struct nvkm_gpio_ntfy_rep {
19}; 19};
20 20
21struct nvkm_gpio { 21struct nvkm_gpio {
22 struct nvkm_subdev base; 22 const struct nvkm_gpio_func *func;
23 struct nvkm_subdev subdev;
23 24
24 struct nvkm_event event; 25 struct nvkm_event event;
25
26 void (*reset)(struct nvkm_gpio *, u8 func);
27 int (*find)(struct nvkm_gpio *, int idx, u8 tag, u8 line,
28 struct dcb_gpio_func *);
29 int (*set)(struct nvkm_gpio *, int idx, u8 tag, u8 line, int state);
30 int (*get)(struct nvkm_gpio *, int idx, u8 tag, u8 line);
31}; 26};
32 27
33static inline struct nvkm_gpio * 28void nvkm_gpio_reset(struct nvkm_gpio *, u8 func);
34nvkm_gpio(void *obj) 29int nvkm_gpio_find(struct nvkm_gpio *, int idx, u8 tag, u8 line,
35{ 30 struct dcb_gpio_func *);
36 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_GPIO); 31int nvkm_gpio_set(struct nvkm_gpio *, int idx, u8 tag, u8 line, int state);
37} 32int nvkm_gpio_get(struct nvkm_gpio *, int idx, u8 tag, u8 line);
38 33
39extern struct nvkm_oclass *nv10_gpio_oclass; 34int nv10_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
40extern struct nvkm_oclass *nv50_gpio_oclass; 35int nv50_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
41extern struct nvkm_oclass *g94_gpio_oclass; 36int g94_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
42extern struct nvkm_oclass *gf110_gpio_oclass; 37int gf119_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
43extern struct nvkm_oclass *gk104_gpio_oclass; 38int gk104_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
44#endif 39#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index a2e33730f05e..6b6224dbd5bb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -6,15 +6,6 @@
6#include <subdev/bios.h> 6#include <subdev/bios.h>
7#include <subdev/bios/i2c.h> 7#include <subdev/bios/i2c.h>
8 8
9#define NV_I2C_PORT(n) (0x00 + (n))
10#define NV_I2C_AUX(n) (0x10 + (n))
11#define NV_I2C_EXT(n) (0x20 + (n))
12#define NV_I2C_DEFAULT(n) (0x80 + (n))
13
14#define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n))
15#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
16#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
17
18struct nvkm_i2c_ntfy_req { 9struct nvkm_i2c_ntfy_req {
19#define NVKM_I2C_PLUG 0x01 10#define NVKM_I2C_PLUG 0x01
20#define NVKM_I2C_UNPLUG 0x02 11#define NVKM_I2C_UNPLUG 0x02
@@ -29,72 +20,79 @@ struct nvkm_i2c_ntfy_rep {
29 u8 mask; 20 u8 mask;
30}; 21};
31 22
32struct nvkm_i2c_port { 23struct nvkm_i2c_bus_probe {
33 struct nvkm_object base; 24 struct i2c_board_info dev;
34 struct i2c_adapter adapter; 25 u8 udelay; /* set to 0 to use the standard delay */
35 struct mutex mutex; 26};
36 27
37 struct list_head head; 28struct nvkm_i2c_bus {
38 u8 index; 29 const struct nvkm_i2c_bus_func *func;
39 int aux; 30 struct nvkm_i2c_pad *pad;
31#define NVKM_I2C_BUS_CCB(n) /* 'n' is ccb index */ (n)
32#define NVKM_I2C_BUS_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x100)
33#define NVKM_I2C_BUS_PRI /* ccb primary comm. port */ -1
34#define NVKM_I2C_BUS_SEC /* ccb secondary comm. port */ -2
35 int id;
40 36
41 const struct nvkm_i2c_func *func; 37 struct mutex mutex;
38 struct list_head head;
39 struct i2c_adapter i2c;
42}; 40};
43 41
44struct nvkm_i2c_func { 42int nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *);
45 void (*drive_scl)(struct nvkm_i2c_port *, int); 43void nvkm_i2c_bus_release(struct nvkm_i2c_bus *);
46 void (*drive_sda)(struct nvkm_i2c_port *, int); 44int nvkm_i2c_bus_probe(struct nvkm_i2c_bus *, const char *,
47 int (*sense_scl)(struct nvkm_i2c_port *); 45 struct nvkm_i2c_bus_probe *,
48 int (*sense_sda)(struct nvkm_i2c_port *); 46 bool (*)(struct nvkm_i2c_bus *,
47 struct i2c_board_info *, void *), void *);
49 48
50 int (*aux)(struct nvkm_i2c_port *, bool, u8, u32, u8 *, u8); 49struct nvkm_i2c_aux {
51 int (*pattern)(struct nvkm_i2c_port *, int pattern); 50 const struct nvkm_i2c_aux_func *func;
52 int (*lnk_ctl)(struct nvkm_i2c_port *, int nr, int bw, bool enh); 51 struct nvkm_i2c_pad *pad;
53 int (*drv_ctl)(struct nvkm_i2c_port *, int lane, int sw, int pe); 52#define NVKM_I2C_AUX_CCB(n) /* 'n' is ccb index */ (n)
54}; 53#define NVKM_I2C_AUX_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x100)
54 int id;
55 55
56struct nvkm_i2c_board_info { 56 struct mutex mutex;
57 struct i2c_board_info dev; 57 struct list_head head;
58 u8 udelay; /* set to 0 to use the standard delay */ 58 struct i2c_adapter i2c;
59
60 u32 intr;
59}; 61};
60 62
63void nvkm_i2c_aux_monitor(struct nvkm_i2c_aux *, bool monitor);
64int nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *);
65void nvkm_i2c_aux_release(struct nvkm_i2c_aux *);
66int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
67 u32 addr, u8 *data, u8 size);
68int nvkm_i2c_aux_lnk_ctl(struct nvkm_i2c_aux *, int link_nr, int link_bw,
69 bool enhanced_framing);
70
61struct nvkm_i2c { 71struct nvkm_i2c {
62 struct nvkm_subdev base; 72 const struct nvkm_i2c_func *func;
63 struct nvkm_event event; 73 struct nvkm_subdev subdev;
64 74
65 struct nvkm_i2c_port *(*find)(struct nvkm_i2c *, u8 index); 75 struct list_head pad;
66 struct nvkm_i2c_port *(*find_type)(struct nvkm_i2c *, u16 type); 76 struct list_head bus;
67 int (*acquire_pad)(struct nvkm_i2c_port *, unsigned long timeout); 77 struct list_head aux;
68 void (*release_pad)(struct nvkm_i2c_port *); 78
69 int (*acquire)(struct nvkm_i2c_port *, unsigned long timeout); 79 struct nvkm_event event;
70 void (*release)(struct nvkm_i2c_port *);
71 int (*identify)(struct nvkm_i2c *, int index,
72 const char *what, struct nvkm_i2c_board_info *,
73 bool (*match)(struct nvkm_i2c_port *,
74 struct i2c_board_info *, void *),
75 void *);
76
77 wait_queue_head_t wait;
78 struct list_head ports;
79}; 80};
80 81
81static inline struct nvkm_i2c * 82struct nvkm_i2c_bus *nvkm_i2c_bus_find(struct nvkm_i2c *, int);
82nvkm_i2c(void *obj) 83struct nvkm_i2c_aux *nvkm_i2c_aux_find(struct nvkm_i2c *, int);
83{
84 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_I2C);
85}
86 84
87extern struct nvkm_oclass *nv04_i2c_oclass; 85int nv04_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
88extern struct nvkm_oclass *nv4e_i2c_oclass; 86int nv4e_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
89extern struct nvkm_oclass *nv50_i2c_oclass; 87int nv50_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
90extern struct nvkm_oclass *g94_i2c_oclass; 88int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
91extern struct nvkm_oclass *gf110_i2c_oclass; 89int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
92extern struct nvkm_oclass *gf117_i2c_oclass; 90int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
93extern struct nvkm_oclass *gk104_i2c_oclass; 91int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
94extern struct nvkm_oclass *gm204_i2c_oclass; 92int gm204_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
95 93
96static inline int 94static inline int
97nv_rdi2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg) 95nvkm_rdi2cr(struct i2c_adapter *adap, u8 addr, u8 reg)
98{ 96{
99 u8 val; 97 u8 val;
100 struct i2c_msg msgs[] = { 98 struct i2c_msg msgs[] = {
@@ -102,7 +100,7 @@ nv_rdi2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg)
102 { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val }, 100 { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
103 }; 101 };
104 102
105 int ret = i2c_transfer(&port->adapter, msgs, 2); 103 int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
106 if (ret != 2) 104 if (ret != 2)
107 return -EIO; 105 return -EIO;
108 106
@@ -110,14 +108,14 @@ nv_rdi2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg)
110} 108}
111 109
112static inline int 110static inline int
113nv_wri2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg, u8 val) 111nvkm_wri2cr(struct i2c_adapter *adap, u8 addr, u8 reg, u8 val)
114{ 112{
115 u8 buf[2] = { reg, val }; 113 u8 buf[2] = { reg, val };
116 struct i2c_msg msgs[] = { 114 struct i2c_msg msgs[] = {
117 { .addr = addr, .flags = 0, .len = 2, .buf = buf }, 115 { .addr = addr, .flags = 0, .len = 2, .buf = buf },
118 }; 116 };
119 117
120 int ret = i2c_transfer(&port->adapter, msgs, 1); 118 int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
121 if (ret != 1) 119 if (ret != 1)
122 return -EIO; 120 return -EIO;
123 121
@@ -125,11 +123,30 @@ nv_wri2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg, u8 val)
125} 123}
126 124
127static inline bool 125static inline bool
128nv_probe_i2c(struct nvkm_i2c_port *port, u8 addr) 126nvkm_probe_i2c(struct i2c_adapter *adap, u8 addr)
129{ 127{
130 return nv_rdi2cr(port, addr, 0) >= 0; 128 return nvkm_rdi2cr(adap, addr, 0) >= 0;
131} 129}
132 130
133int nv_rdaux(struct nvkm_i2c_port *, u32 addr, u8 *data, u8 size); 131static inline int
134int nv_wraux(struct nvkm_i2c_port *, u32 addr, u8 *data, u8 size); 132nvkm_rdaux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
133{
134 int ret = nvkm_i2c_aux_acquire(aux);
135 if (ret == 0) {
136 ret = nvkm_i2c_aux_xfer(aux, true, 9, addr, data, size);
137 nvkm_i2c_aux_release(aux);
138 }
139 return ret;
140}
141
142static inline int
143nvkm_wraux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
144{
145 int ret = nvkm_i2c_aux_acquire(aux);
146 if (ret == 0) {
147 ret = nvkm_i2c_aux_xfer(aux, true, 8, addr, data, size);
148 nvkm_i2c_aux_release(aux);
149 }
150 return ret;
151}
135#endif 152#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
index 2150d8af0040..9d512cd5a0a7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
@@ -2,31 +2,7 @@
2#define __NVKM_IBUS_H__ 2#define __NVKM_IBUS_H__
3#include <core/subdev.h> 3#include <core/subdev.h>
4 4
5struct nvkm_ibus { 5int gf100_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
6 struct nvkm_subdev base; 6int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
7}; 7int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
8
9static inline struct nvkm_ibus *
10nvkm_ibus(void *obj)
11{
12 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_IBUS);
13}
14
15#define nvkm_ibus_create(p,e,o,d) \
16 nvkm_subdev_create_((p), (e), (o), 0, "PIBUS", "ibus", \
17 sizeof(**d), (void **)d)
18#define nvkm_ibus_destroy(p) \
19 nvkm_subdev_destroy(&(p)->base)
20#define nvkm_ibus_init(p) \
21 nvkm_subdev_init(&(p)->base)
22#define nvkm_ibus_fini(p,s) \
23 nvkm_subdev_fini(&(p)->base, (s))
24
25#define _nvkm_ibus_dtor _nvkm_subdev_dtor
26#define _nvkm_ibus_init _nvkm_subdev_init
27#define _nvkm_ibus_fini _nvkm_subdev_fini
28
29extern struct nvkm_oclass gf100_ibus_oclass;
30extern struct nvkm_oclass gk104_ibus_oclass;
31extern struct nvkm_oclass gk20a_ibus_oclass;
32#endif 8#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index 1bcb763cfca0..28bc202f9753 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -1,49 +1,29 @@
1#ifndef __NVKM_INSTMEM_H__ 1#ifndef __NVKM_INSTMEM_H__
2#define __NVKM_INSTMEM_H__ 2#define __NVKM_INSTMEM_H__
3#include <core/subdev.h> 3#include <core/subdev.h>
4 4struct nvkm_memory;
5struct nvkm_instobj {
6 struct nvkm_object base;
7 struct list_head head;
8 u32 *suspend;
9 u64 addr;
10 u32 size;
11};
12
13static inline struct nvkm_instobj *
14nv_memobj(void *obj)
15{
16#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
17 if (unlikely(!nv_iclass(obj, NV_MEMOBJ_CLASS)))
18 nv_assert("BAD CAST -> NvMemObj, %08x", nv_hclass(obj));
19#endif
20 return obj;
21}
22 5
23struct nvkm_instmem { 6struct nvkm_instmem {
24 struct nvkm_subdev base; 7 const struct nvkm_instmem_func *func;
25 struct list_head list; 8 struct nvkm_subdev subdev;
26 9
10 struct list_head list;
27 u32 reserved; 11 u32 reserved;
28 int (*alloc)(struct nvkm_instmem *, struct nvkm_object *, 12
29 u32 size, u32 align, struct nvkm_object **); 13 struct nvkm_memory *vbios;
14 struct nvkm_ramht *ramht;
15 struct nvkm_memory *ramro;
16 struct nvkm_memory *ramfc;
30}; 17};
31 18
32static inline struct nvkm_instmem * 19u32 nvkm_instmem_rd32(struct nvkm_instmem *, u32 addr);
33nvkm_instmem(void *obj) 20void nvkm_instmem_wr32(struct nvkm_instmem *, u32 addr, u32 data);
34{ 21int nvkm_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero,
35 /* nv04/nv40 impls need to create objects in their constructor, 22 struct nvkm_memory **);
36 * which is before the subdev pointer is valid
37 */
38 if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
39 nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM)
40 return obj;
41 23
42 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_INSTMEM);
43}
44 24
45extern struct nvkm_oclass *nv04_instmem_oclass; 25int nv04_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
46extern struct nvkm_oclass *nv40_instmem_oclass; 26int nv40_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
47extern struct nvkm_oclass *nv50_instmem_oclass; 27int nv50_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
48extern struct nvkm_oclass *gk20a_instmem_oclass; 28int gk20a_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
49#endif 29#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index cd5d29fc0565..c773b5e958b4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -1,31 +1,36 @@
1#ifndef __NVKM_LTC_H__ 1#ifndef __NVKM_LTC_H__
2#define __NVKM_LTC_H__ 2#define __NVKM_LTC_H__
3#include <core/subdev.h> 3#include <core/subdev.h>
4struct nvkm_mm_node; 4#include <core/mm.h>
5 5
6#define NVKM_LTC_MAX_ZBC_CNT 16 6#define NVKM_LTC_MAX_ZBC_CNT 16
7 7
8struct nvkm_ltc { 8struct nvkm_ltc {
9 struct nvkm_subdev base; 9 const struct nvkm_ltc_func *func;
10 struct nvkm_subdev subdev;
10 11
11 int (*tags_alloc)(struct nvkm_ltc *, u32 count, 12 u32 ltc_nr;
12 struct nvkm_mm_node **); 13 u32 lts_nr;
13 void (*tags_free)(struct nvkm_ltc *, struct nvkm_mm_node **); 14
14 void (*tags_clear)(struct nvkm_ltc *, u32 first, u32 count); 15 u32 num_tags;
16 u32 tag_base;
17 struct nvkm_mm tags;
18 struct nvkm_mm_node *tag_ram;
15 19
16 int zbc_min; 20 int zbc_min;
17 int zbc_max; 21 int zbc_max;
18 int (*zbc_color_get)(struct nvkm_ltc *, int index, const u32[4]); 22 u32 zbc_color[NVKM_LTC_MAX_ZBC_CNT][4];
19 int (*zbc_depth_get)(struct nvkm_ltc *, int index, const u32); 23 u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
20}; 24};
21 25
22static inline struct nvkm_ltc * 26int nvkm_ltc_tags_alloc(struct nvkm_ltc *, u32 count, struct nvkm_mm_node **);
23nvkm_ltc(void *obj) 27void nvkm_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);
24{ 28void nvkm_ltc_tags_clear(struct nvkm_ltc *, u32 first, u32 count);
25 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_LTC); 29
26} 30int nvkm_ltc_zbc_color_get(struct nvkm_ltc *, int index, const u32[4]);
31int nvkm_ltc_zbc_depth_get(struct nvkm_ltc *, int index, const u32);
27 32
28extern struct nvkm_oclass *gf100_ltc_oclass; 33int gf100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
29extern struct nvkm_oclass *gk104_ltc_oclass; 34int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
30extern struct nvkm_oclass *gm107_ltc_oclass; 35int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
31#endif 36#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 055bea7702a1..4de05e718f83 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -3,26 +3,19 @@
3#include <core/subdev.h> 3#include <core/subdev.h>
4 4
5struct nvkm_mc { 5struct nvkm_mc {
6 struct nvkm_subdev base; 6 const struct nvkm_mc_func *func;
7 bool use_msi; 7 struct nvkm_subdev subdev;
8 unsigned int irq;
9 void (*unk260)(struct nvkm_mc *, u32);
10}; 8};
11 9
12static inline struct nvkm_mc * 10void nvkm_mc_intr(struct nvkm_mc *, bool *handled);
13nvkm_mc(void *obj) 11void nvkm_mc_intr_unarm(struct nvkm_mc *);
14{ 12void nvkm_mc_intr_rearm(struct nvkm_mc *);
15 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MC); 13void nvkm_mc_unk260(struct nvkm_mc *, u32 data);
16}
17 14
18extern struct nvkm_oclass *nv04_mc_oclass; 15int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
19extern struct nvkm_oclass *nv40_mc_oclass; 16int nv44_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
20extern struct nvkm_oclass *nv44_mc_oclass; 17int nv50_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
21extern struct nvkm_oclass *nv4c_mc_oclass; 18int g98_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
22extern struct nvkm_oclass *nv50_mc_oclass; 19int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
23extern struct nvkm_oclass *g94_mc_oclass; 20int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
24extern struct nvkm_oclass *g98_mc_oclass;
25extern struct nvkm_oclass *gf100_mc_oclass;
26extern struct nvkm_oclass *gf106_mc_oclass;
27extern struct nvkm_oclass *gk20a_mc_oclass;
28#endif 21#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 3a5368776c31..dcd3deff27a4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -6,7 +6,7 @@ struct nvkm_device;
6struct nvkm_mem; 6struct nvkm_mem;
7 7
8struct nvkm_vm_pgt { 8struct nvkm_vm_pgt {
9 struct nvkm_gpuobj *obj[2]; 9 struct nvkm_memory *mem[2];
10 u32 refcount[2]; 10 u32 refcount[2];
11}; 11};
12 12
@@ -26,74 +26,23 @@ struct nvkm_vma {
26 26
27struct nvkm_vm { 27struct nvkm_vm {
28 struct nvkm_mmu *mmu; 28 struct nvkm_mmu *mmu;
29
30 struct mutex mutex;
29 struct nvkm_mm mm; 31 struct nvkm_mm mm;
30 struct kref refcount; 32 struct kref refcount;
31 33
32 struct list_head pgd_list; 34 struct list_head pgd_list;
33 atomic_t engref[NVDEV_SUBDEV_NR]; 35 atomic_t engref[NVKM_SUBDEV_NR];
34 36
35 struct nvkm_vm_pgt *pgt; 37 struct nvkm_vm_pgt *pgt;
36 u32 fpde; 38 u32 fpde;
37 u32 lpde; 39 u32 lpde;
38}; 40};
39 41
40struct nvkm_mmu {
41 struct nvkm_subdev base;
42
43 u64 limit;
44 u8 dma_bits;
45 u32 pgt_bits;
46 u8 spg_shift;
47 u8 lpg_shift;
48
49 int (*create)(struct nvkm_mmu *, u64 offset, u64 length,
50 u64 mm_offset, struct nvkm_vm **);
51
52 void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
53 struct nvkm_gpuobj *pgt[2]);
54 void (*map)(struct nvkm_vma *, struct nvkm_gpuobj *,
55 struct nvkm_mem *, u32 pte, u32 cnt,
56 u64 phys, u64 delta);
57 void (*map_sg)(struct nvkm_vma *, struct nvkm_gpuobj *,
58 struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
59 void (*unmap)(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt);
60 void (*flush)(struct nvkm_vm *);
61};
62
63static inline struct nvkm_mmu *
64nvkm_mmu(void *obj)
65{
66 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MMU);
67}
68
69#define nvkm_mmu_create(p,e,o,i,f,d) \
70 nvkm_subdev_create((p), (e), (o), 0, (i), (f), (d))
71#define nvkm_mmu_destroy(p) \
72 nvkm_subdev_destroy(&(p)->base)
73#define nvkm_mmu_init(p) \
74 nvkm_subdev_init(&(p)->base)
75#define nvkm_mmu_fini(p,s) \
76 nvkm_subdev_fini(&(p)->base, (s))
77
78#define _nvkm_mmu_dtor _nvkm_subdev_dtor
79#define _nvkm_mmu_init _nvkm_subdev_init
80#define _nvkm_mmu_fini _nvkm_subdev_fini
81
82extern struct nvkm_oclass nv04_mmu_oclass;
83extern struct nvkm_oclass nv41_mmu_oclass;
84extern struct nvkm_oclass nv44_mmu_oclass;
85extern struct nvkm_oclass nv50_mmu_oclass;
86extern struct nvkm_oclass gf100_mmu_oclass;
87
88int nv04_vm_create(struct nvkm_mmu *, u64, u64, u64,
89 struct nvkm_vm **);
90void nv04_mmu_dtor(struct nvkm_object *);
91
92int nvkm_vm_create(struct nvkm_mmu *, u64 offset, u64 length, u64 mm_offset,
93 u32 block, struct nvkm_vm **);
94int nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset, 42int nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
95 struct nvkm_vm **); 43 struct lock_class_key *, struct nvkm_vm **);
96int nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *pgd); 44int nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *pgd);
45int nvkm_vm_boot(struct nvkm_vm *, u64 size);
97int nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access, 46int nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
98 struct nvkm_vma *); 47 struct nvkm_vma *);
99void nvkm_vm_put(struct nvkm_vma *); 48void nvkm_vm_put(struct nvkm_vma *);
@@ -101,4 +50,19 @@ void nvkm_vm_map(struct nvkm_vma *, struct nvkm_mem *);
101void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *); 50void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
102void nvkm_vm_unmap(struct nvkm_vma *); 51void nvkm_vm_unmap(struct nvkm_vma *);
103void nvkm_vm_unmap_at(struct nvkm_vma *, u64 offset, u64 length); 52void nvkm_vm_unmap_at(struct nvkm_vma *, u64 offset, u64 length);
53
54struct nvkm_mmu {
55 const struct nvkm_mmu_func *func;
56 struct nvkm_subdev subdev;
57
58 u64 limit;
59 u8 dma_bits;
60 u8 lpg_shift;
61};
62
63int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
64int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
65int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
66int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
67int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
104#endif 68#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
index fba613477b1a..ed0250139dae 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
@@ -2,33 +2,5 @@
2#define __NVKM_MXM_H__ 2#define __NVKM_MXM_H__
3#include <core/subdev.h> 3#include <core/subdev.h>
4 4
5#define MXM_SANITISE_DCB 0x00000001 5int nv50_mxm_new(struct nvkm_device *, int, struct nvkm_subdev **);
6
7struct nvkm_mxm {
8 struct nvkm_subdev base;
9 u32 action;
10 u8 *mxms;
11};
12
13static inline struct nvkm_mxm *
14nvkm_mxm(void *obj)
15{
16 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MXM);
17}
18
19#define nvkm_mxm_create(p,e,o,d) \
20 nvkm_mxm_create_((p), (e), (o), sizeof(**d), (void **)d)
21#define nvkm_mxm_init(p) \
22 nvkm_subdev_init(&(p)->base)
23#define nvkm_mxm_fini(p,s) \
24 nvkm_subdev_fini(&(p)->base, (s))
25int nvkm_mxm_create_(struct nvkm_object *, struct nvkm_object *,
26 struct nvkm_oclass *, int, void **);
27void nvkm_mxm_destroy(struct nvkm_mxm *);
28
29#define _nvkm_mxm_dtor _nvkm_subdev_dtor
30#define _nvkm_mxm_init _nvkm_subdev_init
31#define _nvkm_mxm_fini _nvkm_subdev_fini
32
33extern struct nvkm_oclass nv50_mxm_oclass;
34#endif 6#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
new file mode 100644
index 000000000000..5b3c054f3b55
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -0,0 +1,34 @@
1#ifndef __NVKM_PCI_H__
2#define __NVKM_PCI_H__
3#include <core/subdev.h>
4
5struct nvkm_pci {
6 const struct nvkm_pci_func *func;
7 struct nvkm_subdev subdev;
8 struct pci_dev *pdev;
9 int irq;
10
11 struct {
12 struct agp_bridge_data *bridge;
13 u32 mode;
14 u64 base;
15 u64 size;
16 int mtrr;
17 bool cma;
18 bool acquired;
19 } agp;
20
21 bool msi;
22};
23
24u32 nvkm_pci_rd32(struct nvkm_pci *, u16 addr);
25void nvkm_pci_wr08(struct nvkm_pci *, u16 addr, u8 data);
26void nvkm_pci_wr32(struct nvkm_pci *, u16 addr, u32 data);
27void nvkm_pci_rom_shadow(struct nvkm_pci *, bool shadow);
28
29int nv04_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
30int nv40_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
31int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
32int nv50_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
33int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
34#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 755942352557..e61923d5e49c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -3,7 +3,8 @@
3#include <core/subdev.h> 3#include <core/subdev.h>
4 4
5struct nvkm_pmu { 5struct nvkm_pmu {
6 struct nvkm_subdev base; 6 const struct nvkm_pmu_func *func;
7 struct nvkm_subdev subdev;
7 8
8 struct { 9 struct {
9 u32 base; 10 u32 base;
@@ -20,24 +21,20 @@ struct nvkm_pmu {
20 u32 message; 21 u32 message;
21 u32 data[2]; 22 u32 data[2];
22 } recv; 23 } recv;
23
24 int (*message)(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
25 void (*pgob)(struct nvkm_pmu *, bool);
26}; 24};
27 25
28static inline struct nvkm_pmu * 26int nvkm_pmu_send(struct nvkm_pmu *, u32 reply[2], u32 process,
29nvkm_pmu(void *obj) 27 u32 message, u32 data0, u32 data1);
30{ 28void nvkm_pmu_pgob(struct nvkm_pmu *, bool enable);
31 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_PMU); 29
32} 30int gt215_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
33 31int gf100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
34extern struct nvkm_oclass *gt215_pmu_oclass; 32int gf119_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
35extern struct nvkm_oclass *gf100_pmu_oclass; 33int gk104_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
36extern struct nvkm_oclass *gf110_pmu_oclass; 34int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
37extern struct nvkm_oclass *gk104_pmu_oclass; 35int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
38extern struct nvkm_oclass *gk110_pmu_oclass; 36int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
39extern struct nvkm_oclass *gk208_pmu_oclass; 37int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
40extern struct nvkm_oclass *gk20a_pmu_oclass;
41 38
42/* interface to MEMX process running on PMU */ 39/* interface to MEMX process running on PMU */
43struct nvkm_memx; 40struct nvkm_memx;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index 6662829b6db1..b268b96faece 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -2,6 +2,28 @@
2#define __NVKM_THERM_H__ 2#define __NVKM_THERM_H__
3#include <core/subdev.h> 3#include <core/subdev.h>
4 4
5#include <subdev/bios.h>
6#include <subdev/bios/therm.h>
7#include <subdev/timer.h>
8
9enum nvkm_therm_thrs_direction {
10 NVKM_THERM_THRS_FALLING = 0,
11 NVKM_THERM_THRS_RISING = 1
12};
13
14enum nvkm_therm_thrs_state {
15 NVKM_THERM_THRS_LOWER = 0,
16 NVKM_THERM_THRS_HIGHER = 1
17};
18
19enum nvkm_therm_thrs {
20 NVKM_THERM_THRS_FANBOOST = 0,
21 NVKM_THERM_THRS_DOWNCLOCK = 1,
22 NVKM_THERM_THRS_CRITICAL = 2,
23 NVKM_THERM_THRS_SHUTDOWN = 3,
24 NVKM_THERM_THRS_NR
25};
26
5enum nvkm_therm_fan_mode { 27enum nvkm_therm_fan_mode {
6 NVKM_THERM_CTRL_NONE = 0, 28 NVKM_THERM_CTRL_NONE = 0,
7 NVKM_THERM_CTRL_MANUAL = 1, 29 NVKM_THERM_CTRL_MANUAL = 1,
@@ -24,56 +46,54 @@ enum nvkm_therm_attr_type {
24}; 46};
25 47
26struct nvkm_therm { 48struct nvkm_therm {
27 struct nvkm_subdev base; 49 const struct nvkm_therm_func *func;
50 struct nvkm_subdev subdev;
28 51
29 int (*pwm_ctrl)(struct nvkm_therm *, int line, bool); 52 /* automatic thermal management */
30 int (*pwm_get)(struct nvkm_therm *, int line, u32 *, u32 *); 53 struct nvkm_alarm alarm;
31 int (*pwm_set)(struct nvkm_therm *, int line, u32, u32); 54 spinlock_t lock;
32 int (*pwm_clock)(struct nvkm_therm *, int line); 55 struct nvbios_therm_trip_point *last_trip;
56 int mode;
57 int cstate;
58 int suspend;
59
60 /* bios */
61 struct nvbios_therm_sensor bios_sensor;
62
63 /* fan priv */
64 struct nvkm_fan *fan;
65
66 /* alarms priv */
67 struct {
68 spinlock_t alarm_program_lock;
69 struct nvkm_alarm therm_poll_alarm;
70 enum nvkm_therm_thrs_state alarm_state[NVKM_THERM_THRS_NR];
71 } sensor;
72
73 /* what should be done if the card overheats */
74 struct {
75 void (*downclock)(struct nvkm_therm *, bool active);
76 void (*pause)(struct nvkm_therm *, bool active);
77 } emergency;
78
79 /* ic */
80 struct i2c_client *ic;
33 81
34 int (*fan_get)(struct nvkm_therm *); 82 int (*fan_get)(struct nvkm_therm *);
35 int (*fan_set)(struct nvkm_therm *, int); 83 int (*fan_set)(struct nvkm_therm *, int);
36 int (*fan_sense)(struct nvkm_therm *);
37
38 int (*temp_get)(struct nvkm_therm *);
39 84
40 int (*attr_get)(struct nvkm_therm *, enum nvkm_therm_attr_type); 85 int (*attr_get)(struct nvkm_therm *, enum nvkm_therm_attr_type);
41 int (*attr_set)(struct nvkm_therm *, enum nvkm_therm_attr_type, int); 86 int (*attr_set)(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
42}; 87};
43 88
44static inline struct nvkm_therm * 89int nvkm_therm_temp_get(struct nvkm_therm *);
45nvkm_therm(void *obj) 90int nvkm_therm_fan_sense(struct nvkm_therm *);
46{ 91int nvkm_therm_cstate(struct nvkm_therm *, int, int);
47 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_THERM); 92
48} 93int nv40_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
49 94int nv50_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
50#define nvkm_therm_create(p,e,o,d) \ 95int g84_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
51 nvkm_therm_create_((p), (e), (o), sizeof(**d), (void **)d) 96int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
52#define nvkm_therm_destroy(p) ({ \ 97int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
53 struct nvkm_therm *therm = (p); \ 98int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
54 _nvkm_therm_dtor(nv_object(therm)); \
55})
56#define nvkm_therm_init(p) ({ \
57 struct nvkm_therm *therm = (p); \
58 _nvkm_therm_init(nv_object(therm)); \
59})
60#define nvkm_therm_fini(p,s) ({ \
61 struct nvkm_therm *therm = (p); \
62 _nvkm_therm_init(nv_object(therm), (s)); \
63})
64
65int nvkm_therm_create_(struct nvkm_object *, struct nvkm_object *,
66 struct nvkm_oclass *, int, void **);
67void _nvkm_therm_dtor(struct nvkm_object *);
68int _nvkm_therm_init(struct nvkm_object *);
69int _nvkm_therm_fini(struct nvkm_object *, bool);
70
71int nvkm_therm_cstate(struct nvkm_therm *, int, int);
72
73extern struct nvkm_oclass nv40_therm_oclass;
74extern struct nvkm_oclass nv50_therm_oclass;
75extern struct nvkm_oclass g84_therm_oclass;
76extern struct nvkm_oclass gt215_therm_oclass;
77extern struct nvkm_oclass gf110_therm_oclass;
78extern struct nvkm_oclass gm107_therm_oclass;
79#endif 99#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 4ad55082ef7a..62ed0880b0e1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -9,53 +9,58 @@ struct nvkm_alarm {
9}; 9};
10 10
11static inline void 11static inline void
12nvkm_alarm_init(struct nvkm_alarm *alarm, 12nvkm_alarm_init(struct nvkm_alarm *alarm, void (*func)(struct nvkm_alarm *))
13 void (*func)(struct nvkm_alarm *))
14{ 13{
15 INIT_LIST_HEAD(&alarm->head); 14 INIT_LIST_HEAD(&alarm->head);
16 alarm->func = func; 15 alarm->func = func;
17} 16}
18 17
19bool nvkm_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
20bool nvkm_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
21bool nvkm_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
22void nvkm_timer_alarm(void *, u32 nsec, struct nvkm_alarm *);
23void nvkm_timer_alarm_cancel(void *, struct nvkm_alarm *);
24
25#define NV_WAIT_DEFAULT 2000000000ULL
26#define nv_wait(o,a,m,v) \
27 nvkm_timer_wait_eq((o), NV_WAIT_DEFAULT, (a), (m), (v))
28#define nv_wait_ne(o,a,m,v) \
29 nvkm_timer_wait_ne((o), NV_WAIT_DEFAULT, (a), (m), (v))
30#define nv_wait_cb(o,c,d) \
31 nvkm_timer_wait_cb((o), NV_WAIT_DEFAULT, (c), (d))
32
33struct nvkm_timer { 18struct nvkm_timer {
34 struct nvkm_subdev base; 19 const struct nvkm_timer_func *func;
35 u64 (*read)(struct nvkm_timer *); 20 struct nvkm_subdev subdev;
36 void (*alarm)(struct nvkm_timer *, u64 time, struct nvkm_alarm *);
37 void (*alarm_cancel)(struct nvkm_timer *, struct nvkm_alarm *);
38};
39 21
40static inline struct nvkm_timer * 22 struct list_head alarms;
41nvkm_timer(void *obj) 23 spinlock_t lock;
42{ 24};
43 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_TIMER);
44}
45 25
46#define nvkm_timer_create(p,e,o,d) \ 26u64 nvkm_timer_read(struct nvkm_timer *);
47 nvkm_subdev_create_((p), (e), (o), 0, "PTIMER", "timer", \ 27void nvkm_timer_alarm(struct nvkm_timer *, u32 nsec, struct nvkm_alarm *);
48 sizeof(**d), (void **)d) 28void nvkm_timer_alarm_cancel(struct nvkm_timer *, struct nvkm_alarm *);
49#define nvkm_timer_destroy(p) \
50 nvkm_subdev_destroy(&(p)->base)
51#define nvkm_timer_init(p) \
52 nvkm_subdev_init(&(p)->base)
53#define nvkm_timer_fini(p,s) \
54 nvkm_subdev_fini(&(p)->base, (s))
55 29
56int nvkm_timer_create_(struct nvkm_object *, struct nvkm_engine *, 30/* Delay based on GPU time (ie. PTIMER).
57 struct nvkm_oclass *, int size, void **); 31 *
32 * Will return -ETIMEDOUT unless the loop was terminated with 'break',
33 * where it will return the number of nanoseconds taken instead.
34 *
35 * NVKM_DELAY can be passed for 'cond' to disable the timeout warning,
36 * which is useful for unconditional delay loops.
37 */
38#define NVKM_DELAY _warn = false;
39#define nvkm_nsec(d,n,cond...) ({ \
40 struct nvkm_device *_device = (d); \
41 struct nvkm_timer *_tmr = _device->timer; \
42 u64 _nsecs = (n), _time0 = nvkm_timer_read(_tmr); \
43 s64 _taken = 0; \
44 bool _warn = true; \
45 \
46 do { \
47 cond \
48 } while (_taken = nvkm_timer_read(_tmr) - _time0, _taken < _nsecs); \
49 \
50 if (_taken >= _nsecs) { \
51 if (_warn) { \
52 dev_warn(_device->dev, "timeout at %s:%d/%s()!\n", \
53 __FILE__, __LINE__, __func__); \
54 } \
55 _taken = -ETIMEDOUT; \
56 } \
57 _taken; \
58})
59#define nvkm_usec(d,u,cond...) nvkm_nsec((d), (u) * 1000, ##cond)
60#define nvkm_msec(d,m,cond...) nvkm_usec((d), (m) * 1000, ##cond)
58 61
59extern struct nvkm_oclass nv04_timer_oclass; 62int nv04_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
60extern struct nvkm_oclass gk20a_timer_oclass; 63int nv40_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
64int nv41_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
65int gk20a_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
61#endif 66#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
index fee09ad818e4..ce5636fe2a66 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
@@ -1,30 +1,28 @@
1#ifndef __NOUVEAU_VGA_H__ 1#ifndef __NOUVEAU_VGA_H__
2#define __NOUVEAU_VGA_H__ 2#define __NOUVEAU_VGA_H__
3 3#include <core/subdev.h>
4#include <core/os.h>
5 4
6/* access to various legacy io ports */ 5/* access to various legacy io ports */
7u8 nv_rdport(void *obj, int head, u16 port); 6u8 nvkm_rdport(struct nvkm_device *, int head, u16 port);
8void nv_wrport(void *obj, int head, u16 port, u8 value); 7void nvkm_wrport(struct nvkm_device *, int head, u16 port, u8 value);
9 8
10/* VGA Sequencer */ 9/* VGA Sequencer */
11u8 nv_rdvgas(void *obj, int head, u8 index); 10u8 nvkm_rdvgas(struct nvkm_device *, int head, u8 index);
12void nv_wrvgas(void *obj, int head, u8 index, u8 value); 11void nvkm_wrvgas(struct nvkm_device *, int head, u8 index, u8 value);
13 12
14/* VGA Graphics */ 13/* VGA Graphics */
15u8 nv_rdvgag(void *obj, int head, u8 index); 14u8 nvkm_rdvgag(struct nvkm_device *, int head, u8 index);
16void nv_wrvgag(void *obj, int head, u8 index, u8 value); 15void nvkm_wrvgag(struct nvkm_device *, int head, u8 index, u8 value);
17 16
18/* VGA CRTC */ 17/* VGA CRTC */
19u8 nv_rdvgac(void *obj, int head, u8 index); 18u8 nvkm_rdvgac(struct nvkm_device *, int head, u8 index);
20void nv_wrvgac(void *obj, int head, u8 index, u8 value); 19void nvkm_wrvgac(struct nvkm_device *, int head, u8 index, u8 value);
21 20
22/* VGA indexed port access dispatcher */ 21/* VGA indexed port access dispatcher */
23u8 nv_rdvgai(void *obj, int head, u16 port, u8 index); 22u8 nvkm_rdvgai(struct nvkm_device *, int head, u16 port, u8 index);
24void nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value); 23void nvkm_wrvgai(struct nvkm_device *, int head, u16 port, u8 index, u8 value);
25
26bool nv_lockvgac(void *obj, bool lock);
27u8 nv_rdvgaowner(void *obj);
28void nv_wrvgaowner(void *obj, u8);
29 24
25bool nvkm_lockvgac(struct nvkm_device *, bool lock);
26u8 nvkm_rdvgaowner(struct nvkm_device *);
27void nvkm_wrvgaowner(struct nvkm_device *, u8);
30#endif 28#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index e3d7243fbb1d..5c8a3f1196de 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -2,19 +2,9 @@
2#define __NVKM_VOLT_H__ 2#define __NVKM_VOLT_H__
3#include <core/subdev.h> 3#include <core/subdev.h>
4 4
5struct nvkm_voltage {
6 u32 uv;
7 u8 id;
8};
9
10struct nvkm_volt { 5struct nvkm_volt {
11 struct nvkm_subdev base; 6 const struct nvkm_volt_func *func;
12 7 struct nvkm_subdev subdev;
13 int (*vid_get)(struct nvkm_volt *);
14 int (*get)(struct nvkm_volt *);
15 int (*vid_set)(struct nvkm_volt *, u8 vid);
16 int (*set)(struct nvkm_volt *, u32 uv);
17 int (*set_id)(struct nvkm_volt *, u8 id, int condition);
18 8
19 u8 vid_mask; 9 u8 vid_mask;
20 u8 vid_nr; 10 u8 vid_nr;
@@ -24,35 +14,9 @@ struct nvkm_volt {
24 } vid[256]; 14 } vid[256];
25}; 15};
26 16
27static inline struct nvkm_volt * 17int nvkm_volt_get(struct nvkm_volt *);
28nvkm_volt(void *obj) 18int nvkm_volt_set_id(struct nvkm_volt *, u8 id, int condition);
29{
30 return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_VOLT);
31}
32
33#define nvkm_volt_create(p, e, o, d) \
34 nvkm_volt_create_((p), (e), (o), sizeof(**d), (void **)d)
35#define nvkm_volt_destroy(p) ({ \
36 struct nvkm_volt *v = (p); \
37 _nvkm_volt_dtor(nv_object(v)); \
38})
39#define nvkm_volt_init(p) ({ \
40 struct nvkm_volt *v = (p); \
41 _nvkm_volt_init(nv_object(v)); \
42})
43#define nvkm_volt_fini(p,s) \
44 nvkm_subdev_fini((p), (s))
45
46int nvkm_volt_create_(struct nvkm_object *, struct nvkm_object *,
47 struct nvkm_oclass *, int, void **);
48void _nvkm_volt_dtor(struct nvkm_object *);
49int _nvkm_volt_init(struct nvkm_object *);
50#define _nvkm_volt_fini _nvkm_subdev_fini
51
52extern struct nvkm_oclass nv40_volt_oclass;
53extern struct nvkm_oclass gk20a_volt_oclass;
54 19
55int nvkm_voltgpio_init(struct nvkm_volt *); 20int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
56int nvkm_voltgpio_get(struct nvkm_volt *); 21int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
57int nvkm_voltgpio_set(struct nvkm_volt *, u8);
58#endif 22#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index d8b0891a141c..d336c2247d6a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -51,7 +51,7 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
51 * device (ie. the one that belongs to the fd it 51 * device (ie. the one that belongs to the fd it
52 * opened) 52 * opened)
53 */ 53 */
54 if (nvif_device_init(&cli->base.base, NULL, 54 if (nvif_device_init(&cli->base.object,
55 NOUVEAU_ABI16_DEVICE, NV_DEVICE, 55 NOUVEAU_ABI16_DEVICE, NV_DEVICE,
56 &args, sizeof(args), 56 &args, sizeof(args),
57 &abi16->device) == 0) 57 &abi16->device) == 0)
@@ -69,28 +69,28 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
69int 69int
70nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret) 70nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
71{ 71{
72 struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base); 72 struct nouveau_cli *cli = (void *)abi16->device.object.client;
73 mutex_unlock(&cli->mutex); 73 mutex_unlock(&cli->mutex);
74 return ret; 74 return ret;
75} 75}
76 76
77u16 77s32
78nouveau_abi16_swclass(struct nouveau_drm *drm) 78nouveau_abi16_swclass(struct nouveau_drm *drm)
79{ 79{
80 switch (drm->device.info.family) { 80 switch (drm->device.info.family) {
81 case NV_DEVICE_INFO_V0_TNT: 81 case NV_DEVICE_INFO_V0_TNT:
82 return 0x006e; 82 return NVIF_IOCTL_NEW_V0_SW_NV04;
83 case NV_DEVICE_INFO_V0_CELSIUS: 83 case NV_DEVICE_INFO_V0_CELSIUS:
84 case NV_DEVICE_INFO_V0_KELVIN: 84 case NV_DEVICE_INFO_V0_KELVIN:
85 case NV_DEVICE_INFO_V0_RANKINE: 85 case NV_DEVICE_INFO_V0_RANKINE:
86 case NV_DEVICE_INFO_V0_CURIE: 86 case NV_DEVICE_INFO_V0_CURIE:
87 return 0x016e; 87 return NVIF_IOCTL_NEW_V0_SW_NV10;
88 case NV_DEVICE_INFO_V0_TESLA: 88 case NV_DEVICE_INFO_V0_TESLA:
89 return 0x506e; 89 return NVIF_IOCTL_NEW_V0_SW_NV50;
90 case NV_DEVICE_INFO_V0_FERMI: 90 case NV_DEVICE_INFO_V0_FERMI:
91 case NV_DEVICE_INFO_V0_KEPLER: 91 case NV_DEVICE_INFO_V0_KEPLER:
92 case NV_DEVICE_INFO_V0_MAXWELL: 92 case NV_DEVICE_INFO_V0_MAXWELL:
93 return 0x906e; 93 return NVIF_IOCTL_NEW_V0_SW_GF100;
94 } 94 }
95 95
96 return 0x0000; 96 return 0x0000;
@@ -100,6 +100,7 @@ static void
100nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan, 100nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
101 struct nouveau_abi16_ntfy *ntfy) 101 struct nouveau_abi16_ntfy *ntfy)
102{ 102{
103 nvif_object_fini(&ntfy->object);
103 nvkm_mm_free(&chan->heap, &ntfy->node); 104 nvkm_mm_free(&chan->heap, &ntfy->node);
104 list_del(&ntfy->head); 105 list_del(&ntfy->head);
105 kfree(ntfy); 106 kfree(ntfy);
@@ -132,7 +133,8 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
132 133
133 /* destroy channel object, all children will be killed too */ 134 /* destroy channel object, all children will be killed too */
134 if (chan->chan) { 135 if (chan->chan) {
135 abi16->handles &= ~(1ULL << (chan->chan->object->handle & 0xffff)); 136 abi16->handles &= ~(1ULL << (chan->chan->user.handle & 0xffff));
137 nouveau_channel_idle(chan->chan);
136 nouveau_channel_del(&chan->chan); 138 nouveau_channel_del(&chan->chan);
137 } 139 }
138 140
@@ -143,7 +145,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
143void 145void
144nouveau_abi16_fini(struct nouveau_abi16 *abi16) 146nouveau_abi16_fini(struct nouveau_abi16 *abi16)
145{ 147{
146 struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base); 148 struct nouveau_cli *cli = (void *)abi16->device.object.client;
147 struct nouveau_abi16_chan *chan, *temp; 149 struct nouveau_abi16_chan *chan, *temp;
148 150
149 /* cleanup channels */ 151 /* cleanup channels */
@@ -164,7 +166,6 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
164 struct nouveau_cli *cli = nouveau_cli(file_priv); 166 struct nouveau_cli *cli = nouveau_cli(file_priv);
165 struct nouveau_drm *drm = nouveau_drm(dev); 167 struct nouveau_drm *drm = nouveau_drm(dev);
166 struct nvif_device *device = &drm->device; 168 struct nvif_device *device = &drm->device;
167 struct nvkm_timer *ptimer = nvxx_timer(device);
168 struct nvkm_gr *gr = nvxx_gr(device); 169 struct nvkm_gr *gr = nvxx_gr(device);
169 struct drm_nouveau_getparam *getparam = data; 170 struct drm_nouveau_getparam *getparam = data;
170 171
@@ -173,19 +174,19 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
173 getparam->value = device->info.chipset; 174 getparam->value = device->info.chipset;
174 break; 175 break;
175 case NOUVEAU_GETPARAM_PCI_VENDOR: 176 case NOUVEAU_GETPARAM_PCI_VENDOR:
176 if (nv_device_is_pci(nvxx_device(device))) 177 if (nvxx_device(device)->func->pci)
177 getparam->value = dev->pdev->vendor; 178 getparam->value = dev->pdev->vendor;
178 else 179 else
179 getparam->value = 0; 180 getparam->value = 0;
180 break; 181 break;
181 case NOUVEAU_GETPARAM_PCI_DEVICE: 182 case NOUVEAU_GETPARAM_PCI_DEVICE:
182 if (nv_device_is_pci(nvxx_device(device))) 183 if (nvxx_device(device)->func->pci)
183 getparam->value = dev->pdev->device; 184 getparam->value = dev->pdev->device;
184 else 185 else
185 getparam->value = 0; 186 getparam->value = 0;
186 break; 187 break;
187 case NOUVEAU_GETPARAM_BUS_TYPE: 188 case NOUVEAU_GETPARAM_BUS_TYPE:
188 if (!nv_device_is_pci(nvxx_device(device))) 189 if (!nvxx_device(device)->func->pci)
189 getparam->value = 3; 190 getparam->value = 3;
190 else 191 else
191 if (drm_pci_device_is_agp(dev)) 192 if (drm_pci_device_is_agp(dev))
@@ -206,7 +207,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
206 getparam->value = 0; /* deprecated */ 207 getparam->value = 0; /* deprecated */
207 break; 208 break;
208 case NOUVEAU_GETPARAM_PTIMER_TIME: 209 case NOUVEAU_GETPARAM_PTIMER_TIME:
209 getparam->value = ptimer->read(ptimer); 210 getparam->value = nvif_device_time(device);
210 break; 211 break;
211 case NOUVEAU_GETPARAM_HAS_BO_USAGE: 212 case NOUVEAU_GETPARAM_HAS_BO_USAGE:
212 getparam->value = 1; 213 getparam->value = 1;
@@ -215,10 +216,10 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
215 getparam->value = 1; 216 getparam->value = 1;
216 break; 217 break;
217 case NOUVEAU_GETPARAM_GRAPH_UNITS: 218 case NOUVEAU_GETPARAM_GRAPH_UNITS:
218 getparam->value = gr->units ? gr->units(gr) : 0; 219 getparam->value = nvkm_gr_units(gr);
219 break; 220 break;
220 default: 221 default:
221 NV_PRINTK(debug, cli, "unknown parameter %lld\n", getparam->param); 222 NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
222 return -EINVAL; 223 return -EINVAL;
223 } 224 }
224 225
@@ -337,7 +338,7 @@ nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
337 struct nouveau_abi16_chan *chan; 338 struct nouveau_abi16_chan *chan;
338 339
339 list_for_each_entry(chan, &abi16->channels, head) { 340 list_for_each_entry(chan, &abi16->channels, head) {
340 if (chan->chan->object->handle == NOUVEAU_ABI16_CHAN(channel)) 341 if (chan->chan->user.handle == NOUVEAU_ABI16_CHAN(channel))
341 return chan; 342 return chan;
342 } 343 }
343 344
@@ -365,40 +366,91 @@ int
365nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) 366nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
366{ 367{
367 struct drm_nouveau_grobj_alloc *init = data; 368 struct drm_nouveau_grobj_alloc *init = data;
368 struct {
369 struct nvif_ioctl_v0 ioctl;
370 struct nvif_ioctl_new_v0 new;
371 } args = {
372 .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
373 .ioctl.type = NVIF_IOCTL_V0_NEW,
374 .ioctl.path_nr = 3,
375 .ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
376 .ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
377 .ioctl.path[0] = NOUVEAU_ABI16_CHAN(init->channel),
378 .new.route = NVDRM_OBJECT_ABI16,
379 .new.handle = init->handle,
380 .new.oclass = init->class,
381 };
382 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 369 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
383 struct nouveau_drm *drm = nouveau_drm(dev); 370 struct nouveau_abi16_chan *chan;
371 struct nouveau_abi16_ntfy *ntfy;
384 struct nvif_client *client; 372 struct nvif_client *client;
385 int ret; 373 struct nvif_sclass *sclass;
374 s32 oclass = 0;
375 int ret, i;
386 376
387 if (unlikely(!abi16)) 377 if (unlikely(!abi16))
388 return -ENOMEM; 378 return -ENOMEM;
389 379
390 if (init->handle == ~0) 380 if (init->handle == ~0)
391 return nouveau_abi16_put(abi16, -EINVAL); 381 return nouveau_abi16_put(abi16, -EINVAL);
392 client = nvif_client(nvif_object(&abi16->device)); 382 client = abi16->device.object.client;
383
384 chan = nouveau_abi16_chan(abi16, init->channel);
385 if (!chan)
386 return nouveau_abi16_put(abi16, -ENOENT);
387
388 ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
389 if (ret < 0)
390 return nouveau_abi16_put(abi16, ret);
393 391
394 /* compatibility with userspace that assumes 506e for all chipsets */ 392 if ((init->class & 0x00ff) == 0x006e) {
395 if (init->class == 0x506e) { 393 /* nvsw: compatibility with older 0x*6e class identifier */
396 init->class = nouveau_abi16_swclass(drm); 394 for (i = 0; !oclass && i < ret; i++) {
397 if (init->class == 0x906e) 395 switch (sclass[i].oclass) {
398 return nouveau_abi16_put(abi16, 0); 396 case NVIF_IOCTL_NEW_V0_SW_NV04:
397 case NVIF_IOCTL_NEW_V0_SW_NV10:
398 case NVIF_IOCTL_NEW_V0_SW_NV50:
399 case NVIF_IOCTL_NEW_V0_SW_GF100:
400 oclass = sclass[i].oclass;
401 break;
402 default:
403 break;
404 }
405 }
406 } else
407 if ((init->class & 0x00ff) == 0x00b1) {
408 /* msvld: compatibility with incorrect version exposure */
409 for (i = 0; i < ret; i++) {
410 if ((sclass[i].oclass & 0x00ff) == 0x00b1) {
411 oclass = sclass[i].oclass;
412 break;
413 }
414 }
415 } else
416 if ((init->class & 0x00ff) == 0x00b2) { /* mspdec */
417 /* mspdec: compatibility with incorrect version exposure */
418 for (i = 0; i < ret; i++) {
419 if ((sclass[i].oclass & 0x00ff) == 0x00b2) {
420 oclass = sclass[i].oclass;
421 break;
422 }
423 }
424 } else
425 if ((init->class & 0x00ff) == 0x00b3) { /* msppp */
426 /* msppp: compatibility with incorrect version exposure */
427 for (i = 0; i < ret; i++) {
428 if ((sclass[i].oclass & 0x00ff) == 0x00b3) {
429 oclass = sclass[i].oclass;
430 break;
431 }
432 }
433 } else {
434 oclass = init->class;
399 } 435 }
400 436
401 ret = nvif_client_ioctl(client, &args, sizeof(args)); 437 nvif_object_sclass_put(&sclass);
438 if (!oclass)
439 return nouveau_abi16_put(abi16, -EINVAL);
440
441 ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
442 if (!ntfy)
443 return nouveau_abi16_put(abi16, -ENOMEM);
444
445 list_add(&ntfy->head, &chan->notifiers);
446
447 client->route = NVDRM_OBJECT_ABI16;
448 ret = nvif_object_init(&chan->chan->user, init->handle, oclass,
449 NULL, 0, &ntfy->object);
450 client->route = NVDRM_OBJECT_NVIF;
451
452 if (ret)
453 nouveau_abi16_ntfy_fini(chan, ntfy);
402 return nouveau_abi16_put(abi16, ret); 454 return nouveau_abi16_put(abi16, ret);
403} 455}
404 456
@@ -406,27 +458,13 @@ int
406nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) 458nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
407{ 459{
408 struct drm_nouveau_notifierobj_alloc *info = data; 460 struct drm_nouveau_notifierobj_alloc *info = data;
409 struct {
410 struct nvif_ioctl_v0 ioctl;
411 struct nvif_ioctl_new_v0 new;
412 struct nv_dma_v0 ctxdma;
413 } args = {
414 .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
415 .ioctl.type = NVIF_IOCTL_V0_NEW,
416 .ioctl.path_nr = 3,
417 .ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
418 .ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
419 .ioctl.path[0] = NOUVEAU_ABI16_CHAN(info->channel),
420 .new.route = NVDRM_OBJECT_ABI16,
421 .new.handle = info->handle,
422 .new.oclass = NV_DMA_IN_MEMORY,
423 };
424 struct nouveau_drm *drm = nouveau_drm(dev); 461 struct nouveau_drm *drm = nouveau_drm(dev);
425 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 462 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
426 struct nouveau_abi16_chan *chan; 463 struct nouveau_abi16_chan *chan;
427 struct nouveau_abi16_ntfy *ntfy; 464 struct nouveau_abi16_ntfy *ntfy;
428 struct nvif_device *device = &abi16->device; 465 struct nvif_device *device = &abi16->device;
429 struct nvif_client *client; 466 struct nvif_client *client;
467 struct nv_dma_v0 args = {};
430 int ret; 468 int ret;
431 469
432 if (unlikely(!abi16)) 470 if (unlikely(!abi16))
@@ -435,7 +473,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
435 /* completely unnecessary for these chipsets... */ 473 /* completely unnecessary for these chipsets... */
436 if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI)) 474 if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
437 return nouveau_abi16_put(abi16, -EINVAL); 475 return nouveau_abi16_put(abi16, -EINVAL);
438 client = nvif_client(nvif_object(&abi16->device)); 476 client = abi16->device.object.client;
439 477
440 chan = nouveau_abi16_chan(abi16, info->channel); 478 chan = nouveau_abi16_chan(abi16, info->channel);
441 if (!chan) 479 if (!chan)
@@ -446,41 +484,43 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
446 return nouveau_abi16_put(abi16, -ENOMEM); 484 return nouveau_abi16_put(abi16, -ENOMEM);
447 485
448 list_add(&ntfy->head, &chan->notifiers); 486 list_add(&ntfy->head, &chan->notifiers);
449 ntfy->handle = info->handle;
450 487
451 ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1, 488 ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
452 &ntfy->node); 489 &ntfy->node);
453 if (ret) 490 if (ret)
454 goto done; 491 goto done;
455 492
456 args.ctxdma.start = ntfy->node->offset; 493 args.start = ntfy->node->offset;
457 args.ctxdma.limit = ntfy->node->offset + ntfy->node->length - 1; 494 args.limit = ntfy->node->offset + ntfy->node->length - 1;
458 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { 495 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
459 args.ctxdma.target = NV_DMA_V0_TARGET_VM; 496 args.target = NV_DMA_V0_TARGET_VM;
460 args.ctxdma.access = NV_DMA_V0_ACCESS_VM; 497 args.access = NV_DMA_V0_ACCESS_VM;
461 args.ctxdma.start += chan->ntfy_vma.offset; 498 args.start += chan->ntfy_vma.offset;
462 args.ctxdma.limit += chan->ntfy_vma.offset; 499 args.limit += chan->ntfy_vma.offset;
463 } else 500 } else
464 if (drm->agp.stat == ENABLED) { 501 if (drm->agp.bridge) {
465 args.ctxdma.target = NV_DMA_V0_TARGET_AGP; 502 args.target = NV_DMA_V0_TARGET_AGP;
466 args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR; 503 args.access = NV_DMA_V0_ACCESS_RDWR;
467 args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset; 504 args.start += drm->agp.base + chan->ntfy->bo.offset;
468 args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset; 505 args.limit += drm->agp.base + chan->ntfy->bo.offset;
469 client->super = true;
470 } else { 506 } else {
471 args.ctxdma.target = NV_DMA_V0_TARGET_VM; 507 args.target = NV_DMA_V0_TARGET_VM;
472 args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR; 508 args.access = NV_DMA_V0_ACCESS_RDWR;
473 args.ctxdma.start += chan->ntfy->bo.offset; 509 args.start += chan->ntfy->bo.offset;
474 args.ctxdma.limit += chan->ntfy->bo.offset; 510 args.limit += chan->ntfy->bo.offset;
475 } 511 }
476 512
477 ret = nvif_client_ioctl(client, &args, sizeof(args)); 513 client->route = NVDRM_OBJECT_ABI16;
514 client->super = true;
515 ret = nvif_object_init(&chan->chan->user, info->handle,
516 NV_DMA_IN_MEMORY, &args, sizeof(args),
517 &ntfy->object);
478 client->super = false; 518 client->super = false;
519 client->route = NVDRM_OBJECT_NVIF;
479 if (ret) 520 if (ret)
480 goto done; 521 goto done;
481 522
482 info->offset = ntfy->node->offset; 523 info->offset = ntfy->node->offset;
483
484done: 524done:
485 if (ret) 525 if (ret)
486 nouveau_abi16_ntfy_fini(chan, ntfy); 526 nouveau_abi16_ntfy_fini(chan, ntfy);
@@ -491,47 +531,28 @@ int
491nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) 531nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
492{ 532{
493 struct drm_nouveau_gpuobj_free *fini = data; 533 struct drm_nouveau_gpuobj_free *fini = data;
494 struct {
495 struct nvif_ioctl_v0 ioctl;
496 struct nvif_ioctl_del del;
497 } args = {
498 .ioctl.owner = NVDRM_OBJECT_ABI16,
499 .ioctl.type = NVIF_IOCTL_V0_DEL,
500 .ioctl.path_nr = 4,
501 .ioctl.path[3] = NOUVEAU_ABI16_CLIENT,
502 .ioctl.path[2] = NOUVEAU_ABI16_DEVICE,
503 .ioctl.path[1] = NOUVEAU_ABI16_CHAN(fini->channel),
504 .ioctl.path[0] = fini->handle,
505 };
506 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 534 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
507 struct nouveau_abi16_chan *chan; 535 struct nouveau_abi16_chan *chan;
508 struct nouveau_abi16_ntfy *ntfy; 536 struct nouveau_abi16_ntfy *ntfy;
509 struct nvif_client *client; 537 int ret = -ENOENT;
510 int ret;
511 538
512 if (unlikely(!abi16)) 539 if (unlikely(!abi16))
513 return -ENOMEM; 540 return -ENOMEM;
514 541
515 chan = nouveau_abi16_chan(abi16, fini->channel); 542 chan = nouveau_abi16_chan(abi16, fini->channel);
516 if (!chan) 543 if (!chan)
517 return nouveau_abi16_put(abi16, -ENOENT); 544 return nouveau_abi16_put(abi16, -EINVAL);
518 client = nvif_client(nvif_object(&abi16->device));
519 545
520 /* synchronize with the user channel and destroy the gpu object */ 546 /* synchronize with the user channel and destroy the gpu object */
521 nouveau_channel_idle(chan->chan); 547 nouveau_channel_idle(chan->chan);
522 548
523 ret = nvif_client_ioctl(client, &args, sizeof(args));
524 if (ret)
525 return nouveau_abi16_put(abi16, ret);
526
527 /* cleanup extra state if this object was a notifier */
528 list_for_each_entry(ntfy, &chan->notifiers, head) { 549 list_for_each_entry(ntfy, &chan->notifiers, head) {
529 if (ntfy->handle == fini->handle) { 550 if (ntfy->object.handle == fini->handle) {
530 nvkm_mm_free(&chan->heap, &ntfy->node); 551 nouveau_abi16_ntfy_fini(chan, ntfy);
531 list_del(&ntfy->head); 552 ret = 0;
532 break; 553 break;
533 } 554 }
534 } 555 }
535 556
536 return nouveau_abi16_put(abi16, 0); 557 return nouveau_abi16_put(abi16, ret);
537} 558}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 86eb1caf4957..6584557afa40 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -13,9 +13,9 @@ int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS);
13int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS); 13int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS);
14 14
15struct nouveau_abi16_ntfy { 15struct nouveau_abi16_ntfy {
16 struct nvif_object object;
16 struct list_head head; 17 struct list_head head;
17 struct nvkm_mm_node *node; 18 struct nvkm_mm_node *node;
18 u32 handle;
19}; 19};
20 20
21struct nouveau_abi16_chan { 21struct nouveau_abi16_chan {
@@ -37,7 +37,7 @@ struct nouveau_drm;
37struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *, struct drm_device *); 37struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *, struct drm_device *);
38int nouveau_abi16_put(struct nouveau_abi16 *, int); 38int nouveau_abi16_put(struct nouveau_abi16 *, int);
39void nouveau_abi16_fini(struct nouveau_abi16 *); 39void nouveau_abi16_fini(struct nouveau_abi16 *);
40u16 nouveau_abi16_swclass(struct nouveau_drm *); 40s32 nouveau_abi16_swclass(struct nouveau_drm *);
41 41
42#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) 42#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
43#define NOUVEAU_GEM_DOMAIN_GART (1 << 2) 43#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 622424692b3b..df2d9818aba3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -372,12 +372,12 @@ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
372 return len; 372 return len;
373} 373}
374 374
375bool nouveau_acpi_rom_supported(struct pci_dev *pdev) 375bool nouveau_acpi_rom_supported(struct device *dev)
376{ 376{
377 acpi_status status; 377 acpi_status status;
378 acpi_handle dhandle, rom_handle; 378 acpi_handle dhandle, rom_handle;
379 379
380 dhandle = ACPI_HANDLE(&pdev->dev); 380 dhandle = ACPI_HANDLE(dev);
381 if (!dhandle) 381 if (!dhandle)
382 return false; 382 return false;
383 383
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 74acf0f87785..2f03653aff86 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -10,7 +10,7 @@ void nouveau_register_dsm_handler(void);
10void nouveau_unregister_dsm_handler(void); 10void nouveau_unregister_dsm_handler(void);
11void nouveau_switcheroo_optimus_dsm(void); 11void nouveau_switcheroo_optimus_dsm(void);
12int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len); 12int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
13bool nouveau_acpi_rom_supported(struct pci_dev *pdev); 13bool nouveau_acpi_rom_supported(struct device *);
14void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *); 14void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
15#else 15#else
16static inline bool nouveau_is_optimus(void) { return false; }; 16static inline bool nouveau_is_optimus(void) { return false; };
@@ -18,7 +18,7 @@ static inline bool nouveau_is_v1_dsm(void) { return false; };
18static inline void nouveau_register_dsm_handler(void) {} 18static inline void nouveau_register_dsm_handler(void) {}
19static inline void nouveau_unregister_dsm_handler(void) {} 19static inline void nouveau_unregister_dsm_handler(void) {}
20static inline void nouveau_switcheroo_optimus_dsm(void) {} 20static inline void nouveau_switcheroo_optimus_dsm(void) {}
21static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; } 21static inline bool nouveau_acpi_rom_supported(struct device *dev) { return false; }
22static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; } 22static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
23static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; } 23static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
24#endif 24#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
deleted file mode 100644
index 0b5970955604..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ /dev/null
@@ -1,195 +0,0 @@
1#include <linux/module.h>
2
3#include "nouveau_drm.h"
4#include "nouveau_agp.h"
5#include "nouveau_reg.h"
6
7#if __OS_HAS_AGP
8MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
9static int nouveau_agpmode = -1;
10module_param_named(agpmode, nouveau_agpmode, int, 0400);
11
12struct nouveau_agpmode_quirk {
13 u16 hostbridge_vendor;
14 u16 hostbridge_device;
15 u16 chip_vendor;
16 u16 chip_device;
17 int mode;
18};
19
20static struct nouveau_agpmode_quirk nouveau_agpmode_quirk_list[] = {
21 /* VIA Apollo PRO133x / GeForce FX 5600 Ultra, max agpmode 2, fdo #20341 */
22 { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
23
24 {},
25};
26
27static unsigned long
28get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
29{
30 struct nvif_device *device = &drm->device;
31 struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list;
32 int agpmode = nouveau_agpmode;
33 unsigned long mode = info->mode;
34
35 /*
36 * FW seems to be broken on nv18, it makes the card lock up
37 * randomly.
38 */
39 if (device->info.chipset == 0x18)
40 mode &= ~PCI_AGP_COMMAND_FW;
41
42 /*
43 * Go through the quirks list and adjust the agpmode accordingly.
44 */
45 while (agpmode == -1 && quirk->hostbridge_vendor) {
46 if (info->id_vendor == quirk->hostbridge_vendor &&
47 info->id_device == quirk->hostbridge_device &&
48 nvxx_device(device)->pdev->vendor == quirk->chip_vendor &&
49 nvxx_device(device)->pdev->device == quirk->chip_device) {
50 agpmode = quirk->mode;
51 NV_INFO(drm, "Forcing agp mode to %dX. Use agpmode to override.\n",
52 agpmode);
53 break;
54 }
55 ++quirk;
56 }
57
58 /*
59 * AGP mode set in the command line.
60 */
61 if (agpmode > 0) {
62 bool agpv3 = mode & 0x8;
63 int rate = agpv3 ? agpmode / 4 : agpmode;
64
65 mode = (mode & ~0x7) | (rate & 0x7);
66 }
67
68 return mode;
69}
70
71static bool
72nouveau_agp_enabled(struct nouveau_drm *drm)
73{
74 struct drm_device *dev = drm->dev;
75
76 if (!dev->pdev || !drm_pci_device_is_agp(dev) || !dev->agp)
77 return false;
78
79 if (drm->agp.stat == UNKNOWN) {
80 if (!nouveau_agpmode)
81 return false;
82#ifdef __powerpc__
83 /* Disable AGP by default on all PowerPC machines for
84 * now -- At least some UniNorth-2 AGP bridges are
85 * known to be broken: DMA from the host to the card
86 * works just fine, but writeback from the card to the
87 * host goes straight to memory untranslated bypassing
88 * the GATT somehow, making them quite painful to deal
89 * with...
90 */
91 if (nouveau_agpmode == -1)
92 return false;
93#endif
94 return true;
95 }
96
97 return (drm->agp.stat == ENABLED);
98}
99#endif
100
101void
102nouveau_agp_reset(struct nouveau_drm *drm)
103{
104#if __OS_HAS_AGP
105 struct nvif_device *device = &drm->device;
106 struct drm_device *dev = drm->dev;
107 u32 save[2];
108 int ret;
109
110 if (!nouveau_agp_enabled(drm))
111 return;
112
113 /* First of all, disable fast writes, otherwise if it's
114 * already enabled in the AGP bridge and we disable the card's
115 * AGP controller we might be locking ourselves out of it. */
116 if ((nvif_rd32(device, NV04_PBUS_PCI_NV_19) |
117 dev->agp->mode) & PCI_AGP_COMMAND_FW) {
118 struct drm_agp_info info;
119 struct drm_agp_mode mode;
120
121 ret = drm_agp_info(dev, &info);
122 if (ret)
123 return;
124
125 mode.mode = get_agp_mode(drm, &info);
126 mode.mode &= ~PCI_AGP_COMMAND_FW;
127
128 ret = drm_agp_enable(dev, mode);
129 if (ret)
130 return;
131 }
132
133
134 /* clear busmaster bit, and disable AGP */
135 save[0] = nvif_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000);
136 nvif_wr32(device, NV04_PBUS_PCI_NV_19, 0);
137
138 /* reset PGRAPH, PFIFO and PTIMER */
139 save[1] = nvif_mask(device, 0x000200, 0x00011100, 0x00000000);
140 nvif_mask(device, 0x000200, 0x00011100, save[1]);
141
142 /* and restore bustmaster bit (gives effect of resetting AGP) */
143 nvif_wr32(device, NV04_PBUS_PCI_NV_1, save[0]);
144#endif
145}
146
147void
148nouveau_agp_init(struct nouveau_drm *drm)
149{
150#if __OS_HAS_AGP
151 struct drm_device *dev = drm->dev;
152 struct drm_agp_info info;
153 struct drm_agp_mode mode;
154 int ret;
155
156 if (!nouveau_agp_enabled(drm))
157 return;
158 drm->agp.stat = DISABLE;
159
160 ret = drm_agp_acquire(dev);
161 if (ret) {
162 NV_ERROR(drm, "unable to acquire AGP: %d\n", ret);
163 return;
164 }
165
166 ret = drm_agp_info(dev, &info);
167 if (ret) {
168 NV_ERROR(drm, "unable to get AGP info: %d\n", ret);
169 return;
170 }
171
172 /* see agp.h for the AGPSTAT_* modes available */
173 mode.mode = get_agp_mode(drm, &info);
174
175 ret = drm_agp_enable(dev, mode);
176 if (ret) {
177 NV_ERROR(drm, "unable to enable AGP: %d\n", ret);
178 return;
179 }
180
181 drm->agp.stat = ENABLED;
182 drm->agp.base = info.aperture_base;
183 drm->agp.size = info.aperture_size;
184#endif
185}
186
187void
188nouveau_agp_fini(struct nouveau_drm *drm)
189{
190#if __OS_HAS_AGP
191 struct drm_device *dev = drm->dev;
192 if (dev->agp && dev->agp->acquired)
193 drm_agp_release(dev);
194#endif
195}
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.h b/drivers/gpu/drm/nouveau/nouveau_agp.h
deleted file mode 100644
index b55c08652963..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_agp.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef __NOUVEAU_AGP_H__
2#define __NOUVEAU_AGP_H__
3
4struct nouveau_drm;
5
6void nouveau_agp_reset(struct nouveau_drm *);
7void nouveau_agp_init(struct nouveau_drm *);
8void nouveau_agp_fini(struct nouveau_drm *);
9
10#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index e566c5b53651..89eb46040b13 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -40,7 +40,7 @@ static int
40nv40_get_intensity(struct backlight_device *bd) 40nv40_get_intensity(struct backlight_device *bd)
41{ 41{
42 struct nouveau_drm *drm = bl_get_data(bd); 42 struct nouveau_drm *drm = bl_get_data(bd);
43 struct nvif_device *device = &drm->device; 43 struct nvif_object *device = &drm->device.object;
44 int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) & 44 int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) &
45 NV40_PMC_BACKLIGHT_MASK) >> 16; 45 NV40_PMC_BACKLIGHT_MASK) >> 16;
46 46
@@ -51,7 +51,7 @@ static int
51nv40_set_intensity(struct backlight_device *bd) 51nv40_set_intensity(struct backlight_device *bd)
52{ 52{
53 struct nouveau_drm *drm = bl_get_data(bd); 53 struct nouveau_drm *drm = bl_get_data(bd);
54 struct nvif_device *device = &drm->device; 54 struct nvif_object *device = &drm->device.object;
55 int val = bd->props.brightness; 55 int val = bd->props.brightness;
56 int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT); 56 int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT);
57 57
@@ -71,7 +71,7 @@ static int
71nv40_backlight_init(struct drm_connector *connector) 71nv40_backlight_init(struct drm_connector *connector)
72{ 72{
73 struct nouveau_drm *drm = nouveau_drm(connector->dev); 73 struct nouveau_drm *drm = nouveau_drm(connector->dev);
74 struct nvif_device *device = &drm->device; 74 struct nvif_object *device = &drm->device.object;
75 struct backlight_properties props; 75 struct backlight_properties props;
76 struct backlight_device *bd; 76 struct backlight_device *bd;
77 77
@@ -97,7 +97,7 @@ nv50_get_intensity(struct backlight_device *bd)
97{ 97{
98 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 98 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
99 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 99 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
100 struct nvif_device *device = &drm->device; 100 struct nvif_object *device = &drm->device.object;
101 int or = nv_encoder->or; 101 int or = nv_encoder->or;
102 u32 div = 1025; 102 u32 div = 1025;
103 u32 val; 103 u32 val;
@@ -112,7 +112,7 @@ nv50_set_intensity(struct backlight_device *bd)
112{ 112{
113 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 113 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
114 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 114 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
115 struct nvif_device *device = &drm->device; 115 struct nvif_object *device = &drm->device.object;
116 int or = nv_encoder->or; 116 int or = nv_encoder->or;
117 u32 div = 1025; 117 u32 div = 1025;
118 u32 val = (bd->props.brightness * div) / 100; 118 u32 val = (bd->props.brightness * div) / 100;
@@ -133,7 +133,7 @@ nva3_get_intensity(struct backlight_device *bd)
133{ 133{
134 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 134 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
135 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 135 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
136 struct nvif_device *device = &drm->device; 136 struct nvif_object *device = &drm->device.object;
137 int or = nv_encoder->or; 137 int or = nv_encoder->or;
138 u32 div, val; 138 u32 div, val;
139 139
@@ -151,7 +151,7 @@ nva3_set_intensity(struct backlight_device *bd)
151{ 151{
152 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 152 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
153 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 153 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
154 struct nvif_device *device = &drm->device; 154 struct nvif_object *device = &drm->device.object;
155 int or = nv_encoder->or; 155 int or = nv_encoder->or;
156 u32 div, val; 156 u32 div, val;
157 157
@@ -177,7 +177,7 @@ static int
177nv50_backlight_init(struct drm_connector *connector) 177nv50_backlight_init(struct drm_connector *connector)
178{ 178{
179 struct nouveau_drm *drm = nouveau_drm(connector->dev); 179 struct nouveau_drm *drm = nouveau_drm(connector->dev);
180 struct nvif_device *device = &drm->device; 180 struct nvif_object *device = &drm->device.object;
181 struct nouveau_encoder *nv_encoder; 181 struct nouveau_encoder *nv_encoder;
182 struct backlight_properties props; 182 struct backlight_properties props;
183 struct backlight_device *bd; 183 struct backlight_device *bd;
@@ -193,9 +193,9 @@ nv50_backlight_init(struct drm_connector *connector)
193 if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) 193 if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
194 return 0; 194 return 0;
195 195
196 if (device->info.chipset <= 0xa0 || 196 if (drm->device.info.chipset <= 0xa0 ||
197 device->info.chipset == 0xaa || 197 drm->device.info.chipset == 0xaa ||
198 device->info.chipset == 0xac) 198 drm->device.info.chipset == 0xac)
199 ops = &nv50_bl_ops; 199 ops = &nv50_bl_ops;
200 else 200 else
201 ops = &nva3_bl_ops; 201 ops = &nva3_bl_ops;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0190b69bbe25..4dca65a63b92 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -215,7 +215,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head
215 */ 215 */
216 216
217 struct nouveau_drm *drm = nouveau_drm(dev); 217 struct nouveau_drm *drm = nouveau_drm(dev);
218 struct nvif_device *device = &drm->device; 218 struct nvif_object *device = &drm->device.object;
219 struct nvbios *bios = &drm->vbios; 219 struct nvbios *bios = &drm->vbios;
220 uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; 220 uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
221 uint32_t sel_clk_binding, sel_clk; 221 uint32_t sel_clk_binding, sel_clk;
@@ -318,7 +318,8 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
318static int 318static int
319get_fp_strap(struct drm_device *dev, struct nvbios *bios) 319get_fp_strap(struct drm_device *dev, struct nvbios *bios)
320{ 320{
321 struct nvif_device *device = &nouveau_drm(dev)->device; 321 struct nouveau_drm *drm = nouveau_drm(dev);
322 struct nvif_object *device = &drm->device.object;
322 323
323 /* 324 /*
324 * The fp strap is normally dictated by the "User Strap" in 325 * The fp strap is normally dictated by the "User Strap" in
@@ -332,7 +333,7 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
332 if (bios->major_version < 5 && bios->data[0x48] & 0x4) 333 if (bios->major_version < 5 && bios->data[0x48] & 0x4)
333 return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf; 334 return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
334 335
335 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) 336 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
336 return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf; 337 return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
337 else 338 else
338 return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf; 339 return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
@@ -634,7 +635,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head,
634 */ 635 */
635 636
636 struct nouveau_drm *drm = nouveau_drm(dev); 637 struct nouveau_drm *drm = nouveau_drm(dev);
637 struct nvif_device *device = &drm->device; 638 struct nvif_object *device = &drm->device.object;
638 struct nvbios *bios = &drm->vbios; 639 struct nvbios *bios = &drm->vbios;
639 int cv = bios->chip_version; 640 int cv = bios->chip_version;
640 uint16_t clktable = 0, scriptptr; 641 uint16_t clktable = 0, scriptptr;
@@ -1481,22 +1482,20 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
1481 entry->dpconf.link_bw = 540000; 1482 entry->dpconf.link_bw = 540000;
1482 break; 1483 break;
1483 } 1484 }
1484 entry->dpconf.link_nr = (conf & 0x0f000000) >> 24; 1485 switch ((conf & 0x0f000000) >> 24) {
1485 if (dcb->version < 0x41) { 1486 case 0xf:
1486 switch (entry->dpconf.link_nr) { 1487 case 0x4:
1487 case 0xf: 1488 entry->dpconf.link_nr = 4;
1488 entry->dpconf.link_nr = 4; 1489 break;
1489 break; 1490 case 0x3:
1490 case 0x3: 1491 case 0x2:
1491 entry->dpconf.link_nr = 2; 1492 entry->dpconf.link_nr = 2;
1492 break; 1493 break;
1493 default: 1494 default:
1494 entry->dpconf.link_nr = 1; 1495 entry->dpconf.link_nr = 1;
1495 break; 1496 break;
1496 }
1497 } 1497 }
1498 link = entry->dpconf.sor.link; 1498 link = entry->dpconf.sor.link;
1499 entry->i2c_index += NV_I2C_AUX(0);
1500 break; 1499 break;
1501 case DCB_OUTPUT_TMDS: 1500 case DCB_OUTPUT_TMDS:
1502 if (dcb->version >= 0x40) { 1501 if (dcb->version >= 0x40) {
@@ -1892,11 +1891,12 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
1892 idx = -1; 1891 idx = -1;
1893 while ((conn = olddcb_conn(dev, ++idx))) { 1892 while ((conn = olddcb_conn(dev, ++idx))) {
1894 if (conn[0] != 0xff) { 1893 if (conn[0] != 0xff) {
1895 NV_INFO(drm, "DCB conn %02d: ", idx);
1896 if (olddcb_conntab(dev)[3] < 4) 1894 if (olddcb_conntab(dev)[3] < 4)
1897 pr_cont("%04x\n", ROM16(conn[0])); 1895 NV_INFO(drm, "DCB conn %02d: %04x\n",
1896 idx, ROM16(conn[0]));
1898 else 1897 else
1899 pr_cont("%08x\n", ROM32(conn[0])); 1898 NV_INFO(drm, "DCB conn %02d: %08x\n",
1899 idx, ROM32(conn[0]));
1900 } 1900 }
1901 } 1901 }
1902 dcb_fake_connectors(bios); 1902 dcb_fake_connectors(bios);
@@ -1915,7 +1915,7 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
1915 */ 1915 */
1916 1916
1917 struct nouveau_drm *drm = nouveau_drm(dev); 1917 struct nouveau_drm *drm = nouveau_drm(dev);
1918 struct nvif_device *device = &drm->device; 1918 struct nvif_object *device = &drm->device.object;
1919 uint8_t bytes_to_write; 1919 uint8_t bytes_to_write;
1920 uint16_t hwsq_entry_offset; 1920 uint16_t hwsq_entry_offset;
1921 int i; 1921 int i;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6edcce1658b7..15057b39491c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -48,24 +48,19 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
48{ 48{
49 struct nouveau_drm *drm = nouveau_drm(dev); 49 struct nouveau_drm *drm = nouveau_drm(dev);
50 int i = reg - drm->tile.reg; 50 int i = reg - drm->tile.reg;
51 struct nvkm_fb *pfb = nvxx_fb(&drm->device); 51 struct nvkm_device *device = nvxx_device(&drm->device);
52 struct nvkm_fb_tile *tile = &pfb->tile.region[i]; 52 struct nvkm_fb *fb = device->fb;
53 struct nvkm_engine *engine; 53 struct nvkm_fb_tile *tile = &fb->tile.region[i];
54 54
55 nouveau_fence_unref(&reg->fence); 55 nouveau_fence_unref(&reg->fence);
56 56
57 if (tile->pitch) 57 if (tile->pitch)
58 pfb->tile.fini(pfb, i, tile); 58 nvkm_fb_tile_fini(fb, i, tile);
59 59
60 if (pitch) 60 if (pitch)
61 pfb->tile.init(pfb, i, addr, size, pitch, flags, tile); 61 nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
62
63 pfb->tile.prog(pfb, i, tile);
64 62
65 if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_GR))) 63 nvkm_fb_tile_prog(fb, i, tile);
66 engine->tile_prog(engine, i);
67 if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_MPEG)))
68 engine->tile_prog(engine, i);
69} 64}
70 65
71static struct nouveau_drm_tile * 66static struct nouveau_drm_tile *
@@ -105,18 +100,18 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
105 u32 size, u32 pitch, u32 flags) 100 u32 size, u32 pitch, u32 flags)
106{ 101{
107 struct nouveau_drm *drm = nouveau_drm(dev); 102 struct nouveau_drm *drm = nouveau_drm(dev);
108 struct nvkm_fb *pfb = nvxx_fb(&drm->device); 103 struct nvkm_fb *fb = nvxx_fb(&drm->device);
109 struct nouveau_drm_tile *tile, *found = NULL; 104 struct nouveau_drm_tile *tile, *found = NULL;
110 int i; 105 int i;
111 106
112 for (i = 0; i < pfb->tile.regions; i++) { 107 for (i = 0; i < fb->tile.regions; i++) {
113 tile = nv10_bo_get_tile_region(dev, i); 108 tile = nv10_bo_get_tile_region(dev, i);
114 109
115 if (pitch && !found) { 110 if (pitch && !found) {
116 found = tile; 111 found = tile;
117 continue; 112 continue;
118 113
119 } else if (tile && pfb->tile.region[i].pitch) { 114 } else if (tile && fb->tile.region[i].pitch) {
120 /* Kill an unused tile region. */ 115 /* Kill an unused tile region. */
121 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0); 116 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
122 } 117 }
@@ -214,7 +209,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
214 nvbo->tile_flags = tile_flags; 209 nvbo->tile_flags = tile_flags;
215 nvbo->bo.bdev = &drm->ttm.bdev; 210 nvbo->bo.bdev = &drm->ttm.bdev;
216 211
217 if (!nv_device_is_cpu_coherent(nvxx_device(&drm->device))) 212 if (!nvxx_device(&drm->device)->func->cpu_coherent)
218 nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED; 213 nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
219 214
220 nvbo->page_shift = 12; 215 nvbo->page_shift = 12;
@@ -471,8 +466,8 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
471 return; 466 return;
472 467
473 for (i = 0; i < ttm_dma->ttm.num_pages; i++) 468 for (i = 0; i < ttm_dma->ttm.num_pages; i++)
474 dma_sync_single_for_device(nv_device_base(device), 469 dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
475 ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE); 470 PAGE_SIZE, DMA_TO_DEVICE);
476} 471}
477 472
478void 473void
@@ -491,8 +486,8 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
491 return; 486 return;
492 487
493 for (i = 0; i < ttm_dma->ttm.num_pages; i++) 488 for (i = 0; i < ttm_dma->ttm.num_pages; i++)
494 dma_sync_single_for_cpu(nv_device_base(device), 489 dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
495 ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE); 490 PAGE_SIZE, DMA_FROM_DEVICE);
496} 491}
497 492
498int 493int
@@ -581,10 +576,9 @@ nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
581{ 576{
582#if __OS_HAS_AGP 577#if __OS_HAS_AGP
583 struct nouveau_drm *drm = nouveau_bdev(bdev); 578 struct nouveau_drm *drm = nouveau_bdev(bdev);
584 struct drm_device *dev = drm->dev;
585 579
586 if (drm->agp.stat == ENABLED) { 580 if (drm->agp.bridge) {
587 return ttm_agp_tt_create(bdev, dev->agp->bridge, size, 581 return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
588 page_flags, dummy_read); 582 page_flags, dummy_read);
589 } 583 }
590#endif 584#endif
@@ -636,12 +630,12 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
636 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) 630 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
637 man->func = &nouveau_gart_manager; 631 man->func = &nouveau_gart_manager;
638 else 632 else
639 if (drm->agp.stat != ENABLED) 633 if (!drm->agp.bridge)
640 man->func = &nv04_gart_manager; 634 man->func = &nv04_gart_manager;
641 else 635 else
642 man->func = &ttm_bo_manager_func; 636 man->func = &ttm_bo_manager_func;
643 637
644 if (drm->agp.stat == ENABLED) { 638 if (drm->agp.bridge) {
645 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; 639 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
646 man->available_caching = TTM_PL_FLAG_UNCACHED | 640 man->available_caching = TTM_PL_FLAG_UNCACHED |
647 TTM_PL_FLAG_WC; 641 TTM_PL_FLAG_WC;
@@ -1064,7 +1058,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
1064{ 1058{
1065 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1059 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1066 struct nouveau_channel *chan = drm->ttm.chan; 1060 struct nouveau_channel *chan = drm->ttm.chan;
1067 struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base); 1061 struct nouveau_cli *cli = (void *)chan->user.client;
1068 struct nouveau_fence *fence; 1062 struct nouveau_fence *fence;
1069 int ret; 1063 int ret;
1070 1064
@@ -1104,7 +1098,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
1104 static const struct { 1098 static const struct {
1105 const char *name; 1099 const char *name;
1106 int engine; 1100 int engine;
1107 u32 oclass; 1101 s32 oclass;
1108 int (*exec)(struct nouveau_channel *, 1102 int (*exec)(struct nouveau_channel *,
1109 struct ttm_buffer_object *, 1103 struct ttm_buffer_object *,
1110 struct ttm_mem_reg *, struct ttm_mem_reg *); 1104 struct ttm_mem_reg *, struct ttm_mem_reg *);
@@ -1137,7 +1131,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
1137 if (chan == NULL) 1131 if (chan == NULL)
1138 continue; 1132 continue;
1139 1133
1140 ret = nvif_object_init(chan->object, NULL, 1134 ret = nvif_object_init(&chan->user,
1141 mthd->oclass | (mthd->engine << 16), 1135 mthd->oclass | (mthd->engine << 16),
1142 mthd->oclass, NULL, 0, 1136 mthd->oclass, NULL, 0,
1143 &drm->ttm.copy); 1137 &drm->ttm.copy);
@@ -1356,6 +1350,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1356{ 1350{
1357 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 1351 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1358 struct nouveau_drm *drm = nouveau_bdev(bdev); 1352 struct nouveau_drm *drm = nouveau_bdev(bdev);
1353 struct nvkm_device *device = nvxx_device(&drm->device);
1359 struct nvkm_mem *node = mem->mm_node; 1354 struct nvkm_mem *node = mem->mm_node;
1360 int ret; 1355 int ret;
1361 1356
@@ -1372,10 +1367,10 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1372 return 0; 1367 return 0;
1373 case TTM_PL_TT: 1368 case TTM_PL_TT:
1374#if __OS_HAS_AGP 1369#if __OS_HAS_AGP
1375 if (drm->agp.stat == ENABLED) { 1370 if (drm->agp.bridge) {
1376 mem->bus.offset = mem->start << PAGE_SHIFT; 1371 mem->bus.offset = mem->start << PAGE_SHIFT;
1377 mem->bus.base = drm->agp.base; 1372 mem->bus.base = drm->agp.base;
1378 mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture; 1373 mem->bus.is_iomem = !drm->agp.cma;
1379 } 1374 }
1380#endif 1375#endif
1381 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype) 1376 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
@@ -1384,16 +1379,20 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1384 /* fallthrough, tiled memory */ 1379 /* fallthrough, tiled memory */
1385 case TTM_PL_VRAM: 1380 case TTM_PL_VRAM:
1386 mem->bus.offset = mem->start << PAGE_SHIFT; 1381 mem->bus.offset = mem->start << PAGE_SHIFT;
1387 mem->bus.base = nv_device_resource_start(nvxx_device(&drm->device), 1); 1382 mem->bus.base = device->func->resource_addr(device, 1);
1388 mem->bus.is_iomem = true; 1383 mem->bus.is_iomem = true;
1389 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 1384 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1390 struct nvkm_bar *bar = nvxx_bar(&drm->device); 1385 struct nvkm_bar *bar = nvxx_bar(&drm->device);
1386 int page_shift = 12;
1387 if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
1388 page_shift = node->page_shift;
1391 1389
1392 ret = bar->umap(bar, node, NV_MEM_ACCESS_RW, 1390 ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
1393 &node->bar_vma); 1391 &node->bar_vma);
1394 if (ret) 1392 if (ret)
1395 return ret; 1393 return ret;
1396 1394
1395 nvkm_vm_map(&node->bar_vma, node);
1397 mem->bus.offset = node->bar_vma.offset; 1396 mem->bus.offset = node->bar_vma.offset;
1398 } 1397 }
1399 break; 1398 break;
@@ -1406,14 +1405,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1406static void 1405static void
1407nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 1406nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1408{ 1407{
1409 struct nouveau_drm *drm = nouveau_bdev(bdev);
1410 struct nvkm_bar *bar = nvxx_bar(&drm->device);
1411 struct nvkm_mem *node = mem->mm_node; 1408 struct nvkm_mem *node = mem->mm_node;
1412 1409
1413 if (!node->bar_vma.node) 1410 if (!node->bar_vma.node)
1414 return; 1411 return;
1415 1412
1416 bar->unmap(bar, &node->bar_vma); 1413 nvkm_vm_unmap(&node->bar_vma);
1414 nvkm_vm_put(&node->bar_vma);
1417} 1415}
1418 1416
1419static int 1417static int
@@ -1421,8 +1419,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1421{ 1419{
1422 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1420 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1423 struct nouveau_bo *nvbo = nouveau_bo(bo); 1421 struct nouveau_bo *nvbo = nouveau_bo(bo);
1424 struct nvif_device *device = &drm->device; 1422 struct nvkm_device *device = nvxx_device(&drm->device);
1425 u32 mappable = nv_device_resource_len(nvxx_device(device), 1) >> PAGE_SHIFT; 1423 u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
1426 int i, ret; 1424 int i, ret;
1427 1425
1428 /* as long as the bo isn't in vram, and isn't tiled, we've got 1426 /* as long as the bo isn't in vram, and isn't tiled, we've got
@@ -1488,18 +1486,18 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1488 drm = nouveau_bdev(ttm->bdev); 1486 drm = nouveau_bdev(ttm->bdev);
1489 device = nvxx_device(&drm->device); 1487 device = nvxx_device(&drm->device);
1490 dev = drm->dev; 1488 dev = drm->dev;
1491 pdev = nv_device_base(device); 1489 pdev = device->dev;
1492 1490
1493 /* 1491 /*
1494 * Objects matching this condition have been marked as force_coherent, 1492 * Objects matching this condition have been marked as force_coherent,
1495 * so use the DMA API for them. 1493 * so use the DMA API for them.
1496 */ 1494 */
1497 if (!nv_device_is_cpu_coherent(device) && 1495 if (!nvxx_device(&drm->device)->func->cpu_coherent &&
1498 ttm->caching_state == tt_uncached) 1496 ttm->caching_state == tt_uncached)
1499 return ttm_dma_populate(ttm_dma, dev->dev); 1497 return ttm_dma_populate(ttm_dma, dev->dev);
1500 1498
1501#if __OS_HAS_AGP 1499#if __OS_HAS_AGP
1502 if (drm->agp.stat == ENABLED) { 1500 if (drm->agp.bridge) {
1503 return ttm_agp_tt_populate(ttm); 1501 return ttm_agp_tt_populate(ttm);
1504 } 1502 }
1505#endif 1503#endif
@@ -1553,20 +1551,20 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1553 drm = nouveau_bdev(ttm->bdev); 1551 drm = nouveau_bdev(ttm->bdev);
1554 device = nvxx_device(&drm->device); 1552 device = nvxx_device(&drm->device);
1555 dev = drm->dev; 1553 dev = drm->dev;
1556 pdev = nv_device_base(device); 1554 pdev = device->dev;
1557 1555
1558 /* 1556 /*
1559 * Objects matching this condition have been marked as force_coherent, 1557 * Objects matching this condition have been marked as force_coherent,
1560 * so use the DMA API for them. 1558 * so use the DMA API for them.
1561 */ 1559 */
1562 if (!nv_device_is_cpu_coherent(device) && 1560 if (!nvxx_device(&drm->device)->func->cpu_coherent &&
1563 ttm->caching_state == tt_uncached) { 1561 ttm->caching_state == tt_uncached) {
1564 ttm_dma_unpopulate(ttm_dma, dev->dev); 1562 ttm_dma_unpopulate(ttm_dma, dev->dev);
1565 return; 1563 return;
1566 } 1564 }
1567 1565
1568#if __OS_HAS_AGP 1566#if __OS_HAS_AGP
1569 if (drm->agp.stat == ENABLED) { 1567 if (drm->agp.bridge) {
1570 ttm_agp_tt_unpopulate(ttm); 1568 ttm_agp_tt_unpopulate(ttm);
1571 return; 1569 return;
1572 } 1570 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 0589babc506e..ff5e59db49db 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -24,6 +24,7 @@
24 24
25#include <nvif/os.h> 25#include <nvif/os.h>
26#include <nvif/class.h> 26#include <nvif/class.h>
27#include <nvif/ioctl.h>
27 28
28/*XXX*/ 29/*XXX*/
29#include <core/client.h> 30#include <core/client.h>
@@ -42,20 +43,26 @@ module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
42int 43int
43nouveau_channel_idle(struct nouveau_channel *chan) 44nouveau_channel_idle(struct nouveau_channel *chan)
44{ 45{
45 struct nouveau_cli *cli = (void *)nvif_client(chan->object); 46 if (likely(chan && chan->fence)) {
46 struct nouveau_fence *fence = NULL; 47 struct nouveau_cli *cli = (void *)chan->user.client;
47 int ret; 48 struct nouveau_fence *fence = NULL;
49 int ret;
50
51 ret = nouveau_fence_new(chan, false, &fence);
52 if (!ret) {
53 ret = nouveau_fence_wait(fence, false, false);
54 nouveau_fence_unref(&fence);
55 }
48 56
49 ret = nouveau_fence_new(chan, false, &fence); 57 if (ret) {
50 if (!ret) { 58 NV_PRINTK(err, cli, "failed to idle channel "
51 ret = nouveau_fence_wait(fence, false, false); 59 "0x%08x [%s]\n",
52 nouveau_fence_unref(&fence); 60 chan->user.handle,
61 nvxx_client(&cli->base)->name);
62 return ret;
63 }
53 } 64 }
54 65 return 0;
55 if (ret)
56 NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n",
57 chan->object->handle, nvxx_client(&cli->base)->name);
58 return ret;
59} 66}
60 67
61void 68void
@@ -63,21 +70,18 @@ nouveau_channel_del(struct nouveau_channel **pchan)
63{ 70{
64 struct nouveau_channel *chan = *pchan; 71 struct nouveau_channel *chan = *pchan;
65 if (chan) { 72 if (chan) {
66 if (chan->fence) { 73 if (chan->fence)
67 nouveau_channel_idle(chan);
68 nouveau_fence(chan->drm)->context_del(chan); 74 nouveau_fence(chan->drm)->context_del(chan);
69 }
70 nvif_object_fini(&chan->nvsw); 75 nvif_object_fini(&chan->nvsw);
71 nvif_object_fini(&chan->gart); 76 nvif_object_fini(&chan->gart);
72 nvif_object_fini(&chan->vram); 77 nvif_object_fini(&chan->vram);
73 nvif_object_ref(NULL, &chan->object); 78 nvif_object_fini(&chan->user);
74 nvif_object_fini(&chan->push.ctxdma); 79 nvif_object_fini(&chan->push.ctxdma);
75 nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma); 80 nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
76 nouveau_bo_unmap(chan->push.buffer); 81 nouveau_bo_unmap(chan->push.buffer);
77 if (chan->push.buffer && chan->push.buffer->pin_refcnt) 82 if (chan->push.buffer && chan->push.buffer->pin_refcnt)
78 nouveau_bo_unpin(chan->push.buffer); 83 nouveau_bo_unpin(chan->push.buffer);
79 nouveau_bo_ref(NULL, &chan->push.buffer); 84 nouveau_bo_ref(NULL, &chan->push.buffer);
80 nvif_device_ref(NULL, &chan->device);
81 kfree(chan); 85 kfree(chan);
82 } 86 }
83 *pchan = NULL; 87 *pchan = NULL;
@@ -87,7 +91,7 @@ static int
87nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device, 91nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
88 u32 handle, u32 size, struct nouveau_channel **pchan) 92 u32 handle, u32 size, struct nouveau_channel **pchan)
89{ 93{
90 struct nouveau_cli *cli = (void *)nvif_client(&device->base); 94 struct nouveau_cli *cli = (void *)device->object.client;
91 struct nvkm_mmu *mmu = nvxx_mmu(device); 95 struct nvkm_mmu *mmu = nvxx_mmu(device);
92 struct nv_dma_v0 args = {}; 96 struct nv_dma_v0 args = {};
93 struct nouveau_channel *chan; 97 struct nouveau_channel *chan;
@@ -98,7 +102,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
98 if (!chan) 102 if (!chan)
99 return -ENOMEM; 103 return -ENOMEM;
100 104
101 nvif_device_ref(device, &chan->device); 105 chan->device = device;
102 chan->drm = drm; 106 chan->drm = drm;
103 107
104 /* allocate memory for dma push buffer */ 108 /* allocate memory for dma push buffer */
@@ -146,7 +150,8 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
146 */ 150 */
147 args.target = NV_DMA_V0_TARGET_PCI; 151 args.target = NV_DMA_V0_TARGET_PCI;
148 args.access = NV_DMA_V0_ACCESS_RDWR; 152 args.access = NV_DMA_V0_ACCESS_RDWR;
149 args.start = nv_device_resource_start(nvxx_device(device), 1); 153 args.start = nvxx_device(device)->func->
154 resource_addr(nvxx_device(device), 1);
150 args.limit = args.start + device->info.ram_user - 1; 155 args.limit = args.start + device->info.ram_user - 1;
151 } else { 156 } else {
152 args.target = NV_DMA_V0_TARGET_VRAM; 157 args.target = NV_DMA_V0_TARGET_VRAM;
@@ -155,7 +160,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
155 args.limit = device->info.ram_user - 1; 160 args.limit = device->info.ram_user - 1;
156 } 161 }
157 } else { 162 } else {
158 if (chan->drm->agp.stat == ENABLED) { 163 if (chan->drm->agp.bridge) {
159 args.target = NV_DMA_V0_TARGET_AGP; 164 args.target = NV_DMA_V0_TARGET_AGP;
160 args.access = NV_DMA_V0_ACCESS_RDWR; 165 args.access = NV_DMA_V0_ACCESS_RDWR;
161 args.start = chan->drm->agp.base; 166 args.start = chan->drm->agp.base;
@@ -169,7 +174,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
169 } 174 }
170 } 175 }
171 176
172 ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH | 177 ret = nvif_object_init(&device->object, NVDRM_PUSH |
173 (handle & 0xffff), NV_DMA_FROM_MEMORY, 178 (handle & 0xffff), NV_DMA_FROM_MEMORY,
174 &args, sizeof(args), &chan->push.ctxdma); 179 &args, sizeof(args), &chan->push.ctxdma);
175 if (ret) { 180 if (ret) {
@@ -193,8 +198,9 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
193 const u16 *oclass = oclasses; 198 const u16 *oclass = oclasses;
194 union { 199 union {
195 struct nv50_channel_gpfifo_v0 nv50; 200 struct nv50_channel_gpfifo_v0 nv50;
201 struct fermi_channel_gpfifo_v0 fermi;
196 struct kepler_channel_gpfifo_a_v0 kepler; 202 struct kepler_channel_gpfifo_a_v0 kepler;
197 } args, *retn; 203 } args;
198 struct nouveau_channel *chan; 204 struct nouveau_channel *chan;
199 u32 size; 205 u32 size;
200 int ret; 206 int ret;
@@ -210,26 +216,36 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
210 if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) { 216 if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
211 args.kepler.version = 0; 217 args.kepler.version = 0;
212 args.kepler.engine = engine; 218 args.kepler.engine = engine;
213 args.kepler.pushbuf = chan->push.ctxdma.handle;
214 args.kepler.ilength = 0x02000; 219 args.kepler.ilength = 0x02000;
215 args.kepler.ioffset = 0x10000 + chan->push.vma.offset; 220 args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
221 args.kepler.vm = 0;
216 size = sizeof(args.kepler); 222 size = sizeof(args.kepler);
223 } else
224 if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
225 args.fermi.version = 0;
226 args.fermi.ilength = 0x02000;
227 args.fermi.ioffset = 0x10000 + chan->push.vma.offset;
228 args.fermi.vm = 0;
229 size = sizeof(args.fermi);
217 } else { 230 } else {
218 args.nv50.version = 0; 231 args.nv50.version = 0;
219 args.nv50.pushbuf = chan->push.ctxdma.handle;
220 args.nv50.ilength = 0x02000; 232 args.nv50.ilength = 0x02000;
221 args.nv50.ioffset = 0x10000 + chan->push.vma.offset; 233 args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
234 args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma);
235 args.nv50.vm = 0;
222 size = sizeof(args.nv50); 236 size = sizeof(args.nv50);
223 } 237 }
224 238
225 ret = nvif_object_new(nvif_object(device), handle, *oclass++, 239 ret = nvif_object_init(&device->object, handle, *oclass++,
226 &args, size, &chan->object); 240 &args, size, &chan->user);
227 if (ret == 0) { 241 if (ret == 0) {
228 retn = chan->object->data; 242 if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A)
229 if (chan->object->oclass >= KEPLER_CHANNEL_GPFIFO_A) 243 chan->chid = args.kepler.chid;
230 chan->chid = retn->kepler.chid; 244 else
245 if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO)
246 chan->chid = args.fermi.chid;
231 else 247 else
232 chan->chid = retn->nv50.chid; 248 chan->chid = args.nv50.chid;
233 return ret; 249 return ret;
234 } 250 }
235 } while (*oclass); 251 } while (*oclass);
@@ -248,7 +264,7 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
248 NV03_CHANNEL_DMA, 264 NV03_CHANNEL_DMA,
249 0 }; 265 0 };
250 const u16 *oclass = oclasses; 266 const u16 *oclass = oclasses;
251 struct nv03_channel_dma_v0 args, *retn; 267 struct nv03_channel_dma_v0 args;
252 struct nouveau_channel *chan; 268 struct nouveau_channel *chan;
253 int ret; 269 int ret;
254 270
@@ -260,15 +276,14 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
260 276
261 /* create channel object */ 277 /* create channel object */
262 args.version = 0; 278 args.version = 0;
263 args.pushbuf = chan->push.ctxdma.handle; 279 args.pushbuf = nvif_handle(&chan->push.ctxdma);
264 args.offset = chan->push.vma.offset; 280 args.offset = chan->push.vma.offset;
265 281
266 do { 282 do {
267 ret = nvif_object_new(nvif_object(device), handle, *oclass++, 283 ret = nvif_object_init(&device->object, handle, *oclass++,
268 &args, sizeof(args), &chan->object); 284 &args, sizeof(args), &chan->user);
269 if (ret == 0) { 285 if (ret == 0) {
270 retn = chan->object->data; 286 chan->chid = args.chid;
271 chan->chid = retn->chid;
272 return ret; 287 return ret;
273 } 288 }
274 } while (ret && *oclass); 289 } while (ret && *oclass);
@@ -281,13 +296,12 @@ static int
281nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) 296nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
282{ 297{
283 struct nvif_device *device = chan->device; 298 struct nvif_device *device = chan->device;
284 struct nouveau_cli *cli = (void *)nvif_client(&device->base); 299 struct nouveau_cli *cli = (void *)chan->user.client;
285 struct nvkm_mmu *mmu = nvxx_mmu(device); 300 struct nvkm_mmu *mmu = nvxx_mmu(device);
286 struct nvkm_sw_chan *swch;
287 struct nv_dma_v0 args = {}; 301 struct nv_dma_v0 args = {};
288 int ret, i; 302 int ret, i;
289 303
290 nvif_object_map(chan->object); 304 nvif_object_map(&chan->user);
291 305
292 /* allocate dma objects to cover all allowed vram, and gart */ 306 /* allocate dma objects to cover all allowed vram, and gart */
293 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { 307 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
@@ -303,9 +317,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
303 args.limit = device->info.ram_user - 1; 317 args.limit = device->info.ram_user - 1;
304 } 318 }
305 319
306 ret = nvif_object_init(chan->object, NULL, vram, 320 ret = nvif_object_init(&chan->user, vram, NV_DMA_IN_MEMORY,
307 NV_DMA_IN_MEMORY, &args, 321 &args, sizeof(args), &chan->vram);
308 sizeof(args), &chan->vram);
309 if (ret) 322 if (ret)
310 return ret; 323 return ret;
311 324
@@ -315,7 +328,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
315 args.start = 0; 328 args.start = 0;
316 args.limit = cli->vm->mmu->limit - 1; 329 args.limit = cli->vm->mmu->limit - 1;
317 } else 330 } else
318 if (chan->drm->agp.stat == ENABLED) { 331 if (chan->drm->agp.bridge) {
319 args.target = NV_DMA_V0_TARGET_AGP; 332 args.target = NV_DMA_V0_TARGET_AGP;
320 args.access = NV_DMA_V0_ACCESS_RDWR; 333 args.access = NV_DMA_V0_ACCESS_RDWR;
321 args.start = chan->drm->agp.base; 334 args.start = chan->drm->agp.base;
@@ -328,15 +341,14 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
328 args.limit = mmu->limit - 1; 341 args.limit = mmu->limit - 1;
329 } 342 }
330 343
331 ret = nvif_object_init(chan->object, NULL, gart, 344 ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
332 NV_DMA_IN_MEMORY, &args, 345 &args, sizeof(args), &chan->gart);
333 sizeof(args), &chan->gart);
334 if (ret) 346 if (ret)
335 return ret; 347 return ret;
336 } 348 }
337 349
338 /* initialise dma tracking parameters */ 350 /* initialise dma tracking parameters */
339 switch (chan->object->oclass & 0x00ff) { 351 switch (chan->user.oclass & 0x00ff) {
340 case 0x006b: 352 case 0x006b:
341 case 0x006e: 353 case 0x006e:
342 chan->user_put = 0x40; 354 chan->user_put = 0x40;
@@ -368,15 +380,12 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
368 380
369 /* allocate software object class (used for fences on <= nv05) */ 381 /* allocate software object class (used for fences on <= nv05) */
370 if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) { 382 if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
371 ret = nvif_object_init(chan->object, NULL, 0x006e, 0x006e, 383 ret = nvif_object_init(&chan->user, 0x006e,
384 NVIF_IOCTL_NEW_V0_SW_NV04,
372 NULL, 0, &chan->nvsw); 385 NULL, 0, &chan->nvsw);
373 if (ret) 386 if (ret)
374 return ret; 387 return ret;
375 388
376 swch = (void *)nvxx_object(&chan->nvsw)->parent;
377 swch->flip = nouveau_flip_complete;
378 swch->flip_data = chan;
379
380 ret = RING_SPACE(chan, 2); 389 ret = RING_SPACE(chan, 2);
381 if (ret) 390 if (ret)
382 return ret; 391 return ret;
@@ -395,7 +404,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
395 u32 handle, u32 arg0, u32 arg1, 404 u32 handle, u32 arg0, u32 arg1,
396 struct nouveau_channel **pchan) 405 struct nouveau_channel **pchan)
397{ 406{
398 struct nouveau_cli *cli = (void *)nvif_client(&device->base); 407 struct nouveau_cli *cli = (void *)device->object.client;
399 bool super; 408 bool super;
400 int ret; 409 int ret;
401 410
@@ -405,17 +414,17 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
405 414
406 ret = nouveau_channel_ind(drm, device, handle, arg0, pchan); 415 ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
407 if (ret) { 416 if (ret) {
408 NV_PRINTK(debug, cli, "ib channel create, %d\n", ret); 417 NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
409 ret = nouveau_channel_dma(drm, device, handle, pchan); 418 ret = nouveau_channel_dma(drm, device, handle, pchan);
410 if (ret) { 419 if (ret) {
411 NV_PRINTK(debug, cli, "dma channel create, %d\n", ret); 420 NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret);
412 goto done; 421 goto done;
413 } 422 }
414 } 423 }
415 424
416 ret = nouveau_channel_init(*pchan, arg0, arg1); 425 ret = nouveau_channel_init(*pchan, arg0, arg1);
417 if (ret) { 426 if (ret) {
418 NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret); 427 NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
419 nouveau_channel_del(pchan); 428 nouveau_channel_del(pchan);
420 } 429 }
421 430
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 8b3640f69e4f..2ed32414cb69 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -37,7 +37,7 @@ struct nouveau_channel {
37 u32 user_get; 37 u32 user_get;
38 u32 user_put; 38 u32 user_put;
39 39
40 struct nvif_object *object; 40 struct nvif_object user;
41}; 41};
42 42
43 43
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 1f26eba245d1..2e7cbe933533 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -125,9 +125,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
125 * is handled by the SOR itself, and not required for LVDS DDC. 125 * is handled by the SOR itself, and not required for LVDS DDC.
126 */ 126 */
127 if (nv_connector->type == DCB_CONNECTOR_eDP) { 127 if (nv_connector->type == DCB_CONNECTOR_eDP) {
128 panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff); 128 panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
129 if (panel == 0) { 129 if (panel == 0) {
130 gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1); 130 nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
131 msleep(300); 131 msleep(300);
132 } 132 }
133 } 133 }
@@ -148,7 +148,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
148 break; 148 break;
149 } else 149 } else
150 if (nv_encoder->i2c) { 150 if (nv_encoder->i2c) {
151 if (nv_probe_i2c(nv_encoder->i2c, 0x50)) 151 if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
152 break; 152 break;
153 } 153 }
154 } 154 }
@@ -157,7 +157,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
157 * state to avoid confusing the SOR for other output types. 157 * state to avoid confusing the SOR for other output types.
158 */ 158 */
159 if (!nv_encoder && panel == 0) 159 if (!nv_encoder && panel == 0)
160 gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel); 160 nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
161 161
162 return nv_encoder; 162 return nv_encoder;
163} 163}
@@ -241,7 +241,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
241 struct nouveau_connector *nv_connector = nouveau_connector(connector); 241 struct nouveau_connector *nv_connector = nouveau_connector(connector);
242 struct nouveau_encoder *nv_encoder = NULL; 242 struct nouveau_encoder *nv_encoder = NULL;
243 struct nouveau_encoder *nv_partner; 243 struct nouveau_encoder *nv_partner;
244 struct nvkm_i2c_port *i2c; 244 struct i2c_adapter *i2c;
245 int type; 245 int type;
246 int ret; 246 int ret;
247 enum drm_connector_status conn_status = connector_status_disconnected; 247 enum drm_connector_status conn_status = connector_status_disconnected;
@@ -259,7 +259,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
259 259
260 nv_encoder = nouveau_connector_ddc_detect(connector); 260 nv_encoder = nouveau_connector_ddc_detect(connector);
261 if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) { 261 if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
262 nv_connector->edid = drm_get_edid(connector, &i2c->adapter); 262 nv_connector->edid = drm_get_edid(connector, i2c);
263 drm_mode_connector_update_edid_property(connector, 263 drm_mode_connector_update_edid_property(connector,
264 nv_connector->edid); 264 nv_connector->edid);
265 if (!nv_connector->edid) { 265 if (!nv_connector->edid) {
@@ -930,11 +930,11 @@ nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
930 nv_encoder->dcb->type == DCB_OUTPUT_DP) { 930 nv_encoder->dcb->type == DCB_OUTPUT_DP) {
931 if (mode == DRM_MODE_DPMS_ON) { 931 if (mode == DRM_MODE_DPMS_ON) {
932 u8 data = DP_SET_POWER_D0; 932 u8 data = DP_SET_POWER_D0;
933 nv_wraux(nv_encoder->i2c, DP_SET_POWER, &data, 1); 933 nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
934 usleep_range(1000, 2000); 934 usleep_range(1000, 2000);
935 } else { 935 } else {
936 u8 data = DP_SET_POWER_D3; 936 u8 data = DP_SET_POWER_D3;
937 nv_wraux(nv_encoder->i2c, DP_SET_POWER, &data, 1); 937 nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
938 } 938 }
939 } 939 }
940 940
@@ -980,29 +980,29 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
980} 980}
981 981
982static ssize_t 982static ssize_t
983nouveau_connector_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) 983nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
984{ 984{
985 struct nouveau_connector *nv_connector = 985 struct nouveau_connector *nv_connector =
986 container_of(aux, typeof(*nv_connector), aux); 986 container_of(obj, typeof(*nv_connector), aux);
987 struct nouveau_encoder *nv_encoder; 987 struct nouveau_encoder *nv_encoder;
988 struct nvkm_i2c_port *port; 988 struct nvkm_i2c_aux *aux;
989 int ret; 989 int ret;
990 990
991 nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP); 991 nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
992 if (!nv_encoder || !(port = nv_encoder->i2c)) 992 if (!nv_encoder || !(aux = nv_encoder->aux))
993 return -ENODEV; 993 return -ENODEV;
994 if (WARN_ON(msg->size > 16)) 994 if (WARN_ON(msg->size > 16))
995 return -E2BIG; 995 return -E2BIG;
996 if (msg->size == 0) 996 if (msg->size == 0)
997 return msg->size; 997 return msg->size;
998 998
999 ret = nvkm_i2c(port)->acquire(port, 0); 999 ret = nvkm_i2c_aux_acquire(aux);
1000 if (ret) 1000 if (ret)
1001 return ret; 1001 return ret;
1002 1002
1003 ret = port->func->aux(port, false, msg->request, msg->address, 1003 ret = nvkm_i2c_aux_xfer(aux, false, msg->request, msg->address,
1004 msg->buffer, msg->size); 1004 msg->buffer, msg->size);
1005 nvkm_i2c(port)->release(port); 1005 nvkm_i2c_aux_release(aux);
1006 if (ret >= 0) { 1006 if (ret >= 0) {
1007 msg->reply = ret; 1007 msg->reply = ret;
1008 return msg->size; 1008 return msg->size;
@@ -1256,8 +1256,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
1256 break; 1256 break;
1257 } 1257 }
1258 1258
1259 ret = nvif_notify_init(&disp->disp, NULL, nouveau_connector_hotplug, 1259 ret = nvif_notify_init(&disp->disp, nouveau_connector_hotplug, true,
1260 true, NV04_DISP_NTFY_CONN, 1260 NV04_DISP_NTFY_CONN,
1261 &(struct nvif_notify_conn_req_v0) { 1261 &(struct nvif_notify_conn_req_v0) {
1262 .mask = NVIF_NOTIFY_CONN_V0_ANY, 1262 .mask = NVIF_NOTIFY_CONN_V0_ANY,
1263 .conn = index, 1263 .conn = index,
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 8670d90cdc11..cc6c228e11c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -185,7 +185,7 @@ nouveau_display_vblank_init(struct drm_device *dev)
185 185
186 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 186 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
187 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 187 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
188 ret = nvif_notify_init(&disp->disp, NULL, 188 ret = nvif_notify_init(&disp->disp,
189 nouveau_display_vblank_handler, false, 189 nouveau_display_vblank_handler, false,
190 NV04_DISP_NTFY_VBLANK, 190 NV04_DISP_NTFY_VBLANK,
191 &(struct nvif_notify_head_req_v0) { 191 &(struct nvif_notify_head_req_v0) {
@@ -358,6 +358,7 @@ int
358nouveau_display_init(struct drm_device *dev) 358nouveau_display_init(struct drm_device *dev)
359{ 359{
360 struct nouveau_display *disp = nouveau_display(dev); 360 struct nouveau_display *disp = nouveau_display(dev);
361 struct nouveau_drm *drm = nouveau_drm(dev);
361 struct drm_connector *connector; 362 struct drm_connector *connector;
362 int ret; 363 int ret;
363 364
@@ -374,6 +375,8 @@ nouveau_display_init(struct drm_device *dev)
374 nvif_notify_get(&conn->hpd); 375 nvif_notify_get(&conn->hpd);
375 } 376 }
376 377
378 /* enable flip completion events */
379 nvif_notify_get(&drm->flip);
377 return ret; 380 return ret;
378} 381}
379 382
@@ -381,6 +384,7 @@ void
381nouveau_display_fini(struct drm_device *dev) 384nouveau_display_fini(struct drm_device *dev)
382{ 385{
383 struct nouveau_display *disp = nouveau_display(dev); 386 struct nouveau_display *disp = nouveau_display(dev);
387 struct nouveau_drm *drm = nouveau_drm(dev);
384 struct drm_connector *connector; 388 struct drm_connector *connector;
385 int head; 389 int head;
386 390
@@ -388,6 +392,9 @@ nouveau_display_fini(struct drm_device *dev)
388 for (head = 0; head < dev->mode_config.num_crtc; head++) 392 for (head = 0; head < dev->mode_config.num_crtc; head++)
389 drm_vblank_off(dev, head); 393 drm_vblank_off(dev, head);
390 394
395 /* disable flip completion events */
396 nvif_notify_put(&drm->flip);
397
391 /* disable hotplug interrupts */ 398 /* disable hotplug interrupts */
392 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 399 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
393 struct nouveau_connector *conn = nouveau_connector(connector); 400 struct nouveau_connector *conn = nouveau_connector(connector);
@@ -438,6 +445,7 @@ int
438nouveau_display_create(struct drm_device *dev) 445nouveau_display_create(struct drm_device *dev)
439{ 446{
440 struct nouveau_drm *drm = nouveau_drm(dev); 447 struct nouveau_drm *drm = nouveau_drm(dev);
448 struct nvkm_device *device = nvxx_device(&drm->device);
441 struct nouveau_display *disp; 449 struct nouveau_display *disp;
442 int ret; 450 int ret;
443 451
@@ -450,7 +458,7 @@ nouveau_display_create(struct drm_device *dev)
450 drm_mode_create_dvi_i_properties(dev); 458 drm_mode_create_dvi_i_properties(dev);
451 459
452 dev->mode_config.funcs = &nouveau_mode_config_funcs; 460 dev->mode_config.funcs = &nouveau_mode_config_funcs;
453 dev->mode_config.fb_base = nv_device_resource_start(nvxx_device(&drm->device), 1); 461 dev->mode_config.fb_base = device->func->resource_addr(device, 1);
454 462
455 dev->mode_config.min_width = 0; 463 dev->mode_config.min_width = 0;
456 dev->mode_config.min_height = 0; 464 dev->mode_config.min_height = 0;
@@ -494,7 +502,7 @@ nouveau_display_create(struct drm_device *dev)
494 int i; 502 int i;
495 503
496 for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) { 504 for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
497 ret = nvif_object_init(nvif_object(&drm->device), NULL, 505 ret = nvif_object_init(&drm->device.object,
498 NVDRM_DISPLAY, oclass[i], 506 NVDRM_DISPLAY, oclass[i],
499 NULL, 0, &disp->disp); 507 NULL, 0, &disp->disp);
500 } 508 }
@@ -711,7 +719,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
711 chan = drm->channel; 719 chan = drm->channel;
712 if (!chan) 720 if (!chan)
713 return -ENODEV; 721 return -ENODEV;
714 cli = (void *)nvif_client(&chan->device->base); 722 cli = (void *)chan->user.client;
715 723
716 s = kzalloc(sizeof(*s), GFP_KERNEL); 724 s = kzalloc(sizeof(*s), GFP_KERNEL);
717 if (!s) 725 if (!s)
@@ -847,10 +855,10 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
847} 855}
848 856
849int 857int
850nouveau_flip_complete(void *data) 858nouveau_flip_complete(struct nvif_notify *notify)
851{ 859{
852 struct nouveau_channel *chan = data; 860 struct nouveau_drm *drm = container_of(notify, typeof(*drm), flip);
853 struct nouveau_drm *drm = chan->drm; 861 struct nouveau_channel *chan = drm->channel;
854 struct nouveau_page_flip_state state; 862 struct nouveau_page_flip_state state;
855 863
856 if (!nouveau_finish_page_flip(chan, &state)) { 864 if (!nouveau_finish_page_flip(chan, &state)) {
@@ -861,7 +869,7 @@ nouveau_flip_complete(void *data)
861 } 869 }
862 } 870 }
863 871
864 return 0; 872 return NVIF_NOTIFY_KEEP;
865} 873}
866 874
867int 875int
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 6d9245aa81a6..d168c63533c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -52,9 +52,9 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
52{ 52{
53 uint64_t val; 53 uint64_t val;
54 54
55 val = nvif_rd32(chan, chan->user_get); 55 val = nvif_rd32(&chan->user, chan->user_get);
56 if (chan->user_get_hi) 56 if (chan->user_get_hi)
57 val |= (uint64_t)nvif_rd32(chan, chan->user_get_hi) << 32; 57 val |= (uint64_t)nvif_rd32(&chan->user, chan->user_get_hi) << 32;
58 58
59 /* reset counter as long as GET is still advancing, this is 59 /* reset counter as long as GET is still advancing, this is
60 * to avoid misdetecting a GPU lockup if the GPU happens to 60 * to avoid misdetecting a GPU lockup if the GPU happens to
@@ -82,7 +82,7 @@ void
82nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, 82nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
83 int delta, int length) 83 int delta, int length)
84{ 84{
85 struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base); 85 struct nouveau_cli *cli = (void *)chan->user.client;
86 struct nouveau_bo *pb = chan->push.buffer; 86 struct nouveau_bo *pb = chan->push.buffer;
87 struct nvkm_vma *vma; 87 struct nvkm_vma *vma;
88 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; 88 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
@@ -103,7 +103,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
103 /* Flush writes. */ 103 /* Flush writes. */
104 nouveau_bo_rd32(pb, 0); 104 nouveau_bo_rd32(pb, 0);
105 105
106 nvif_wr32(chan, 0x8c, chan->dma.ib_put); 106 nvif_wr32(&chan->user, 0x8c, chan->dma.ib_put);
107 chan->dma.ib_free--; 107 chan->dma.ib_free--;
108} 108}
109 109
@@ -113,7 +113,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
113 uint32_t cnt = 0, prev_get = 0; 113 uint32_t cnt = 0, prev_get = 0;
114 114
115 while (chan->dma.ib_free < count) { 115 while (chan->dma.ib_free < count) {
116 uint32_t get = nvif_rd32(chan, 0x88); 116 uint32_t get = nvif_rd32(&chan->user, 0x88);
117 if (get != prev_get) { 117 if (get != prev_get) {
118 prev_get = get; 118 prev_get = get;
119 cnt = 0; 119 cnt = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 8da0a272c45a..aff3a9d0a1fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -140,7 +140,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
140#define WRITE_PUT(val) do { \ 140#define WRITE_PUT(val) do { \
141 mb(); \ 141 mb(); \
142 nouveau_bo_rd32(chan->push.buffer, 0); \ 142 nouveau_bo_rd32(chan->push.buffer, 0); \
143 nvif_wr32(chan, chan->user_put, ((val) << 2) + chan->push.vma.offset); \ 143 nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
144} while (0) 144} while (0)
145 145
146static inline void 146static inline void
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index c3ef30b3a5ec..e17e15ec7d43 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -31,8 +31,7 @@
31#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
32 32
33static void 33static void
34nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_port *auxch, 34nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_aux *aux, u8 *dpcd)
35 u8 *dpcd)
36{ 35{
37 struct nouveau_drm *drm = nouveau_drm(dev); 36 struct nouveau_drm *drm = nouveau_drm(dev);
38 u8 buf[3]; 37 u8 buf[3];
@@ -40,11 +39,11 @@ nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_port *auxch,
40 if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) 39 if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
41 return; 40 return;
42 41
43 if (!nv_rdaux(auxch, DP_SINK_OUI, buf, 3)) 42 if (!nvkm_rdaux(aux, DP_SINK_OUI, buf, 3))
44 NV_DEBUG(drm, "Sink OUI: %02hx%02hx%02hx\n", 43 NV_DEBUG(drm, "Sink OUI: %02hx%02hx%02hx\n",
45 buf[0], buf[1], buf[2]); 44 buf[0], buf[1], buf[2]);
46 45
47 if (!nv_rdaux(auxch, DP_BRANCH_OUI, buf, 3)) 46 if (!nvkm_rdaux(aux, DP_BRANCH_OUI, buf, 3))
48 NV_DEBUG(drm, "Branch OUI: %02hx%02hx%02hx\n", 47 NV_DEBUG(drm, "Branch OUI: %02hx%02hx%02hx\n",
49 buf[0], buf[1], buf[2]); 48 buf[0], buf[1], buf[2]);
50 49
@@ -55,15 +54,15 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
55{ 54{
56 struct drm_device *dev = nv_encoder->base.base.dev; 55 struct drm_device *dev = nv_encoder->base.base.dev;
57 struct nouveau_drm *drm = nouveau_drm(dev); 56 struct nouveau_drm *drm = nouveau_drm(dev);
58 struct nvkm_i2c_port *auxch; 57 struct nvkm_i2c_aux *aux;
59 u8 *dpcd = nv_encoder->dp.dpcd; 58 u8 *dpcd = nv_encoder->dp.dpcd;
60 int ret; 59 int ret;
61 60
62 auxch = nv_encoder->i2c; 61 aux = nv_encoder->aux;
63 if (!auxch) 62 if (!aux)
64 return -ENODEV; 63 return -ENODEV;
65 64
66 ret = nv_rdaux(auxch, DP_DPCD_REV, dpcd, 8); 65 ret = nvkm_rdaux(aux, DP_DPCD_REV, dpcd, 8);
67 if (ret) 66 if (ret)
68 return ret; 67 return ret;
69 68
@@ -84,6 +83,6 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
84 NV_DEBUG(drm, "maximum: %dx%d\n", 83 NV_DEBUG(drm, "maximum: %dx%d\n",
85 nv_encoder->dp.link_nr, nv_encoder->dp.link_bw); 84 nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
86 85
87 nouveau_dp_probe_oui(dev, auxch, dpcd); 86 nouveau_dp_probe_oui(dev, aux, dpcd);
88 return 0; 87 return 0;
89} 88}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 109b8262dc85..ccefb645fd55 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -32,15 +32,15 @@
32#include "drmP.h" 32#include "drmP.h"
33#include "drm_crtc_helper.h" 33#include "drm_crtc_helper.h"
34 34
35#include <core/device.h>
36#include <core/gpuobj.h> 35#include <core/gpuobj.h>
37#include <core/option.h> 36#include <core/option.h>
37#include <core/pci.h>
38#include <core/tegra.h>
38 39
39#include "nouveau_drm.h" 40#include "nouveau_drm.h"
40#include "nouveau_dma.h" 41#include "nouveau_dma.h"
41#include "nouveau_ttm.h" 42#include "nouveau_ttm.h"
42#include "nouveau_gem.h" 43#include "nouveau_gem.h"
43#include "nouveau_agp.h"
44#include "nouveau_vga.h" 44#include "nouveau_vga.h"
45#include "nouveau_sysfs.h" 45#include "nouveau_sysfs.h"
46#include "nouveau_hwmon.h" 46#include "nouveau_hwmon.h"
@@ -105,14 +105,18 @@ nouveau_name(struct drm_device *dev)
105} 105}
106 106
107static int 107static int
108nouveau_cli_create(u64 name, const char *sname, 108nouveau_cli_create(struct drm_device *dev, const char *sname,
109 int size, void **pcli) 109 int size, void **pcli)
110{ 110{
111 struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL); 111 struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL);
112 int ret;
112 if (cli) { 113 if (cli) {
113 int ret = nvif_client_init(NULL, NULL, sname, name, 114 snprintf(cli->name, sizeof(cli->name), "%s", sname);
114 nouveau_config, nouveau_debug, 115 cli->dev = dev;
115 &cli->base); 116
117 ret = nvif_client_init(NULL, cli->name, nouveau_name(dev),
118 nouveau_config, nouveau_debug,
119 &cli->base);
116 if (ret == 0) { 120 if (ret == 0) {
117 mutex_init(&cli->mutex); 121 mutex_init(&cli->mutex);
118 usif_client_init(cli); 122 usif_client_init(cli);
@@ -134,12 +138,17 @@ nouveau_cli_destroy(struct nouveau_cli *cli)
134static void 138static void
135nouveau_accel_fini(struct nouveau_drm *drm) 139nouveau_accel_fini(struct nouveau_drm *drm)
136{ 140{
137 nouveau_channel_del(&drm->channel); 141 nouveau_channel_idle(drm->channel);
138 nvif_object_fini(&drm->ntfy); 142 nvif_object_fini(&drm->ntfy);
139 nvkm_gpuobj_ref(NULL, &drm->notify); 143 nvkm_gpuobj_del(&drm->notify);
144 nvif_notify_fini(&drm->flip);
140 nvif_object_fini(&drm->nvsw); 145 nvif_object_fini(&drm->nvsw);
141 nouveau_channel_del(&drm->cechan); 146 nouveau_channel_del(&drm->channel);
147
148 nouveau_channel_idle(drm->cechan);
142 nvif_object_fini(&drm->ttm.copy); 149 nvif_object_fini(&drm->ttm.copy);
150 nouveau_channel_del(&drm->cechan);
151
143 if (drm->fence) 152 if (drm->fence)
144 nouveau_fence(drm)->dtor(drm); 153 nouveau_fence(drm)->dtor(drm);
145} 154}
@@ -148,9 +157,9 @@ static void
148nouveau_accel_init(struct nouveau_drm *drm) 157nouveau_accel_init(struct nouveau_drm *drm)
149{ 158{
150 struct nvif_device *device = &drm->device; 159 struct nvif_device *device = &drm->device;
160 struct nvif_sclass *sclass;
151 u32 arg0, arg1; 161 u32 arg0, arg1;
152 u32 sclass[16]; 162 int ret, i, n;
153 int ret, i;
154 163
155 if (nouveau_noaccel) 164 if (nouveau_noaccel)
156 return; 165 return;
@@ -159,12 +168,12 @@ nouveau_accel_init(struct nouveau_drm *drm)
159 /*XXX: this is crap, but the fence/channel stuff is a little 168 /*XXX: this is crap, but the fence/channel stuff is a little
160 * backwards in some places. this will be fixed. 169 * backwards in some places. this will be fixed.
161 */ 170 */
162 ret = nvif_object_sclass(&device->base, sclass, ARRAY_SIZE(sclass)); 171 ret = n = nvif_object_sclass_get(&device->object, &sclass);
163 if (ret < 0) 172 if (ret < 0)
164 return; 173 return;
165 174
166 for (ret = -ENOSYS, i = 0; ret && i < ARRAY_SIZE(sclass); i++) { 175 for (ret = -ENOSYS, i = 0; i < n; i++) {
167 switch (sclass[i]) { 176 switch (sclass[i].oclass) {
168 case NV03_CHANNEL_DMA: 177 case NV03_CHANNEL_DMA:
169 ret = nv04_fence_create(drm); 178 ret = nv04_fence_create(drm);
170 break; 179 break;
@@ -191,6 +200,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
191 } 200 }
192 } 201 }
193 202
203 nvif_object_sclass_put(&sclass);
194 if (ret) { 204 if (ret) {
195 NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret); 205 NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
196 nouveau_accel_fini(drm); 206 nouveau_accel_fini(drm);
@@ -231,10 +241,9 @@ nouveau_accel_init(struct nouveau_drm *drm)
231 return; 241 return;
232 } 242 }
233 243
234 ret = nvif_object_init(drm->channel->object, NULL, NVDRM_NVSW, 244 ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
235 nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw); 245 nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw);
236 if (ret == 0) { 246 if (ret == 0) {
237 struct nvkm_sw_chan *swch;
238 ret = RING_SPACE(drm->channel, 2); 247 ret = RING_SPACE(drm->channel, 2);
239 if (ret == 0) { 248 if (ret == 0) {
240 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { 249 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
@@ -246,9 +255,16 @@ nouveau_accel_init(struct nouveau_drm *drm)
246 OUT_RING (drm->channel, 0x001f0000); 255 OUT_RING (drm->channel, 0x001f0000);
247 } 256 }
248 } 257 }
249 swch = (void *)nvxx_object(&drm->nvsw)->parent; 258
250 swch->flip = nouveau_flip_complete; 259 ret = nvif_notify_init(&drm->nvsw, nouveau_flip_complete,
251 swch->flip_data = drm->channel; 260 false, NVSW_NTFY_UEVENT, NULL, 0, 0,
261 &drm->flip);
262 if (ret == 0)
263 ret = nvif_notify_get(&drm->flip);
264 if (ret) {
265 nouveau_accel_fini(drm);
266 return;
267 }
252 } 268 }
253 269
254 if (ret) { 270 if (ret) {
@@ -258,15 +274,15 @@ nouveau_accel_init(struct nouveau_drm *drm)
258 } 274 }
259 275
260 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { 276 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
261 ret = nvkm_gpuobj_new(nvxx_object(&drm->device), NULL, 32, 277 ret = nvkm_gpuobj_new(nvxx_device(&drm->device), 32, 0, false,
262 0, 0, &drm->notify); 278 NULL, &drm->notify);
263 if (ret) { 279 if (ret) {
264 NV_ERROR(drm, "failed to allocate notifier, %d\n", ret); 280 NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
265 nouveau_accel_fini(drm); 281 nouveau_accel_fini(drm);
266 return; 282 return;
267 } 283 }
268 284
269 ret = nvif_object_init(drm->channel->object, NULL, NvNotify0, 285 ret = nvif_object_init(&drm->channel->user, NvNotify0,
270 NV_DMA_IN_MEMORY, 286 NV_DMA_IN_MEMORY,
271 &(struct nv_dma_v0) { 287 &(struct nv_dma_v0) {
272 .target = NV_DMA_V0_TARGET_VRAM, 288 .target = NV_DMA_V0_TARGET_VRAM,
@@ -321,9 +337,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
321 remove_conflicting_framebuffers(aper, "nouveaufb", boot); 337 remove_conflicting_framebuffers(aper, "nouveaufb", boot);
322 kfree(aper); 338 kfree(aper);
323 339
324 ret = nvkm_device_create(pdev, NVKM_BUS_PCI, 340 ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
325 nouveau_pci_name(pdev), pci_name(pdev), 341 true, true, ~0ULL, &device);
326 nouveau_config, nouveau_debug, &device);
327 if (ret) 342 if (ret)
328 return ret; 343 return ret;
329 344
@@ -331,7 +346,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
331 346
332 ret = drm_get_pci_dev(pdev, pent, &driver_pci); 347 ret = drm_get_pci_dev(pdev, pent, &driver_pci);
333 if (ret) { 348 if (ret) {
334 nvkm_object_ref(NULL, (struct nvkm_object **)&device); 349 nvkm_device_del(&device);
335 return ret; 350 return ret;
336 } 351 }
337 352
@@ -371,12 +386,10 @@ nouveau_get_hdmi_dev(struct nouveau_drm *drm)
371static int 386static int
372nouveau_drm_load(struct drm_device *dev, unsigned long flags) 387nouveau_drm_load(struct drm_device *dev, unsigned long flags)
373{ 388{
374 struct pci_dev *pdev = dev->pdev;
375 struct nouveau_drm *drm; 389 struct nouveau_drm *drm;
376 int ret; 390 int ret;
377 391
378 ret = nouveau_cli_create(nouveau_name(dev), "DRM", sizeof(*drm), 392 ret = nouveau_cli_create(dev, "DRM", sizeof(*drm), (void **)&drm);
379 (void **)&drm);
380 if (ret) 393 if (ret)
381 return ret; 394 return ret;
382 395
@@ -390,36 +403,10 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
390 403
391 nouveau_get_hdmi_dev(drm); 404 nouveau_get_hdmi_dev(drm);
392 405
393 /* make sure AGP controller is in a consistent state before we 406 ret = nvif_device_init(&drm->client.base.object,
394 * (possibly) execute vbios init tables (see nouveau_agp.h) 407 NVDRM_DEVICE, NV_DEVICE,
395 */
396 if (pdev && drm_pci_device_is_agp(dev) && dev->agp) {
397 const u64 enables = NV_DEVICE_V0_DISABLE_IDENTIFY |
398 NV_DEVICE_V0_DISABLE_MMIO;
399 /* dummy device object, doesn't init anything, but allows
400 * agp code access to registers
401 */
402 ret = nvif_device_init(&drm->client.base.base, NULL,
403 NVDRM_DEVICE, NV_DEVICE,
404 &(struct nv_device_v0) {
405 .device = ~0,
406 .disable = ~enables,
407 .debug0 = ~0,
408 }, sizeof(struct nv_device_v0),
409 &drm->device);
410 if (ret)
411 goto fail_device;
412
413 nouveau_agp_reset(drm);
414 nvif_device_fini(&drm->device);
415 }
416
417 ret = nvif_device_init(&drm->client.base.base, NULL, NVDRM_DEVICE,
418 NV_DEVICE,
419 &(struct nv_device_v0) { 408 &(struct nv_device_v0) {
420 .device = ~0, 409 .device = ~0,
421 .disable = 0,
422 .debug0 = 0,
423 }, sizeof(struct nv_device_v0), 410 }, sizeof(struct nv_device_v0),
424 &drm->device); 411 &drm->device);
425 if (ret) 412 if (ret)
@@ -432,14 +419,13 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
432 * better fix is found - assuming there is one... 419 * better fix is found - assuming there is one...
433 */ 420 */
434 if (drm->device.info.chipset == 0xc1) 421 if (drm->device.info.chipset == 0xc1)
435 nvif_mask(&drm->device, 0x00088080, 0x00000800, 0x00000000); 422 nvif_mask(&drm->device.object, 0x00088080, 0x00000800, 0x00000000);
436 423
437 nouveau_vga_init(drm); 424 nouveau_vga_init(drm);
438 nouveau_agp_init(drm);
439 425
440 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 426 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
441 ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40), 427 ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
442 0x1000, &drm->client.vm); 428 0x1000, NULL, &drm->client.vm);
443 if (ret) 429 if (ret)
444 goto fail_device; 430 goto fail_device;
445 431
@@ -486,7 +472,6 @@ fail_dispctor:
486fail_bios: 472fail_bios:
487 nouveau_ttm_fini(drm); 473 nouveau_ttm_fini(drm);
488fail_ttm: 474fail_ttm:
489 nouveau_agp_fini(drm);
490 nouveau_vga_fini(drm); 475 nouveau_vga_fini(drm);
491fail_device: 476fail_device:
492 nvif_device_fini(&drm->device); 477 nvif_device_fini(&drm->device);
@@ -512,7 +497,6 @@ nouveau_drm_unload(struct drm_device *dev)
512 nouveau_bios_takedown(dev); 497 nouveau_bios_takedown(dev);
513 498
514 nouveau_ttm_fini(drm); 499 nouveau_ttm_fini(drm);
515 nouveau_agp_fini(drm);
516 nouveau_vga_fini(drm); 500 nouveau_vga_fini(drm);
517 501
518 nvif_device_fini(&drm->device); 502 nvif_device_fini(&drm->device);
@@ -527,15 +511,14 @@ nouveau_drm_device_remove(struct drm_device *dev)
527{ 511{
528 struct nouveau_drm *drm = nouveau_drm(dev); 512 struct nouveau_drm *drm = nouveau_drm(dev);
529 struct nvkm_client *client; 513 struct nvkm_client *client;
530 struct nvkm_object *device; 514 struct nvkm_device *device;
531 515
532 dev->irq_enabled = false; 516 dev->irq_enabled = false;
533 client = nvxx_client(&drm->client.base); 517 client = nvxx_client(&drm->client.base);
534 device = client->device; 518 device = nvkm_device_find(client->device);
535 drm_put_dev(dev); 519 drm_put_dev(dev);
536 520
537 nvkm_object_ref(NULL, &device); 521 nvkm_device_del(&device);
538 nvkm_object_debug();
539} 522}
540 523
541static void 524static void
@@ -597,7 +580,6 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
597 if (ret) 580 if (ret)
598 goto fail_client; 581 goto fail_client;
599 582
600 nouveau_agp_fini(drm);
601 return 0; 583 return 0;
602 584
603fail_client: 585fail_client:
@@ -622,13 +604,8 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
622 struct nouveau_drm *drm = nouveau_drm(dev); 604 struct nouveau_drm *drm = nouveau_drm(dev);
623 struct nouveau_cli *cli; 605 struct nouveau_cli *cli;
624 606
625 NV_INFO(drm, "re-enabling device...\n");
626
627 nouveau_agp_reset(drm);
628
629 NV_INFO(drm, "resuming kernel object tree...\n"); 607 NV_INFO(drm, "resuming kernel object tree...\n");
630 nvif_client_resume(&drm->client.base); 608 nvif_client_resume(&drm->client.base);
631 nouveau_agp_init(drm);
632 609
633 NV_INFO(drm, "resuming client object trees...\n"); 610 NV_INFO(drm, "resuming client object trees...\n");
634 if (drm->fence && nouveau_fence(drm)->resume) 611 if (drm->fence && nouveau_fence(drm)->resume)
@@ -728,7 +705,6 @@ nouveau_pmops_runtime_suspend(struct device *dev)
728 return -EBUSY; 705 return -EBUSY;
729 } 706 }
730 707
731 nv_debug_level(SILENT);
732 drm_kms_helper_poll_disable(drm_dev); 708 drm_kms_helper_poll_disable(drm_dev);
733 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); 709 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
734 nouveau_switcheroo_optimus_dsm(); 710 nouveau_switcheroo_optimus_dsm();
@@ -762,10 +738,9 @@ nouveau_pmops_runtime_resume(struct device *dev)
762 ret = nouveau_do_resume(drm_dev, true); 738 ret = nouveau_do_resume(drm_dev, true);
763 drm_kms_helper_poll_enable(drm_dev); 739 drm_kms_helper_poll_enable(drm_dev);
764 /* do magic */ 740 /* do magic */
765 nvif_mask(device, 0x88488, (1 << 25), (1 << 25)); 741 nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
766 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); 742 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
767 drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; 743 drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
768 nv_debug_level(NORMAL);
769 return ret; 744 return ret;
770} 745}
771 746
@@ -826,8 +801,7 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
826 get_task_comm(tmpname, current); 801 get_task_comm(tmpname, current);
827 snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); 802 snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
828 803
829 ret = nouveau_cli_create(nouveau_name(dev), name, sizeof(*cli), 804 ret = nouveau_cli_create(dev, name, sizeof(*cli), (void **)&cli);
830 (void **)&cli);
831 805
832 if (ret) 806 if (ret)
833 goto out_suspend; 807 goto out_suspend;
@@ -836,7 +810,7 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
836 810
837 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 811 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
838 ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40), 812 ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
839 0x1000, &cli->vm); 813 0x1000, NULL, &cli->vm);
840 if (ret) { 814 if (ret) {
841 nouveau_cli_destroy(cli); 815 nouveau_cli_destroy(cli);
842 goto out_suspend; 816 goto out_suspend;
@@ -945,7 +919,6 @@ nouveau_driver_fops = {
945static struct drm_driver 919static struct drm_driver
946driver_stub = { 920driver_stub = {
947 .driver_features = 921 .driver_features =
948 DRIVER_USE_AGP |
949 DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | 922 DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
950 DRIVER_KMS_LEGACY_CONTEXT, 923 DRIVER_KMS_LEGACY_CONTEXT,
951 924
@@ -1057,18 +1030,16 @@ nouveau_drm_pci_driver = {
1057}; 1030};
1058 1031
1059struct drm_device * 1032struct drm_device *
1060nouveau_platform_device_create_(struct platform_device *pdev, int size, 1033nouveau_platform_device_create(struct platform_device *pdev,
1061 void **pobject) 1034 struct nvkm_device **pdevice)
1062{ 1035{
1063 struct drm_device *drm; 1036 struct drm_device *drm;
1064 int err; 1037 int err;
1065 1038
1066 err = nvkm_device_create_(pdev, NVKM_BUS_PLATFORM, 1039 err = nvkm_device_tegra_new(pdev, nouveau_config, nouveau_debug,
1067 nouveau_platform_name(pdev), 1040 true, true, ~0ULL, pdevice);
1068 dev_name(&pdev->dev), nouveau_config,
1069 nouveau_debug, size, pobject);
1070 if (err) 1041 if (err)
1071 return ERR_PTR(err); 1042 goto err_free;
1072 1043
1073 drm = drm_dev_alloc(&driver_platform, &pdev->dev); 1044 drm = drm_dev_alloc(&driver_platform, &pdev->dev);
1074 if (!drm) { 1045 if (!drm) {
@@ -1086,7 +1057,7 @@ nouveau_platform_device_create_(struct platform_device *pdev, int size,
1086 return drm; 1057 return drm;
1087 1058
1088err_free: 1059err_free:
1089 nvkm_object_ref(NULL, (struct nvkm_object **)pobject); 1060 nvkm_device_del(pdevice);
1090 1061
1091 return ERR_PTR(err); 1062 return ERR_PTR(err);
1092} 1063}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index dd726523ca99..3c902c24a8dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -9,8 +9,8 @@
9#define DRIVER_DATE "20120801" 9#define DRIVER_DATE "20120801"
10 10
11#define DRIVER_MAJOR 1 11#define DRIVER_MAJOR 1
12#define DRIVER_MINOR 2 12#define DRIVER_MINOR 3
13#define DRIVER_PATCHLEVEL 2 13#define DRIVER_PATCHLEVEL 0
14 14
15/* 15/*
16 * 1.1.1: 16 * 1.1.1:
@@ -30,6 +30,9 @@
30 * - allow concurrent access to bo's mapped read/write. 30 * - allow concurrent access to bo's mapped read/write.
31 * 1.2.2: 31 * 1.2.2:
32 * - add NOUVEAU_GEM_DOMAIN_COHERENT flag 32 * - add NOUVEAU_GEM_DOMAIN_COHERENT flag
33 * 1.3.0:
34 * - NVIF ABI modified, safe because only (current) users are test
35 * programs that get directly linked with NVKM.
33 */ 36 */
34 37
35#include <nvif/client.h> 38#include <nvif/client.h>
@@ -88,6 +91,8 @@ struct nouveau_cli {
88 void *abi16; 91 void *abi16;
89 struct list_head objects; 92 struct list_head objects;
90 struct list_head notifys; 93 struct list_head notifys;
94 char name[32];
95 struct drm_device *dev;
91}; 96};
92 97
93static inline struct nouveau_cli * 98static inline struct nouveau_cli *
@@ -109,13 +114,10 @@ struct nouveau_drm {
109 struct list_head clients; 114 struct list_head clients;
110 115
111 struct { 116 struct {
112 enum { 117 struct agp_bridge_data *bridge;
113 UNKNOWN = 0,
114 DISABLE = 1,
115 ENABLED = 2
116 } stat;
117 u32 base; 118 u32 base;
118 u32 size; 119 u32 size;
120 bool cma;
119 } agp; 121 } agp;
120 122
121 /* TTM interface support */ 123 /* TTM interface support */
@@ -148,6 +150,7 @@ struct nouveau_drm {
148 struct nouveau_fbdev *fbcon; 150 struct nouveau_fbdev *fbcon;
149 struct nvif_object nvsw; 151 struct nvif_object nvsw;
150 struct nvif_object ntfy; 152 struct nvif_object ntfy;
153 struct nvif_notify flip;
151 154
152 /* nv10-nv40 tiling regions */ 155 /* nv10-nv40 tiling regions */
153 struct { 156 struct {
@@ -180,22 +183,22 @@ nouveau_drm(struct drm_device *dev)
180int nouveau_pmops_suspend(struct device *); 183int nouveau_pmops_suspend(struct device *);
181int nouveau_pmops_resume(struct device *); 184int nouveau_pmops_resume(struct device *);
182 185
183#define nouveau_platform_device_create(p, u) \
184 nouveau_platform_device_create_(p, sizeof(**u), (void **)u)
185struct drm_device * 186struct drm_device *
186nouveau_platform_device_create_(struct platform_device *pdev, 187nouveau_platform_device_create(struct platform_device *, struct nvkm_device **);
187 int size, void **pobject);
188void nouveau_drm_device_remove(struct drm_device *dev); 188void nouveau_drm_device_remove(struct drm_device *dev);
189 189
190#define NV_PRINTK(l,c,f,a...) do { \ 190#define NV_PRINTK(l,c,f,a...) do { \
191 struct nouveau_cli *_cli = (c); \ 191 struct nouveau_cli *_cli = (c); \
192 nv_##l(_cli->base.base.priv, f, ##a); \ 192 dev_##l(_cli->dev->dev, "%s: "f, _cli->name, ##a); \
193} while(0) 193} while(0)
194#define NV_FATAL(drm,f,a...) NV_PRINTK(fatal, &(drm)->client, f, ##a) 194#define NV_FATAL(drm,f,a...) NV_PRINTK(crit, &(drm)->client, f, ##a)
195#define NV_ERROR(drm,f,a...) NV_PRINTK(error, &(drm)->client, f, ##a) 195#define NV_ERROR(drm,f,a...) NV_PRINTK(err, &(drm)->client, f, ##a)
196#define NV_WARN(drm,f,a...) NV_PRINTK(warn, &(drm)->client, f, ##a) 196#define NV_WARN(drm,f,a...) NV_PRINTK(warn, &(drm)->client, f, ##a)
197#define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a) 197#define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a)
198#define NV_DEBUG(drm,f,a...) NV_PRINTK(debug, &(drm)->client, f, ##a) 198#define NV_DEBUG(drm,f,a...) do { \
199 if (unlikely(drm_debug & DRM_UT_DRIVER)) \
200 NV_PRINTK(info, &(drm)->client, f, ##a); \
201} while(0)
199 202
200extern int nouveau_modeset; 203extern int nouveau_modeset;
201 204
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index c57a37e8e1eb..b37da95105b0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -41,7 +41,9 @@ struct nouveau_encoder {
41 41
42 struct dcb_output *dcb; 42 struct dcb_output *dcb;
43 int or; 43 int or;
44 struct nvkm_i2c_port *i2c; 44
45 struct i2c_adapter *i2c;
46 struct nvkm_i2c_aux *aux;
45 47
46 /* different to drm_encoder.crtc, this reflects what's 48 /* different to drm_encoder.crtc, this reflects what's
47 * actually programmed on the hw, not the proposed crtc */ 49 * actually programmed on the hw, not the proposed crtc */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index c6d56bef5823..574c36b492ee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -169,7 +169,7 @@ void
169nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx) 169nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
170{ 170{
171 struct nouveau_fence_priv *priv = (void*)chan->drm->fence; 171 struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
172 struct nouveau_cli *cli = (void *)nvif_client(chan->object); 172 struct nouveau_cli *cli = (void *)chan->user.client;
173 int ret; 173 int ret;
174 174
175 INIT_LIST_HEAD(&fctx->flip); 175 INIT_LIST_HEAD(&fctx->flip);
@@ -188,13 +188,12 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
188 if (!priv->uevent) 188 if (!priv->uevent)
189 return; 189 return;
190 190
191 ret = nvif_notify_init(chan->object, NULL, 191 ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
192 nouveau_fence_wait_uevent_handler, false, 192 false, G82_CHANNEL_DMA_V0_NTFY_UEVENT,
193 G82_CHANNEL_DMA_V0_NTFY_UEVENT, 193 &(struct nvif_notify_uevent_req) { },
194 &(struct nvif_notify_uevent_req) { }, 194 sizeof(struct nvif_notify_uevent_req),
195 sizeof(struct nvif_notify_uevent_req), 195 sizeof(struct nvif_notify_uevent_rep),
196 sizeof(struct nvif_notify_uevent_rep), 196 &fctx->notify);
197 &fctx->notify);
198 197
199 WARN_ON(ret); 198 WARN_ON(ret);
200} 199}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index d9241d8247fb..2e3a62d38fe9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -85,7 +85,7 @@ int nv50_fence_create(struct nouveau_drm *);
85int nv84_fence_create(struct nouveau_drm *); 85int nv84_fence_create(struct nouveau_drm *);
86int nvc0_fence_create(struct nouveau_drm *); 86int nvc0_fence_create(struct nouveau_drm *);
87 87
88int nouveau_flip_complete(void *chan); 88int nouveau_flip_complete(struct nvif_notify *);
89 89
90struct nv84_fence_chan { 90struct nv84_fence_chan {
91 struct nouveau_fence_chan base; 91 struct nouveau_fence_chan base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index af1ee517f372..2c9981512d27 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -254,13 +254,13 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
254{ 254{
255 struct nouveau_drm *drm = nouveau_drm(dev); 255 struct nouveau_drm *drm = nouveau_drm(dev);
256 struct nouveau_cli *cli = nouveau_cli(file_priv); 256 struct nouveau_cli *cli = nouveau_cli(file_priv);
257 struct nvkm_fb *pfb = nvxx_fb(&drm->device); 257 struct nvkm_fb *fb = nvxx_fb(&drm->device);
258 struct drm_nouveau_gem_new *req = data; 258 struct drm_nouveau_gem_new *req = data;
259 struct nouveau_bo *nvbo = NULL; 259 struct nouveau_bo *nvbo = NULL;
260 int ret = 0; 260 int ret = 0;
261 261
262 if (!pfb->memtype_valid(pfb, req->info.tile_flags)) { 262 if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
263 NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags); 263 NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
264 return -EINVAL; 264 return -EINVAL;
265 } 265 }
266 266
@@ -376,7 +376,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
376 ww_acquire_init(&op->ticket, &reservation_ww_class); 376 ww_acquire_init(&op->ticket, &reservation_ww_class);
377retry: 377retry:
378 if (++trycnt > 100000) { 378 if (++trycnt > 100000) {
379 NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__); 379 NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
380 return -EINVAL; 380 return -EINVAL;
381 } 381 }
382 382
@@ -387,7 +387,7 @@ retry:
387 387
388 gem = drm_gem_object_lookup(dev, file_priv, b->handle); 388 gem = drm_gem_object_lookup(dev, file_priv, b->handle);
389 if (!gem) { 389 if (!gem) {
390 NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle); 390 NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
391 ret = -ENOENT; 391 ret = -ENOENT;
392 break; 392 break;
393 } 393 }
@@ -399,7 +399,7 @@ retry:
399 } 399 }
400 400
401 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { 401 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
402 NV_PRINTK(error, cli, "multiple instances of buffer %d on " 402 NV_PRINTK(err, cli, "multiple instances of buffer %d on "
403 "validation list\n", b->handle); 403 "validation list\n", b->handle);
404 drm_gem_object_unreference_unlocked(gem); 404 drm_gem_object_unreference_unlocked(gem);
405 ret = -EINVAL; 405 ret = -EINVAL;
@@ -420,7 +420,7 @@ retry:
420 } 420 }
421 if (unlikely(ret)) { 421 if (unlikely(ret)) {
422 if (ret != -ERESTARTSYS) 422 if (ret != -ERESTARTSYS)
423 NV_PRINTK(error, cli, "fail reserve\n"); 423 NV_PRINTK(err, cli, "fail reserve\n");
424 break; 424 break;
425 } 425 }
426 } 426 }
@@ -438,7 +438,7 @@ retry:
438 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART) 438 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
439 list_add_tail(&nvbo->entry, &gart_list); 439 list_add_tail(&nvbo->entry, &gart_list);
440 else { 440 else {
441 NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n", 441 NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
442 b->valid_domains); 442 b->valid_domains);
443 list_add_tail(&nvbo->entry, &both_list); 443 list_add_tail(&nvbo->entry, &both_list);
444 ret = -EINVAL; 444 ret = -EINVAL;
@@ -476,21 +476,21 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
476 b->write_domains, 476 b->write_domains,
477 b->valid_domains); 477 b->valid_domains);
478 if (unlikely(ret)) { 478 if (unlikely(ret)) {
479 NV_PRINTK(error, cli, "fail set_domain\n"); 479 NV_PRINTK(err, cli, "fail set_domain\n");
480 return ret; 480 return ret;
481 } 481 }
482 482
483 ret = nouveau_bo_validate(nvbo, true, false); 483 ret = nouveau_bo_validate(nvbo, true, false);
484 if (unlikely(ret)) { 484 if (unlikely(ret)) {
485 if (ret != -ERESTARTSYS) 485 if (ret != -ERESTARTSYS)
486 NV_PRINTK(error, cli, "fail ttm_validate\n"); 486 NV_PRINTK(err, cli, "fail ttm_validate\n");
487 return ret; 487 return ret;
488 } 488 }
489 489
490 ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true); 490 ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
491 if (unlikely(ret)) { 491 if (unlikely(ret)) {
492 if (ret != -ERESTARTSYS) 492 if (ret != -ERESTARTSYS)
493 NV_PRINTK(error, cli, "fail post-validate sync\n"); 493 NV_PRINTK(err, cli, "fail post-validate sync\n");
494 return ret; 494 return ret;
495 } 495 }
496 496
@@ -537,14 +537,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
537 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); 537 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
538 if (unlikely(ret)) { 538 if (unlikely(ret)) {
539 if (ret != -ERESTARTSYS) 539 if (ret != -ERESTARTSYS)
540 NV_PRINTK(error, cli, "validate_init\n"); 540 NV_PRINTK(err, cli, "validate_init\n");
541 return ret; 541 return ret;
542 } 542 }
543 543
544 ret = validate_list(chan, cli, &op->list, pbbo, user_buffers); 544 ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
545 if (unlikely(ret < 0)) { 545 if (unlikely(ret < 0)) {
546 if (ret != -ERESTARTSYS) 546 if (ret != -ERESTARTSYS)
547 NV_PRINTK(error, cli, "validating bo list\n"); 547 NV_PRINTK(err, cli, "validating bo list\n");
548 validate_fini(op, NULL, NULL); 548 validate_fini(op, NULL, NULL);
549 return ret; 549 return ret;
550 } 550 }
@@ -600,7 +600,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
600 uint32_t data; 600 uint32_t data;
601 601
602 if (unlikely(r->bo_index > req->nr_buffers)) { 602 if (unlikely(r->bo_index > req->nr_buffers)) {
603 NV_PRINTK(error, cli, "reloc bo index invalid\n"); 603 NV_PRINTK(err, cli, "reloc bo index invalid\n");
604 ret = -EINVAL; 604 ret = -EINVAL;
605 break; 605 break;
606 } 606 }
@@ -610,7 +610,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
610 continue; 610 continue;
611 611
612 if (unlikely(r->reloc_bo_index > req->nr_buffers)) { 612 if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
613 NV_PRINTK(error, cli, "reloc container bo index invalid\n"); 613 NV_PRINTK(err, cli, "reloc container bo index invalid\n");
614 ret = -EINVAL; 614 ret = -EINVAL;
615 break; 615 break;
616 } 616 }
@@ -618,7 +618,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
618 618
619 if (unlikely(r->reloc_bo_offset + 4 > 619 if (unlikely(r->reloc_bo_offset + 4 >
620 nvbo->bo.mem.num_pages << PAGE_SHIFT)) { 620 nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
621 NV_PRINTK(error, cli, "reloc outside of bo\n"); 621 NV_PRINTK(err, cli, "reloc outside of bo\n");
622 ret = -EINVAL; 622 ret = -EINVAL;
623 break; 623 break;
624 } 624 }
@@ -627,7 +627,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
627 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, 627 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
628 &nvbo->kmap); 628 &nvbo->kmap);
629 if (ret) { 629 if (ret) {
630 NV_PRINTK(error, cli, "failed kmap for reloc\n"); 630 NV_PRINTK(err, cli, "failed kmap for reloc\n");
631 break; 631 break;
632 } 632 }
633 nvbo->validate_mapped = true; 633 nvbo->validate_mapped = true;
@@ -650,7 +650,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
650 650
651 ret = ttm_bo_wait(&nvbo->bo, true, false, false); 651 ret = ttm_bo_wait(&nvbo->bo, true, false, false);
652 if (ret) { 652 if (ret) {
653 NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret); 653 NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
654 break; 654 break;
655 } 655 }
656 656
@@ -681,7 +681,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
681 return -ENOMEM; 681 return -ENOMEM;
682 682
683 list_for_each_entry(temp, &abi16->channels, head) { 683 list_for_each_entry(temp, &abi16->channels, head) {
684 if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) { 684 if (temp->chan->user.handle == (NVDRM_CHAN | req->channel)) {
685 chan = temp->chan; 685 chan = temp->chan;
686 break; 686 break;
687 } 687 }
@@ -696,19 +696,19 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
696 goto out_next; 696 goto out_next;
697 697
698 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { 698 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
699 NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n", 699 NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
700 req->nr_push, NOUVEAU_GEM_MAX_PUSH); 700 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
701 return nouveau_abi16_put(abi16, -EINVAL); 701 return nouveau_abi16_put(abi16, -EINVAL);
702 } 702 }
703 703
704 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { 704 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
705 NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n", 705 NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
706 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); 706 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
707 return nouveau_abi16_put(abi16, -EINVAL); 707 return nouveau_abi16_put(abi16, -EINVAL);
708 } 708 }
709 709
710 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { 710 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
711 NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n", 711 NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
712 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); 712 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
713 return nouveau_abi16_put(abi16, -EINVAL); 713 return nouveau_abi16_put(abi16, -EINVAL);
714 } 714 }
@@ -726,7 +726,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
726 /* Ensure all push buffers are on validate list */ 726 /* Ensure all push buffers are on validate list */
727 for (i = 0; i < req->nr_push; i++) { 727 for (i = 0; i < req->nr_push; i++) {
728 if (push[i].bo_index >= req->nr_buffers) { 728 if (push[i].bo_index >= req->nr_buffers) {
729 NV_PRINTK(error, cli, "push %d buffer not in list\n", i); 729 NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
730 ret = -EINVAL; 730 ret = -EINVAL;
731 goto out_prevalid; 731 goto out_prevalid;
732 } 732 }
@@ -737,7 +737,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
737 req->nr_buffers, &op, &do_reloc); 737 req->nr_buffers, &op, &do_reloc);
738 if (ret) { 738 if (ret) {
739 if (ret != -ERESTARTSYS) 739 if (ret != -ERESTARTSYS)
740 NV_PRINTK(error, cli, "validate: %d\n", ret); 740 NV_PRINTK(err, cli, "validate: %d\n", ret);
741 goto out_prevalid; 741 goto out_prevalid;
742 } 742 }
743 743
@@ -745,7 +745,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
745 if (do_reloc) { 745 if (do_reloc) {
746 ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo); 746 ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
747 if (ret) { 747 if (ret) {
748 NV_PRINTK(error, cli, "reloc apply: %d\n", ret); 748 NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
749 goto out; 749 goto out;
750 } 750 }
751 } 751 }
@@ -753,7 +753,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
753 if (chan->dma.ib_max) { 753 if (chan->dma.ib_max) {
754 ret = nouveau_dma_wait(chan, req->nr_push + 1, 16); 754 ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
755 if (ret) { 755 if (ret) {
756 NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret); 756 NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
757 goto out; 757 goto out;
758 } 758 }
759 759
@@ -768,7 +768,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
768 if (drm->device.info.chipset >= 0x25) { 768 if (drm->device.info.chipset >= 0x25) {
769 ret = RING_SPACE(chan, req->nr_push * 2); 769 ret = RING_SPACE(chan, req->nr_push * 2);
770 if (ret) { 770 if (ret) {
771 NV_PRINTK(error, cli, "cal_space: %d\n", ret); 771 NV_PRINTK(err, cli, "cal_space: %d\n", ret);
772 goto out; 772 goto out;
773 } 773 }
774 774
@@ -782,7 +782,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
782 } else { 782 } else {
783 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS)); 783 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
784 if (ret) { 784 if (ret) {
785 NV_PRINTK(error, cli, "jmp_space: %d\n", ret); 785 NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
786 goto out; 786 goto out;
787 } 787 }
788 788
@@ -820,7 +820,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
820 820
821 ret = nouveau_fence_new(chan, false, &fence); 821 ret = nouveau_fence_new(chan, false, &fence);
822 if (ret) { 822 if (ret) {
823 NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret); 823 NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
824 WIND_RING(chan); 824 WIND_RING(chan);
825 goto out; 825 goto out;
826 } 826 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 0dbe0060f86e..491c7149d197 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -41,7 +41,7 @@ nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
41 struct drm_device *dev = dev_get_drvdata(d); 41 struct drm_device *dev = dev_get_drvdata(d);
42 struct nouveau_drm *drm = nouveau_drm(dev); 42 struct nouveau_drm *drm = nouveau_drm(dev);
43 struct nvkm_therm *therm = nvxx_therm(&drm->device); 43 struct nvkm_therm *therm = nvxx_therm(&drm->device);
44 int temp = therm->temp_get(therm); 44 int temp = nvkm_therm_temp_get(therm);
45 45
46 if (temp < 0) 46 if (temp < 0)
47 return temp; 47 return temp;
@@ -348,7 +348,7 @@ nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
348 struct nouveau_drm *drm = nouveau_drm(dev); 348 struct nouveau_drm *drm = nouveau_drm(dev);
349 struct nvkm_therm *therm = nvxx_therm(&drm->device); 349 struct nvkm_therm *therm = nvxx_therm(&drm->device);
350 350
351 return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm)); 351 return snprintf(buf, PAGE_SIZE, "%d\n", nvkm_therm_fan_sense(therm));
352} 352}
353static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, nouveau_hwmon_show_fan1_input, 353static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, nouveau_hwmon_show_fan1_input,
354 NULL, 0); 354 NULL, 0);
@@ -571,7 +571,7 @@ nouveau_hwmon_init(struct drm_device *dev)
571 return -ENOMEM; 571 return -ENOMEM;
572 hwmon->dev = dev; 572 hwmon->dev = dev;
573 573
574 if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set) 574 if (!therm || !therm->attr_get || !therm->attr_set)
575 return -ENODEV; 575 return -ENODEV;
576 576
577 hwmon_dev = hwmon_device_register(&dev->pdev->dev); 577 hwmon_dev = hwmon_device_register(&dev->pdev->dev);
@@ -588,7 +588,7 @@ nouveau_hwmon_init(struct drm_device *dev)
588 goto error; 588 goto error;
589 589
590 /* if the card has a working thermal sensor */ 590 /* if the card has a working thermal sensor */
591 if (therm->temp_get(therm) >= 0) { 591 if (nvkm_therm_temp_get(therm) >= 0) {
592 ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup); 592 ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
593 if (ret) 593 if (ret)
594 goto error; 594 goto error;
@@ -606,7 +606,7 @@ nouveau_hwmon_init(struct drm_device *dev)
606 } 606 }
607 607
608 /* if the card can read the fan rpm */ 608 /* if the card can read the fan rpm */
609 if (therm->fan_sense(therm) >= 0) { 609 if (nvkm_therm_fan_sense(therm) >= 0) {
610 ret = sysfs_create_group(&hwmon_dev->kobj, 610 ret = sysfs_create_group(&hwmon_dev->kobj,
611 &hwmon_fan_rpm_attrgroup); 611 &hwmon_fan_rpm_attrgroup);
612 if (ret) 612 if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index ca0ad9d1563d..55eb942847fa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -72,10 +72,8 @@ nvkm_client_suspend(void *priv)
72static void 72static void
73nvkm_client_driver_fini(void *priv) 73nvkm_client_driver_fini(void *priv)
74{ 74{
75 struct nvkm_object *client = priv; 75 struct nvkm_client *client = priv;
76 nvkm_client_fini(nv_client(client), false); 76 nvkm_client_del(&client);
77 atomic_set(&client->refcount, 1);
78 nvkm_object_ref(NULL, &client);
79} 77}
80 78
81static int 79static int
@@ -113,7 +111,7 @@ nvkm_client_driver_init(const char *name, u64 device, const char *cfg,
113 struct nvkm_client *client; 111 struct nvkm_client *client;
114 int ret; 112 int ret;
115 113
116 ret = nvkm_client_create(name, device, cfg, dbg, &client); 114 ret = nvkm_client_new(name, device, cfg, dbg, &client);
117 *ppriv = client; 115 *ppriv = client;
118 if (ret) 116 if (ret)
119 return ret; 117 return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index dcfbbfaf1739..3eb665453165 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -19,239 +19,38 @@
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE. 20 * DEALINGS IN THE SOFTWARE.
21 */ 21 */
22
23#include <linux/clk.h>
24#include <linux/io.h>
25#include <linux/module.h>
26#include <linux/platform_device.h>
27#include <linux/of.h>
28#include <linux/reset.h>
29#include <linux/regulator/consumer.h>
30#include <linux/iommu.h>
31#include <soc/tegra/fuse.h>
32#include <soc/tegra/pmc.h>
33
34#include "nouveau_drm.h"
35#include "nouveau_platform.h" 22#include "nouveau_platform.h"
36 23
37static int nouveau_platform_power_up(struct nouveau_platform_gpu *gpu)
38{
39 int err;
40
41 err = regulator_enable(gpu->vdd);
42 if (err)
43 goto err_power;
44
45 err = clk_prepare_enable(gpu->clk);
46 if (err)
47 goto err_clk;
48 err = clk_prepare_enable(gpu->clk_pwr);
49 if (err)
50 goto err_clk_pwr;
51 clk_set_rate(gpu->clk_pwr, 204000000);
52 udelay(10);
53
54 reset_control_assert(gpu->rst);
55 udelay(10);
56
57 err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
58 if (err)
59 goto err_clamp;
60 udelay(10);
61
62 reset_control_deassert(gpu->rst);
63 udelay(10);
64
65 return 0;
66
67err_clamp:
68 clk_disable_unprepare(gpu->clk_pwr);
69err_clk_pwr:
70 clk_disable_unprepare(gpu->clk);
71err_clk:
72 regulator_disable(gpu->vdd);
73err_power:
74 return err;
75}
76
77static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
78{
79 int err;
80
81 reset_control_assert(gpu->rst);
82 udelay(10);
83
84 clk_disable_unprepare(gpu->clk_pwr);
85 clk_disable_unprepare(gpu->clk);
86 udelay(10);
87
88 err = regulator_disable(gpu->vdd);
89 if (err)
90 return err;
91
92 return 0;
93}
94
95#if IS_ENABLED(CONFIG_IOMMU_API)
96
97static void nouveau_platform_probe_iommu(struct device *dev,
98 struct nouveau_platform_gpu *gpu)
99{
100 int err;
101 unsigned long pgsize_bitmap;
102
103 mutex_init(&gpu->iommu.mutex);
104
105 if (iommu_present(&platform_bus_type)) {
106 gpu->iommu.domain = iommu_domain_alloc(&platform_bus_type);
107 if (IS_ERR(gpu->iommu.domain))
108 goto error;
109
110 /*
111 * A IOMMU is only usable if it supports page sizes smaller
112 * or equal to the system's PAGE_SIZE, with a preference if
113 * both are equal.
114 */
115 pgsize_bitmap = gpu->iommu.domain->ops->pgsize_bitmap;
116 if (pgsize_bitmap & PAGE_SIZE) {
117 gpu->iommu.pgshift = PAGE_SHIFT;
118 } else {
119 gpu->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
120 if (gpu->iommu.pgshift == 0) {
121 dev_warn(dev, "unsupported IOMMU page size\n");
122 goto free_domain;
123 }
124 gpu->iommu.pgshift -= 1;
125 }
126
127 err = iommu_attach_device(gpu->iommu.domain, dev);
128 if (err)
129 goto free_domain;
130
131 err = nvkm_mm_init(&gpu->iommu._mm, 0,
132 (1ULL << 40) >> gpu->iommu.pgshift, 1);
133 if (err)
134 goto detach_device;
135
136 gpu->iommu.mm = &gpu->iommu._mm;
137 }
138
139 return;
140
141detach_device:
142 iommu_detach_device(gpu->iommu.domain, dev);
143
144free_domain:
145 iommu_domain_free(gpu->iommu.domain);
146
147error:
148 gpu->iommu.domain = NULL;
149 gpu->iommu.pgshift = 0;
150 dev_err(dev, "cannot initialize IOMMU MM\n");
151}
152
153static void nouveau_platform_remove_iommu(struct device *dev,
154 struct nouveau_platform_gpu *gpu)
155{
156 if (gpu->iommu.domain) {
157 nvkm_mm_fini(&gpu->iommu._mm);
158 iommu_detach_device(gpu->iommu.domain, dev);
159 iommu_domain_free(gpu->iommu.domain);
160 }
161}
162
163#else
164
165static void nouveau_platform_probe_iommu(struct device *dev,
166 struct nouveau_platform_gpu *gpu)
167{
168}
169
170static void nouveau_platform_remove_iommu(struct device *dev,
171 struct nouveau_platform_gpu *gpu)
172{
173}
174
175#endif
176
177static int nouveau_platform_probe(struct platform_device *pdev) 24static int nouveau_platform_probe(struct platform_device *pdev)
178{ 25{
179 struct nouveau_platform_gpu *gpu; 26 struct nvkm_device *device;
180 struct nouveau_platform_device *device;
181 struct drm_device *drm; 27 struct drm_device *drm;
182 int err; 28 int ret;
183
184 gpu = devm_kzalloc(&pdev->dev, sizeof(*gpu), GFP_KERNEL);
185 if (!gpu)
186 return -ENOMEM;
187
188 gpu->vdd = devm_regulator_get(&pdev->dev, "vdd");
189 if (IS_ERR(gpu->vdd))
190 return PTR_ERR(gpu->vdd);
191
192 gpu->rst = devm_reset_control_get(&pdev->dev, "gpu");
193 if (IS_ERR(gpu->rst))
194 return PTR_ERR(gpu->rst);
195
196 gpu->clk = devm_clk_get(&pdev->dev, "gpu");
197 if (IS_ERR(gpu->clk))
198 return PTR_ERR(gpu->clk);
199
200 gpu->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
201 if (IS_ERR(gpu->clk_pwr))
202 return PTR_ERR(gpu->clk_pwr);
203
204 nouveau_platform_probe_iommu(&pdev->dev, gpu);
205
206 err = nouveau_platform_power_up(gpu);
207 if (err)
208 return err;
209 29
210 drm = nouveau_platform_device_create(pdev, &device); 30 drm = nouveau_platform_device_create(pdev, &device);
211 if (IS_ERR(drm)) { 31 if (IS_ERR(drm))
212 err = PTR_ERR(drm); 32 return PTR_ERR(drm);
213 goto power_down;
214 }
215 33
216 device->gpu = gpu; 34 ret = drm_dev_register(drm, 0);
217 device->gpu_speedo = tegra_sku_info.gpu_speedo_value; 35 if (ret < 0) {
218 36 drm_dev_unref(drm);
219 err = drm_dev_register(drm, 0); 37 return ret;
220 if (err < 0) 38 }
221 goto err_unref;
222 39
223 return 0; 40 return 0;
224
225err_unref:
226 drm_dev_unref(drm);
227
228power_down:
229 nouveau_platform_power_down(gpu);
230 nouveau_platform_remove_iommu(&pdev->dev, gpu);
231
232 return err;
233} 41}
234 42
235static int nouveau_platform_remove(struct platform_device *pdev) 43static int nouveau_platform_remove(struct platform_device *pdev)
236{ 44{
237 struct drm_device *drm_dev = platform_get_drvdata(pdev); 45 struct drm_device *dev = platform_get_drvdata(pdev);
238 struct nouveau_drm *drm = nouveau_drm(drm_dev); 46 nouveau_drm_device_remove(dev);
239 struct nvkm_device *device = nvxx_device(&drm->device); 47 return 0;
240 struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu;
241 int err;
242
243 nouveau_drm_device_remove(drm_dev);
244
245 err = nouveau_platform_power_down(gpu);
246
247 nouveau_platform_remove_iommu(&pdev->dev, gpu);
248
249 return err;
250} 48}
251 49
252#if IS_ENABLED(CONFIG_OF) 50#if IS_ENABLED(CONFIG_OF)
253static const struct of_device_id nouveau_platform_match[] = { 51static const struct of_device_id nouveau_platform_match[] = {
254 { .compatible = "nvidia,gk20a" }, 52 { .compatible = "nvidia,gk20a" },
53 { .compatible = "nvidia,gm20b" },
255 { } 54 { }
256}; 55};
257 56
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h
index 392874cf4725..f41056d0f5f4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.h
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.h
@@ -19,54 +19,9 @@
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE. 20 * DEALINGS IN THE SOFTWARE.
21 */ 21 */
22
23#ifndef __NOUVEAU_PLATFORM_H__ 22#ifndef __NOUVEAU_PLATFORM_H__
24#define __NOUVEAU_PLATFORM_H__ 23#define __NOUVEAU_PLATFORM_H__
25 24#include "nouveau_drm.h"
26#include "core/device.h"
27#include "core/mm.h"
28
29struct reset_control;
30struct clk;
31struct regulator;
32struct iommu_domain;
33struct platform_driver;
34
35struct nouveau_platform_gpu {
36 struct reset_control *rst;
37 struct clk *clk;
38 struct clk *clk_pwr;
39
40 struct regulator *vdd;
41
42 struct {
43 /*
44 * Protects accesses to mm from subsystems
45 */
46 struct mutex mutex;
47
48 struct nvkm_mm _mm;
49 /*
50 * Just points to _mm. We need this to avoid embedding
51 * struct nvkm_mm in os.h
52 */
53 struct nvkm_mm *mm;
54 struct iommu_domain *domain;
55 unsigned long pgshift;
56 } iommu;
57};
58
59struct nouveau_platform_device {
60 struct nvkm_device device;
61
62 struct nouveau_platform_gpu *gpu;
63
64 int gpu_speedo;
65};
66
67#define nv_device_to_platform(d) \
68 container_of(d, struct nouveau_platform_device, device)
69 25
70extern struct platform_driver nouveau_platform_driver; 26extern struct platform_driver nouveau_platform_driver;
71
72#endif 27#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
index 1ec8f38ae69a..d12a5faee047 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -165,7 +165,7 @@ nouveau_sysfs_fini(struct drm_device *dev)
165 struct nvif_device *device = &drm->device; 165 struct nvif_device *device = &drm->device;
166 166
167 if (sysfs && sysfs->ctrl.priv) { 167 if (sysfs && sysfs->ctrl.priv) {
168 device_remove_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate); 168 device_remove_file(nvxx_device(device)->dev, &dev_attr_pstate);
169 nvif_object_fini(&sysfs->ctrl); 169 nvif_object_fini(&sysfs->ctrl);
170 } 170 }
171 171
@@ -188,11 +188,11 @@ nouveau_sysfs_init(struct drm_device *dev)
188 if (!sysfs) 188 if (!sysfs)
189 return -ENOMEM; 189 return -ENOMEM;
190 190
191 ret = nvif_object_init(nvif_object(device), NULL, NVDRM_CONTROL, 191 ret = nvif_object_init(&device->object, NVDRM_CONTROL,
192 NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0, 192 NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0,
193 &sysfs->ctrl); 193 &sysfs->ctrl);
194 if (ret == 0) 194 if (ret == 0)
195 device_create_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate); 195 device_create_file(nvxx_device(device)->dev, &dev_attr_pstate);
196 196
197 return 0; 197 return 0;
198} 198}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 737e8f976a98..3f0fb55cb473 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -33,8 +33,8 @@ static int
33nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) 33nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
34{ 34{
35 struct nouveau_drm *drm = nouveau_bdev(man->bdev); 35 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
36 struct nvkm_fb *pfb = nvxx_fb(&drm->device); 36 struct nvkm_fb *fb = nvxx_fb(&drm->device);
37 man->priv = pfb; 37 man->priv = fb;
38 return 0; 38 return 0;
39} 39}
40 40
@@ -64,9 +64,9 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
64 struct ttm_mem_reg *mem) 64 struct ttm_mem_reg *mem)
65{ 65{
66 struct nouveau_drm *drm = nouveau_bdev(man->bdev); 66 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
67 struct nvkm_fb *pfb = nvxx_fb(&drm->device); 67 struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
68 nvkm_mem_node_cleanup(mem->mm_node); 68 nvkm_mem_node_cleanup(mem->mm_node);
69 pfb->ram->put(pfb, (struct nvkm_mem **)&mem->mm_node); 69 ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
70} 70}
71 71
72static int 72static int
@@ -76,7 +76,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
76 struct ttm_mem_reg *mem) 76 struct ttm_mem_reg *mem)
77{ 77{
78 struct nouveau_drm *drm = nouveau_bdev(man->bdev); 78 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
79 struct nvkm_fb *pfb = nvxx_fb(&drm->device); 79 struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
80 struct nouveau_bo *nvbo = nouveau_bo(bo); 80 struct nouveau_bo *nvbo = nouveau_bo(bo);
81 struct nvkm_mem *node; 81 struct nvkm_mem *node;
82 u32 size_nc = 0; 82 u32 size_nc = 0;
@@ -88,9 +88,9 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
88 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) 88 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
89 size_nc = 1 << nvbo->page_shift; 89 size_nc = 1 << nvbo->page_shift;
90 90
91 ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT, 91 ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
92 mem->page_alignment << PAGE_SHIFT, size_nc, 92 mem->page_alignment << PAGE_SHIFT, size_nc,
93 (nvbo->tile_flags >> 8) & 0x3ff, &node); 93 (nvbo->tile_flags >> 8) & 0x3ff, &node);
94 if (ret) { 94 if (ret) {
95 mem->mm_node = NULL; 95 mem->mm_node = NULL;
96 return (ret == -ENOSPC) ? 0 : ret; 96 return (ret == -ENOSPC) ? 0 : ret;
@@ -103,38 +103,11 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
103 return 0; 103 return 0;
104} 104}
105 105
106static void
107nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
108{
109 struct nvkm_fb *pfb = man->priv;
110 struct nvkm_mm *mm = &pfb->vram;
111 struct nvkm_mm_node *r;
112 u32 total = 0, free = 0;
113
114 mutex_lock(&nv_subdev(pfb)->mutex);
115 list_for_each_entry(r, &mm->nodes, nl_entry) {
116 printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
117 prefix, r->type, ((u64)r->offset << 12),
118 (((u64)r->offset + r->length) << 12));
119
120 total += r->length;
121 if (!r->type)
122 free += r->length;
123 }
124 mutex_unlock(&nv_subdev(pfb)->mutex);
125
126 printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
127 prefix, (u64)total << 12, (u64)free << 12);
128 printk(KERN_DEBUG "%s block: 0x%08x\n",
129 prefix, mm->block_size << 12);
130}
131
132const struct ttm_mem_type_manager_func nouveau_vram_manager = { 106const struct ttm_mem_type_manager_func nouveau_vram_manager = {
133 nouveau_vram_manager_init, 107 nouveau_vram_manager_init,
134 nouveau_vram_manager_fini, 108 nouveau_vram_manager_fini,
135 nouveau_vram_manager_new, 109 nouveau_vram_manager_new,
136 nouveau_vram_manager_del, 110 nouveau_vram_manager_del,
137 nouveau_vram_manager_debug
138}; 111};
139 112
140static int 113static int
@@ -221,7 +194,7 @@ nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
221{ 194{
222 struct nouveau_drm *drm = nouveau_bdev(man->bdev); 195 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
223 struct nvkm_mmu *mmu = nvxx_mmu(&drm->device); 196 struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
224 struct nv04_mmu_priv *priv = (void *)mmu; 197 struct nv04_mmu *priv = (void *)mmu;
225 struct nvkm_vm *vm = NULL; 198 struct nvkm_vm *vm = NULL;
226 nvkm_vm_ref(priv->vm, &vm, NULL); 199 nvkm_vm_ref(priv->vm, &vm, NULL);
227 man->priv = vm; 200 man->priv = vm;
@@ -362,13 +335,22 @@ nouveau_ttm_global_release(struct nouveau_drm *drm)
362int 335int
363nouveau_ttm_init(struct nouveau_drm *drm) 336nouveau_ttm_init(struct nouveau_drm *drm)
364{ 337{
338 struct nvkm_device *device = nvxx_device(&drm->device);
339 struct nvkm_pci *pci = device->pci;
365 struct drm_device *dev = drm->dev; 340 struct drm_device *dev = drm->dev;
366 u32 bits; 341 u32 bits;
367 int ret; 342 int ret;
368 343
344 if (pci && pci->agp.bridge) {
345 drm->agp.bridge = pci->agp.bridge;
346 drm->agp.base = pci->agp.base;
347 drm->agp.size = pci->agp.size;
348 drm->agp.cma = pci->agp.cma;
349 }
350
369 bits = nvxx_mmu(&drm->device)->dma_bits; 351 bits = nvxx_mmu(&drm->device)->dma_bits;
370 if (nv_device_is_pci(nvxx_device(&drm->device))) { 352 if (nvxx_device(&drm->device)->func->pci) {
371 if (drm->agp.stat == ENABLED || 353 if (drm->agp.bridge ||
372 !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits))) 354 !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
373 bits = 32; 355 bits = 32;
374 356
@@ -408,11 +390,11 @@ nouveau_ttm_init(struct nouveau_drm *drm)
408 return ret; 390 return ret;
409 } 391 }
410 392
411 drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvxx_device(&drm->device), 1), 393 drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
412 nv_device_resource_len(nvxx_device(&drm->device), 1)); 394 device->func->resource_size(device, 1));
413 395
414 /* GART init */ 396 /* GART init */
415 if (drm->agp.stat != ENABLED) { 397 if (!drm->agp.bridge) {
416 drm->gem.gart_available = nvxx_mmu(&drm->device)->limit; 398 drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
417 } else { 399 } else {
418 drm->gem.gart_available = drm->agp.size; 400 drm->gem.gart_available = drm->agp.size;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index c7592ec8ecb8..af89c3665b2a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -12,13 +12,14 @@
12static unsigned int 12static unsigned int
13nouveau_vga_set_decode(void *priv, bool state) 13nouveau_vga_set_decode(void *priv, bool state)
14{ 14{
15 struct nvif_device *device = &nouveau_drm(priv)->device; 15 struct nouveau_drm *drm = nouveau_drm(priv);
16 struct nvif_object *device = &drm->device.object;
16 17
17 if (device->info.family == NV_DEVICE_INFO_V0_CURIE && 18 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE &&
18 device->info.chipset >= 0x4c) 19 drm->device.info.chipset >= 0x4c)
19 nvif_wr32(device, 0x088060, state); 20 nvif_wr32(device, 0x088060, state);
20 else 21 else
21 if (device->info.chipset >= 0x40) 22 if (drm->device.info.chipset >= 0x40)
22 nvif_wr32(device, 0x088054, state); 23 nvif_wr32(device, 0x088054, state);
23 else 24 else
24 nvif_wr32(device, 0x001854, state); 25 nvif_wr32(device, 0x001854, state);
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 495c57644ced..789dc2993b0d 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -171,33 +171,33 @@ nv04_fbcon_accel_init(struct fb_info *info)
171 return -EINVAL; 171 return -EINVAL;
172 } 172 }
173 173
174 ret = nvif_object_init(chan->object, NULL, 0x0062, 174 ret = nvif_object_init(&chan->user, 0x0062,
175 device->info.family >= NV_DEVICE_INFO_V0_CELSIUS ? 175 device->info.family >= NV_DEVICE_INFO_V0_CELSIUS ?
176 0x0062 : 0x0042, NULL, 0, &nfbdev->surf2d); 176 0x0062 : 0x0042, NULL, 0, &nfbdev->surf2d);
177 if (ret) 177 if (ret)
178 return ret; 178 return ret;
179 179
180 ret = nvif_object_init(chan->object, NULL, 0x0019, 0x0019, NULL, 0, 180 ret = nvif_object_init(&chan->user, 0x0019, 0x0019, NULL, 0,
181 &nfbdev->clip); 181 &nfbdev->clip);
182 if (ret) 182 if (ret)
183 return ret; 183 return ret;
184 184
185 ret = nvif_object_init(chan->object, NULL, 0x0043, 0x0043, NULL, 0, 185 ret = nvif_object_init(&chan->user, 0x0043, 0x0043, NULL, 0,
186 &nfbdev->rop); 186 &nfbdev->rop);
187 if (ret) 187 if (ret)
188 return ret; 188 return ret;
189 189
190 ret = nvif_object_init(chan->object, NULL, 0x0044, 0x0044, NULL, 0, 190 ret = nvif_object_init(&chan->user, 0x0044, 0x0044, NULL, 0,
191 &nfbdev->patt); 191 &nfbdev->patt);
192 if (ret) 192 if (ret)
193 return ret; 193 return ret;
194 194
195 ret = nvif_object_init(chan->object, NULL, 0x004a, 0x004a, NULL, 0, 195 ret = nvif_object_init(&chan->user, 0x004a, 0x004a, NULL, 0,
196 &nfbdev->gdi); 196 &nfbdev->gdi);
197 if (ret) 197 if (ret)
198 return ret; 198 return ret;
199 199
200 ret = nvif_object_init(chan->object, NULL, 0x005f, 200 ret = nvif_object_init(&chan->user, 0x005f,
201 device->info.chipset >= 0x11 ? 0x009f : 0x005f, 201 device->info.chipset >= 0x11 ? 0x009f : 0x005f,
202 NULL, 0, &nfbdev->blit); 202 NULL, 0, &nfbdev->blit);
203 if (ret) 203 if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index c2e05e64cd6f..f3d705d67738 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -57,8 +57,10 @@ nv04_fence_sync(struct nouveau_fence *fence,
57static u32 57static u32
58nv04_fence_read(struct nouveau_channel *chan) 58nv04_fence_read(struct nouveau_channel *chan)
59{ 59{
60 struct nvkm_fifo_chan *fifo = nvxx_fifo_chan(chan);; 60 struct nv04_nvsw_get_ref_v0 args = {};
61 return atomic_read(&fifo->refcnt); 61 WARN_ON(nvif_object_mthd(&chan->nvsw, NV04_NVSW_GET_REF,
62 &args, sizeof(args)));
63 return args.ref;
62} 64}
63 65
64static void 66static void
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 5e1ea1cdce75..2c35213da275 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -50,7 +50,7 @@ nv10_fence_sync(struct nouveau_fence *fence,
50u32 50u32
51nv10_fence_read(struct nouveau_channel *chan) 51nv10_fence_read(struct nouveau_channel *chan)
52{ 52{
53 return nvif_rd32(chan, 0x0048); 53 return nvif_rd32(&chan->user, 0x0048);
54} 54}
55 55
56void 56void
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 57860cfa1de5..80b6eb8b3d02 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -33,7 +33,7 @@ int
33nv17_fence_sync(struct nouveau_fence *fence, 33nv17_fence_sync(struct nouveau_fence *fence,
34 struct nouveau_channel *prev, struct nouveau_channel *chan) 34 struct nouveau_channel *prev, struct nouveau_channel *chan)
35{ 35{
36 struct nouveau_cli *cli = (void *)nvif_client(&prev->device->base); 36 struct nouveau_cli *cli = (void *)prev->user.client;
37 struct nv10_fence_priv *priv = chan->drm->fence; 37 struct nv10_fence_priv *priv = chan->drm->fence;
38 struct nv10_fence_chan *fctx = chan->fence; 38 struct nv10_fence_chan *fctx = chan->fence;
39 u32 value; 39 u32 value;
@@ -89,7 +89,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
89 fctx->base.read = nv10_fence_read; 89 fctx->base.read = nv10_fence_read;
90 fctx->base.sync = nv17_fence_sync; 90 fctx->base.sync = nv17_fence_sync;
91 91
92 ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_FROM_MEMORY, 92 ret = nvif_object_init(&chan->user, NvSema, NV_DMA_FROM_MEMORY,
93 &(struct nv_dma_v0) { 93 &(struct nv_dma_v0) {
94 .target = NV_DMA_V0_TARGET_VRAM, 94 .target = NV_DMA_V0_TARGET_VRAM,
95 .access = NV_DMA_V0_ACCESS_RDWR, 95 .access = NV_DMA_V0_ACCESS_RDWR,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 981342d142ff..4ae87aed4505 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -60,35 +60,39 @@
60 60
61struct nv50_chan { 61struct nv50_chan {
62 struct nvif_object user; 62 struct nvif_object user;
63 struct nvif_device *device;
63}; 64};
64 65
65static int 66static int
66nv50_chan_create(struct nvif_object *disp, const u32 *oclass, u8 head, 67nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
67 void *data, u32 size, struct nv50_chan *chan) 68 const s32 *oclass, u8 head, void *data, u32 size,
69 struct nv50_chan *chan)
68{ 70{
69 const u32 handle = (oclass[0] << 16) | head; 71 const u32 handle = (oclass[0] << 16) | head;
70 u32 sclass[8]; 72 struct nvif_sclass *sclass;
71 int ret, i; 73 int ret, i, n;
74
75 chan->device = device;
72 76
73 ret = nvif_object_sclass(disp, sclass, ARRAY_SIZE(sclass)); 77 ret = n = nvif_object_sclass_get(disp, &sclass);
74 WARN_ON(ret > ARRAY_SIZE(sclass));
75 if (ret < 0) 78 if (ret < 0)
76 return ret; 79 return ret;
77 80
78 while (oclass[0]) { 81 while (oclass[0]) {
79 for (i = 0; i < ARRAY_SIZE(sclass); i++) { 82 for (i = 0; i < n; i++) {
80 if (sclass[i] == oclass[0]) { 83 if (sclass[i].oclass == oclass[0]) {
81 ret = nvif_object_init(disp, NULL, handle, 84 ret = nvif_object_init(disp, handle, oclass[0],
82 oclass[0], data, size, 85 data, size, &chan->user);
83 &chan->user);
84 if (ret == 0) 86 if (ret == 0)
85 nvif_object_map(&chan->user); 87 nvif_object_map(&chan->user);
88 nvif_object_sclass_put(&sclass);
86 return ret; 89 return ret;
87 } 90 }
88 } 91 }
89 oclass++; 92 oclass++;
90 } 93 }
91 94
95 nvif_object_sclass_put(&sclass);
92 return -ENOSYS; 96 return -ENOSYS;
93} 97}
94 98
@@ -113,10 +117,12 @@ nv50_pioc_destroy(struct nv50_pioc *pioc)
113} 117}
114 118
115static int 119static int
116nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head, 120nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
117 void *data, u32 size, struct nv50_pioc *pioc) 121 const s32 *oclass, u8 head, void *data, u32 size,
122 struct nv50_pioc *pioc)
118{ 123{
119 return nv50_chan_create(disp, oclass, head, data, size, &pioc->base); 124 return nv50_chan_create(device, disp, oclass, head, data, size,
125 &pioc->base);
120} 126}
121 127
122/****************************************************************************** 128/******************************************************************************
@@ -128,12 +134,13 @@ struct nv50_curs {
128}; 134};
129 135
130static int 136static int
131nv50_curs_create(struct nvif_object *disp, int head, struct nv50_curs *curs) 137nv50_curs_create(struct nvif_device *device, struct nvif_object *disp,
138 int head, struct nv50_curs *curs)
132{ 139{
133 struct nv50_disp_cursor_v0 args = { 140 struct nv50_disp_cursor_v0 args = {
134 .head = head, 141 .head = head,
135 }; 142 };
136 static const u32 oclass[] = { 143 static const s32 oclass[] = {
137 GK104_DISP_CURSOR, 144 GK104_DISP_CURSOR,
138 GF110_DISP_CURSOR, 145 GF110_DISP_CURSOR,
139 GT214_DISP_CURSOR, 146 GT214_DISP_CURSOR,
@@ -142,8 +149,8 @@ nv50_curs_create(struct nvif_object *disp, int head, struct nv50_curs *curs)
142 0 149 0
143 }; 150 };
144 151
145 return nv50_pioc_create(disp, oclass, head, &args, sizeof(args), 152 return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
146 &curs->base); 153 &curs->base);
147} 154}
148 155
149/****************************************************************************** 156/******************************************************************************
@@ -155,12 +162,13 @@ struct nv50_oimm {
155}; 162};
156 163
157static int 164static int
158nv50_oimm_create(struct nvif_object *disp, int head, struct nv50_oimm *oimm) 165nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
166 int head, struct nv50_oimm *oimm)
159{ 167{
160 struct nv50_disp_cursor_v0 args = { 168 struct nv50_disp_cursor_v0 args = {
161 .head = head, 169 .head = head,
162 }; 170 };
163 static const u32 oclass[] = { 171 static const s32 oclass[] = {
164 GK104_DISP_OVERLAY, 172 GK104_DISP_OVERLAY,
165 GF110_DISP_OVERLAY, 173 GF110_DISP_OVERLAY,
166 GT214_DISP_OVERLAY, 174 GT214_DISP_OVERLAY,
@@ -169,8 +177,8 @@ nv50_oimm_create(struct nvif_object *disp, int head, struct nv50_oimm *oimm)
169 0 177 0
170 }; 178 };
171 179
172 return nv50_pioc_create(disp, oclass, head, &args, sizeof(args), 180 return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
173 &oimm->base); 181 &oimm->base);
174} 182}
175 183
176/****************************************************************************** 184/******************************************************************************
@@ -194,37 +202,37 @@ struct nv50_dmac {
194static void 202static void
195nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp) 203nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
196{ 204{
205 struct nvif_device *device = dmac->base.device;
206
197 nvif_object_fini(&dmac->vram); 207 nvif_object_fini(&dmac->vram);
198 nvif_object_fini(&dmac->sync); 208 nvif_object_fini(&dmac->sync);
199 209
200 nv50_chan_destroy(&dmac->base); 210 nv50_chan_destroy(&dmac->base);
201 211
202 if (dmac->ptr) { 212 if (dmac->ptr) {
203 struct pci_dev *pdev = nvxx_device(nvif_device(disp))->pdev; 213 struct device *dev = nvxx_device(device)->dev;
204 pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle); 214 dma_free_coherent(dev, PAGE_SIZE, dmac->ptr, dmac->handle);
205 } 215 }
206} 216}
207 217
208static int 218static int
209nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head, 219nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
210 void *data, u32 size, u64 syncbuf, 220 const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
211 struct nv50_dmac *dmac) 221 struct nv50_dmac *dmac)
212{ 222{
213 struct nvif_device *device = nvif_device(disp);
214 struct nv50_disp_core_channel_dma_v0 *args = data; 223 struct nv50_disp_core_channel_dma_v0 *args = data;
215 struct nvif_object pushbuf; 224 struct nvif_object pushbuf;
216 int ret; 225 int ret;
217 226
218 mutex_init(&dmac->lock); 227 mutex_init(&dmac->lock);
219 228
220 dmac->ptr = pci_alloc_consistent(nvxx_device(device)->pdev, 229 dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
221 PAGE_SIZE, &dmac->handle); 230 &dmac->handle, GFP_KERNEL);
222 if (!dmac->ptr) 231 if (!dmac->ptr)
223 return -ENOMEM; 232 return -ENOMEM;
224 233
225 ret = nvif_object_init(nvif_object(device), NULL, 234 ret = nvif_object_init(&device->object, 0xd0000000,
226 args->pushbuf, NV_DMA_FROM_MEMORY, 235 NV_DMA_FROM_MEMORY, &(struct nv_dma_v0) {
227 &(struct nv_dma_v0) {
228 .target = NV_DMA_V0_TARGET_PCI_US, 236 .target = NV_DMA_V0_TARGET_PCI_US,
229 .access = NV_DMA_V0_ACCESS_RD, 237 .access = NV_DMA_V0_ACCESS_RD,
230 .start = dmac->handle + 0x0000, 238 .start = dmac->handle + 0x0000,
@@ -233,13 +241,15 @@ nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head,
233 if (ret) 241 if (ret)
234 return ret; 242 return ret;
235 243
236 ret = nv50_chan_create(disp, oclass, head, data, size, &dmac->base); 244 args->pushbuf = nvif_handle(&pushbuf);
245
246 ret = nv50_chan_create(device, disp, oclass, head, data, size,
247 &dmac->base);
237 nvif_object_fini(&pushbuf); 248 nvif_object_fini(&pushbuf);
238 if (ret) 249 if (ret)
239 return ret; 250 return ret;
240 251
241 ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000000, 252 ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
242 NV_DMA_IN_MEMORY,
243 &(struct nv_dma_v0) { 253 &(struct nv_dma_v0) {
244 .target = NV_DMA_V0_TARGET_VRAM, 254 .target = NV_DMA_V0_TARGET_VRAM,
245 .access = NV_DMA_V0_ACCESS_RDWR, 255 .access = NV_DMA_V0_ACCESS_RDWR,
@@ -250,8 +260,7 @@ nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head,
250 if (ret) 260 if (ret)
251 return ret; 261 return ret;
252 262
253 ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000001, 263 ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
254 NV_DMA_IN_MEMORY,
255 &(struct nv_dma_v0) { 264 &(struct nv_dma_v0) {
256 .target = NV_DMA_V0_TARGET_VRAM, 265 .target = NV_DMA_V0_TARGET_VRAM,
257 .access = NV_DMA_V0_ACCESS_RDWR, 266 .access = NV_DMA_V0_ACCESS_RDWR,
@@ -274,12 +283,13 @@ struct nv50_mast {
274}; 283};
275 284
276static int 285static int
277nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core) 286nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
287 u64 syncbuf, struct nv50_mast *core)
278{ 288{
279 struct nv50_disp_core_channel_dma_v0 args = { 289 struct nv50_disp_core_channel_dma_v0 args = {
280 .pushbuf = 0xb0007d00, 290 .pushbuf = 0xb0007d00,
281 }; 291 };
282 static const u32 oclass[] = { 292 static const s32 oclass[] = {
283 GM204_DISP_CORE_CHANNEL_DMA, 293 GM204_DISP_CORE_CHANNEL_DMA,
284 GM107_DISP_CORE_CHANNEL_DMA, 294 GM107_DISP_CORE_CHANNEL_DMA,
285 GK110_DISP_CORE_CHANNEL_DMA, 295 GK110_DISP_CORE_CHANNEL_DMA,
@@ -293,8 +303,8 @@ nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core)
293 0 303 0
294 }; 304 };
295 305
296 return nv50_dmac_create(disp, oclass, 0, &args, sizeof(args), syncbuf, 306 return nv50_dmac_create(device, disp, oclass, 0, &args, sizeof(args),
297 &core->base); 307 syncbuf, &core->base);
298} 308}
299 309
300/****************************************************************************** 310/******************************************************************************
@@ -308,14 +318,14 @@ struct nv50_sync {
308}; 318};
309 319
310static int 320static int
311nv50_base_create(struct nvif_object *disp, int head, u64 syncbuf, 321nv50_base_create(struct nvif_device *device, struct nvif_object *disp,
312 struct nv50_sync *base) 322 int head, u64 syncbuf, struct nv50_sync *base)
313{ 323{
314 struct nv50_disp_base_channel_dma_v0 args = { 324 struct nv50_disp_base_channel_dma_v0 args = {
315 .pushbuf = 0xb0007c00 | head, 325 .pushbuf = 0xb0007c00 | head,
316 .head = head, 326 .head = head,
317 }; 327 };
318 static const u32 oclass[] = { 328 static const s32 oclass[] = {
319 GK110_DISP_BASE_CHANNEL_DMA, 329 GK110_DISP_BASE_CHANNEL_DMA,
320 GK104_DISP_BASE_CHANNEL_DMA, 330 GK104_DISP_BASE_CHANNEL_DMA,
321 GF110_DISP_BASE_CHANNEL_DMA, 331 GF110_DISP_BASE_CHANNEL_DMA,
@@ -326,7 +336,7 @@ nv50_base_create(struct nvif_object *disp, int head, u64 syncbuf,
326 0 336 0
327 }; 337 };
328 338
329 return nv50_dmac_create(disp, oclass, head, &args, sizeof(args), 339 return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
330 syncbuf, &base->base); 340 syncbuf, &base->base);
331} 341}
332 342
@@ -339,14 +349,14 @@ struct nv50_ovly {
339}; 349};
340 350
341static int 351static int
342nv50_ovly_create(struct nvif_object *disp, int head, u64 syncbuf, 352nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
343 struct nv50_ovly *ovly) 353 int head, u64 syncbuf, struct nv50_ovly *ovly)
344{ 354{
345 struct nv50_disp_overlay_channel_dma_v0 args = { 355 struct nv50_disp_overlay_channel_dma_v0 args = {
346 .pushbuf = 0xb0007e00 | head, 356 .pushbuf = 0xb0007e00 | head,
347 .head = head, 357 .head = head,
348 }; 358 };
349 static const u32 oclass[] = { 359 static const s32 oclass[] = {
350 GK104_DISP_OVERLAY_CONTROL_DMA, 360 GK104_DISP_OVERLAY_CONTROL_DMA,
351 GF110_DISP_OVERLAY_CONTROL_DMA, 361 GF110_DISP_OVERLAY_CONTROL_DMA,
352 GT214_DISP_OVERLAY_CHANNEL_DMA, 362 GT214_DISP_OVERLAY_CHANNEL_DMA,
@@ -356,7 +366,7 @@ nv50_ovly_create(struct nvif_object *disp, int head, u64 syncbuf,
356 0 366 0
357 }; 367 };
358 368
359 return nv50_dmac_create(disp, oclass, head, &args, sizeof(args), 369 return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
360 syncbuf, &ovly->base); 370 syncbuf, &ovly->base);
361} 371}
362 372
@@ -413,6 +423,7 @@ static u32 *
413evo_wait(void *evoc, int nr) 423evo_wait(void *evoc, int nr)
414{ 424{
415 struct nv50_dmac *dmac = evoc; 425 struct nv50_dmac *dmac = evoc;
426 struct nvif_device *device = dmac->base.device;
416 u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4; 427 u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
417 428
418 mutex_lock(&dmac->lock); 429 mutex_lock(&dmac->lock);
@@ -420,9 +431,12 @@ evo_wait(void *evoc, int nr)
420 dmac->ptr[put] = 0x20000000; 431 dmac->ptr[put] = 0x20000000;
421 432
422 nvif_wr32(&dmac->base.user, 0x0000, 0x00000000); 433 nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
423 if (!nvxx_wait(&dmac->base.user, 0x0004, ~0, 0x00000000)) { 434 if (nvif_msec(device, 2000,
435 if (!nvif_rd32(&dmac->base.user, 0x0004))
436 break;
437 ) < 0) {
424 mutex_unlock(&dmac->lock); 438 mutex_unlock(&dmac->lock);
425 nv_error(nvxx_object(&dmac->base.user), "channel stalled\n"); 439 printk(KERN_ERR "nouveau: evo channel stalled\n");
426 return NULL; 440 return NULL;
427 } 441 }
428 442
@@ -480,7 +494,10 @@ evo_sync(struct drm_device *dev)
480 evo_data(push, 0x00000000); 494 evo_data(push, 0x00000000);
481 evo_data(push, 0x00000000); 495 evo_data(push, 0x00000000);
482 evo_kick(push, mast); 496 evo_kick(push, mast);
483 if (nv_wait_cb(nvxx_device(device), evo_sync_wait, disp->sync)) 497 if (nvif_msec(device, 2000,
498 if (evo_sync_wait(disp->sync))
499 break;
500 ) >= 0)
484 return 0; 501 return 0;
485 } 502 }
486 503
@@ -535,7 +552,10 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
535 evo_kick(push, flip.chan); 552 evo_kick(push, flip.chan);
536 } 553 }
537 554
538 nv_wait_cb(nvxx_device(device), nv50_display_flip_wait, &flip); 555 nvif_msec(device, 2000,
556 if (nv50_display_flip_wait(&flip))
557 break;
558 );
539} 559}
540 560
541int 561int
@@ -563,7 +583,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
563 if (unlikely(push == NULL)) 583 if (unlikely(push == NULL))
564 return -EBUSY; 584 return -EBUSY;
565 585
566 if (chan && chan->object->oclass < G82_CHANNEL_GPFIFO) { 586 if (chan && chan->user.oclass < G82_CHANNEL_GPFIFO) {
567 ret = RING_SPACE(chan, 8); 587 ret = RING_SPACE(chan, 8);
568 if (ret) 588 if (ret)
569 return ret; 589 return ret;
@@ -577,7 +597,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
577 OUT_RING (chan, sync->addr); 597 OUT_RING (chan, sync->addr);
578 OUT_RING (chan, sync->data); 598 OUT_RING (chan, sync->data);
579 } else 599 } else
580 if (chan && chan->object->oclass < FERMI_CHANNEL_GPFIFO) { 600 if (chan && chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
581 u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; 601 u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
582 ret = RING_SPACE(chan, 12); 602 ret = RING_SPACE(chan, 12);
583 if (ret) 603 if (ret)
@@ -1408,6 +1428,8 @@ static const struct drm_crtc_funcs nv50_crtc_func = {
1408static int 1428static int
1409nv50_crtc_create(struct drm_device *dev, int index) 1429nv50_crtc_create(struct drm_device *dev, int index)
1410{ 1430{
1431 struct nouveau_drm *drm = nouveau_drm(dev);
1432 struct nvif_device *device = &drm->device;
1411 struct nv50_disp *disp = nv50_disp(dev); 1433 struct nv50_disp *disp = nv50_disp(dev);
1412 struct nv50_head *head; 1434 struct nv50_head *head;
1413 struct drm_crtc *crtc; 1435 struct drm_crtc *crtc;
@@ -1452,13 +1474,13 @@ nv50_crtc_create(struct drm_device *dev, int index)
1452 goto out; 1474 goto out;
1453 1475
1454 /* allocate cursor resources */ 1476 /* allocate cursor resources */
1455 ret = nv50_curs_create(disp->disp, index, &head->curs); 1477 ret = nv50_curs_create(device, disp->disp, index, &head->curs);
1456 if (ret) 1478 if (ret)
1457 goto out; 1479 goto out;
1458 1480
1459 /* allocate page flip / sync resources */ 1481 /* allocate page flip / sync resources */
1460 ret = nv50_base_create(disp->disp, index, disp->sync->bo.offset, 1482 ret = nv50_base_create(device, disp->disp, index, disp->sync->bo.offset,
1461 &head->sync); 1483 &head->sync);
1462 if (ret) 1484 if (ret)
1463 goto out; 1485 goto out;
1464 1486
@@ -1466,12 +1488,12 @@ nv50_crtc_create(struct drm_device *dev, int index)
1466 head->sync.data = 0x00000000; 1488 head->sync.data = 0x00000000;
1467 1489
1468 /* allocate overlay resources */ 1490 /* allocate overlay resources */
1469 ret = nv50_oimm_create(disp->disp, index, &head->oimm); 1491 ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
1470 if (ret) 1492 if (ret)
1471 goto out; 1493 goto out;
1472 1494
1473 ret = nv50_ovly_create(disp->disp, index, disp->sync->bo.offset, 1495 ret = nv50_ovly_create(device, disp->disp, index, disp->sync->bo.offset,
1474 &head->ovly); 1496 &head->ovly);
1475 if (ret) 1497 if (ret)
1476 goto out; 1498 goto out;
1477 1499
@@ -1678,6 +1700,7 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
1678{ 1700{
1679 struct nouveau_drm *drm = nouveau_drm(connector->dev); 1701 struct nouveau_drm *drm = nouveau_drm(connector->dev);
1680 struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); 1702 struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
1703 struct nvkm_i2c_bus *bus;
1681 struct nouveau_encoder *nv_encoder; 1704 struct nouveau_encoder *nv_encoder;
1682 struct drm_encoder *encoder; 1705 struct drm_encoder *encoder;
1683 int type = DRM_MODE_ENCODER_DAC; 1706 int type = DRM_MODE_ENCODER_DAC;
@@ -1687,7 +1710,10 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
1687 return -ENOMEM; 1710 return -ENOMEM;
1688 nv_encoder->dcb = dcbe; 1711 nv_encoder->dcb = dcbe;
1689 nv_encoder->or = ffs(dcbe->or) - 1; 1712 nv_encoder->or = ffs(dcbe->or) - 1;
1690 nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index); 1713
1714 bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
1715 if (bus)
1716 nv_encoder->i2c = &bus->i2c;
1691 1717
1692 encoder = to_drm_encoder(nv_encoder); 1718 encoder = to_drm_encoder(nv_encoder);
1693 encoder->possible_crtcs = dcbe->heads; 1719 encoder->possible_crtcs = dcbe->heads;
@@ -2081,9 +2107,22 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
2081 return -ENOMEM; 2107 return -ENOMEM;
2082 nv_encoder->dcb = dcbe; 2108 nv_encoder->dcb = dcbe;
2083 nv_encoder->or = ffs(dcbe->or) - 1; 2109 nv_encoder->or = ffs(dcbe->or) - 1;
2084 nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
2085 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; 2110 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
2086 2111
2112 if (dcbe->type == DCB_OUTPUT_DP) {
2113 struct nvkm_i2c_aux *aux =
2114 nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
2115 if (aux) {
2116 nv_encoder->i2c = &aux->i2c;
2117 nv_encoder->aux = aux;
2118 }
2119 } else {
2120 struct nvkm_i2c_bus *bus =
2121 nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
2122 if (bus)
2123 nv_encoder->i2c = &bus->i2c;
2124 }
2125
2087 encoder = to_drm_encoder(nv_encoder); 2126 encoder = to_drm_encoder(nv_encoder);
2088 encoder->possible_crtcs = dcbe->heads; 2127 encoder->possible_crtcs = dcbe->heads;
2089 encoder->possible_clones = 0; 2128 encoder->possible_clones = 0;
@@ -2234,18 +2273,22 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
2234{ 2273{
2235 struct nouveau_drm *drm = nouveau_drm(connector->dev); 2274 struct nouveau_drm *drm = nouveau_drm(connector->dev);
2236 struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); 2275 struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
2237 struct nvkm_i2c_port *ddc = NULL; 2276 struct nvkm_i2c_bus *bus = NULL;
2277 struct nvkm_i2c_aux *aux = NULL;
2278 struct i2c_adapter *ddc;
2238 struct nouveau_encoder *nv_encoder; 2279 struct nouveau_encoder *nv_encoder;
2239 struct drm_encoder *encoder; 2280 struct drm_encoder *encoder;
2240 int type; 2281 int type;
2241 2282
2242 switch (dcbe->type) { 2283 switch (dcbe->type) {
2243 case DCB_OUTPUT_TMDS: 2284 case DCB_OUTPUT_TMDS:
2244 ddc = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(dcbe->extdev)); 2285 bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
2286 ddc = bus ? &bus->i2c : NULL;
2245 type = DRM_MODE_ENCODER_TMDS; 2287 type = DRM_MODE_ENCODER_TMDS;
2246 break; 2288 break;
2247 case DCB_OUTPUT_DP: 2289 case DCB_OUTPUT_DP:
2248 ddc = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(dcbe->extdev)); 2290 aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
2291 ddc = aux ? &aux->i2c : NULL;
2249 type = DRM_MODE_ENCODER_TMDS; 2292 type = DRM_MODE_ENCODER_TMDS;
2250 break; 2293 break;
2251 default: 2294 default:
@@ -2258,6 +2301,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
2258 nv_encoder->dcb = dcbe; 2301 nv_encoder->dcb = dcbe;
2259 nv_encoder->or = ffs(dcbe->or) - 1; 2302 nv_encoder->or = ffs(dcbe->or) - 1;
2260 nv_encoder->i2c = ddc; 2303 nv_encoder->i2c = ddc;
2304 nv_encoder->aux = aux;
2261 2305
2262 encoder = to_drm_encoder(nv_encoder); 2306 encoder = to_drm_encoder(nv_encoder);
2263 encoder->possible_crtcs = dcbe->heads; 2307 encoder->possible_crtcs = dcbe->heads;
@@ -2295,7 +2339,7 @@ nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kin
2295 union { 2339 union {
2296 struct nv50_dma_v0 nv50; 2340 struct nv50_dma_v0 nv50;
2297 struct gf100_dma_v0 gf100; 2341 struct gf100_dma_v0 gf100;
2298 struct gf110_dma_v0 gf110; 2342 struct gf119_dma_v0 gf119;
2299 }; 2343 };
2300 } args = {}; 2344 } args = {};
2301 struct nv50_fbdma *fbdma; 2345 struct nv50_fbdma *fbdma;
@@ -2331,15 +2375,15 @@ nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kin
2331 args.gf100.kind = kind; 2375 args.gf100.kind = kind;
2332 size += sizeof(args.gf100); 2376 size += sizeof(args.gf100);
2333 } else { 2377 } else {
2334 args.gf110.page = GF110_DMA_V0_PAGE_LP; 2378 args.gf119.page = GF119_DMA_V0_PAGE_LP;
2335 args.gf110.kind = kind; 2379 args.gf119.kind = kind;
2336 size += sizeof(args.gf110); 2380 size += sizeof(args.gf119);
2337 } 2381 }
2338 2382
2339 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2383 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2340 struct nv50_head *head = nv50_head(crtc); 2384 struct nv50_head *head = nv50_head(crtc);
2341 int ret = nvif_object_init(&head->sync.base.base.user, NULL, 2385 int ret = nvif_object_init(&head->sync.base.base.user, name,
2342 name, NV_DMA_IN_MEMORY, &args, size, 2386 NV_DMA_IN_MEMORY, &args, size,
2343 &fbdma->base[head->base.index]); 2387 &fbdma->base[head->base.index]);
2344 if (ret) { 2388 if (ret) {
2345 nv50_fbdma_fini(fbdma); 2389 nv50_fbdma_fini(fbdma);
@@ -2347,9 +2391,8 @@ nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kin
2347 } 2391 }
2348 } 2392 }
2349 2393
2350 ret = nvif_object_init(&mast->base.base.user, NULL, name, 2394 ret = nvif_object_init(&mast->base.base.user, name, NV_DMA_IN_MEMORY,
2351 NV_DMA_IN_MEMORY, &args, size, 2395 &args, size, &fbdma->core);
2352 &fbdma->core);
2353 if (ret) { 2396 if (ret) {
2354 nv50_fbdma_fini(fbdma); 2397 nv50_fbdma_fini(fbdma);
2355 return ret; 2398 return ret;
@@ -2502,14 +2545,14 @@ nv50_display_create(struct drm_device *dev)
2502 goto out; 2545 goto out;
2503 2546
2504 /* allocate master evo channel */ 2547 /* allocate master evo channel */
2505 ret = nv50_core_create(disp->disp, disp->sync->bo.offset, 2548 ret = nv50_core_create(device, disp->disp, disp->sync->bo.offset,
2506 &disp->mast); 2549 &disp->mast);
2507 if (ret) 2550 if (ret)
2508 goto out; 2551 goto out;
2509 2552
2510 /* create crtc objects to represent the hw heads */ 2553 /* create crtc objects to represent the hw heads */
2511 if (disp->disp->oclass >= GF110_DISP) 2554 if (disp->disp->oclass >= GF110_DISP)
2512 crtcs = nvif_rd32(device, 0x022448); 2555 crtcs = nvif_rd32(&device->object, 0x022448);
2513 else 2556 else
2514 crtcs = 2; 2557 crtcs = 2;
2515 2558
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 901130b06072..e05499d6ed83 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -183,7 +183,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
183 return -EINVAL; 183 return -EINVAL;
184 } 184 }
185 185
186 ret = nvif_object_init(chan->object, NULL, 0x502d, 0x502d, NULL, 0, 186 ret = nvif_object_init(&chan->user, 0x502d, 0x502d, NULL, 0,
187 &nfbdev->twod); 187 &nfbdev->twod);
188 if (ret) 188 if (ret)
189 return ret; 189 return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index a82d9ea7c6fd..f0d96e5da6b4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -51,7 +51,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
51 fctx->base.read = nv10_fence_read; 51 fctx->base.read = nv10_fence_read;
52 fctx->base.sync = nv17_fence_sync; 52 fctx->base.sync = nv17_fence_sync;
53 53
54 ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_IN_MEMORY, 54 ret = nvif_object_init(&chan->user, NvSema, NV_DMA_IN_MEMORY,
55 &(struct nv_dma_v0) { 55 &(struct nv_dma_v0) {
56 .target = NV_DMA_V0_TARGET_VRAM, 56 .target = NV_DMA_V0_TARGET_VRAM,
57 .access = NV_DMA_V0_ACCESS_RDWR, 57 .access = NV_DMA_V0_ACCESS_RDWR,
@@ -66,7 +66,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
66 u32 start = bo->bo.mem.start * PAGE_SIZE; 66 u32 start = bo->bo.mem.start * PAGE_SIZE;
67 u32 limit = start + bo->bo.mem.size - 1; 67 u32 limit = start + bo->bo.mem.size - 1;
68 68
69 ret = nvif_object_init(chan->object, NULL, NvEvoSema0 + i, 69 ret = nvif_object_init(&chan->user, NvEvoSema0 + i,
70 NV_DMA_IN_MEMORY, &(struct nv_dma_v0) { 70 NV_DMA_IN_MEMORY, &(struct nv_dma_v0) {
71 .target = NV_DMA_V0_TARGET_VRAM, 71 .target = NV_DMA_V0_TARGET_VRAM,
72 .access = NV_DMA_V0_ACCESS_RDWR, 72 .access = NV_DMA_V0_ACCESS_RDWR,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index a03db4368696..412c5be5a9ca 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -131,7 +131,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
131int 131int
132nv84_fence_context_new(struct nouveau_channel *chan) 132nv84_fence_context_new(struct nouveau_channel *chan)
133{ 133{
134 struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base); 134 struct nouveau_cli *cli = (void *)chan->user.client;
135 struct nv84_fence_priv *priv = chan->drm->fence; 135 struct nv84_fence_priv *priv = chan->drm->fence;
136 struct nv84_fence_chan *fctx; 136 struct nv84_fence_chan *fctx;
137 int ret, i; 137 int ret, i;
@@ -213,7 +213,7 @@ nv84_fence_destroy(struct nouveau_drm *drm)
213int 213int
214nv84_fence_create(struct nouveau_drm *drm) 214nv84_fence_create(struct nouveau_drm *drm)
215{ 215{
216 struct nvkm_fifo *pfifo = nvxx_fifo(&drm->device); 216 struct nvkm_fifo *fifo = nvxx_fifo(&drm->device);
217 struct nv84_fence_priv *priv; 217 struct nv84_fence_priv *priv;
218 u32 domain; 218 u32 domain;
219 int ret; 219 int ret;
@@ -228,7 +228,7 @@ nv84_fence_create(struct nouveau_drm *drm)
228 priv->base.context_new = nv84_fence_context_new; 228 priv->base.context_new = nv84_fence_context_new;
229 priv->base.context_del = nv84_fence_context_del; 229 priv->base.context_del = nv84_fence_context_del;
230 230
231 priv->base.contexts = pfifo->max + 1; 231 priv->base.contexts = fifo->nr;
232 priv->base.context_base = fence_context_alloc(priv->base.contexts); 232 priv->base.context_base = fence_context_alloc(priv->base.contexts);
233 priv->base.uevent = true; 233 priv->base.uevent = true;
234 234
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index fcd2e5f27bb9..c97395b4a312 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -156,7 +156,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
156 struct nouveau_channel *chan = drm->channel; 156 struct nouveau_channel *chan = drm->channel;
157 int ret, format; 157 int ret, format;
158 158
159 ret = nvif_object_init(chan->object, NULL, 0x902d, 0x902d, NULL, 0, 159 ret = nvif_object_init(&chan->user, 0x902d, 0x902d, NULL, 0,
160 &nfbdev->twod); 160 &nfbdev->twod);
161 if (ret) 161 if (ret)
162 return ret; 162 return ret;
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 80b96844221e..1ee9294eca2e 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -29,29 +29,29 @@
29int 29int
30nvif_client_ioctl(struct nvif_client *client, void *data, u32 size) 30nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
31{ 31{
32 return client->driver->ioctl(client->base.priv, client->super, data, size, NULL); 32 return client->driver->ioctl(client->object.priv, client->super, data, size, NULL);
33} 33}
34 34
35int 35int
36nvif_client_suspend(struct nvif_client *client) 36nvif_client_suspend(struct nvif_client *client)
37{ 37{
38 return client->driver->suspend(client->base.priv); 38 return client->driver->suspend(client->object.priv);
39} 39}
40 40
41int 41int
42nvif_client_resume(struct nvif_client *client) 42nvif_client_resume(struct nvif_client *client)
43{ 43{
44 return client->driver->resume(client->base.priv); 44 return client->driver->resume(client->object.priv);
45} 45}
46 46
47void 47void
48nvif_client_fini(struct nvif_client *client) 48nvif_client_fini(struct nvif_client *client)
49{ 49{
50 if (client->driver) { 50 if (client->driver) {
51 client->driver->fini(client->base.priv); 51 client->driver->fini(client->object.priv);
52 client->driver = NULL; 52 client->driver = NULL;
53 client->base.parent = NULL; 53 client->object.client = NULL;
54 nvif_object_fini(&client->base); 54 nvif_object_fini(&client->object);
55 } 55 }
56} 56}
57 57
@@ -68,63 +68,39 @@ nvif_drivers[] = {
68}; 68};
69 69
70int 70int
71nvif_client_init(void (*dtor)(struct nvif_client *), const char *driver, 71nvif_client_init(const char *driver, const char *name, u64 device,
72 const char *name, u64 device, const char *cfg, const char *dbg, 72 const char *cfg, const char *dbg, struct nvif_client *client)
73 struct nvif_client *client)
74{ 73{
74 struct {
75 struct nvif_ioctl_v0 ioctl;
76 struct nvif_ioctl_nop_v0 nop;
77 } args = {};
75 int ret, i; 78 int ret, i;
76 79
77 ret = nvif_object_init(NULL, (void*)dtor, 0, 0, NULL, 0, &client->base); 80 ret = nvif_object_init(NULL, 0, 0, NULL, 0, &client->object);
78 if (ret) 81 if (ret)
79 return ret; 82 return ret;
80 83
81 client->base.parent = &client->base; 84 client->object.client = client;
82 client->base.handle = ~0; 85 client->object.handle = ~0;
83 client->object = &client->base; 86 client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
84 client->super = true; 87 client->super = true;
85 88
86 for (i = 0, ret = -EINVAL; (client->driver = nvif_drivers[i]); i++) { 89 for (i = 0, ret = -EINVAL; (client->driver = nvif_drivers[i]); i++) {
87 if (!driver || !strcmp(client->driver->name, driver)) { 90 if (!driver || !strcmp(client->driver->name, driver)) {
88 ret = client->driver->init(name, device, cfg, dbg, 91 ret = client->driver->init(name, device, cfg, dbg,
89 &client->base.priv); 92 &client->object.priv);
90 if (!ret || driver) 93 if (!ret || driver)
91 break; 94 break;
92 } 95 }
93 } 96 }
94 97
98 if (ret == 0) {
99 ret = nvif_client_ioctl(client, &args, sizeof(args));
100 client->version = args.nop.version;
101 }
102
95 if (ret) 103 if (ret)
96 nvif_client_fini(client); 104 nvif_client_fini(client);
97 return ret; 105 return ret;
98} 106}
99
100static void
101nvif_client_del(struct nvif_client *client)
102{
103 nvif_client_fini(client);
104 kfree(client);
105}
106
107int
108nvif_client_new(const char *driver, const char *name, u64 device,
109 const char *cfg, const char *dbg,
110 struct nvif_client **pclient)
111{
112 struct nvif_client *client = kzalloc(sizeof(*client), GFP_KERNEL);
113 if (client) {
114 int ret = nvif_client_init(nvif_client_del, driver, name,
115 device, cfg, dbg, client);
116 if (ret) {
117 kfree(client);
118 client = NULL;
119 }
120 *pclient = client;
121 return ret;
122 }
123 return -ENOMEM;
124}
125
126void
127nvif_client_ref(struct nvif_client *client, struct nvif_client **pclient)
128{
129 nvif_object_ref(&client->base, (struct nvif_object **)pclient);
130}
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
index 6f72244c52cd..252d8c33215b 100644
--- a/drivers/gpu/drm/nouveau/nvif/device.c
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -24,55 +24,32 @@
24 24
25#include <nvif/device.h> 25#include <nvif/device.h>
26 26
27u64
28nvif_device_time(struct nvif_device *device)
29{
30 struct nv_device_time_v0 args = {};
31 int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME,
32 &args, sizeof(args));
33 WARN_ON_ONCE(ret != 0);
34 return args.time;
35}
36
27void 37void
28nvif_device_fini(struct nvif_device *device) 38nvif_device_fini(struct nvif_device *device)
29{ 39{
30 nvif_object_fini(&device->base); 40 nvif_object_fini(&device->object);
31} 41}
32 42
33int 43int
34nvif_device_init(struct nvif_object *parent, void (*dtor)(struct nvif_device *), 44nvif_device_init(struct nvif_object *parent, u32 handle, s32 oclass,
35 u32 handle, u32 oclass, void *data, u32 size, 45 void *data, u32 size, struct nvif_device *device)
36 struct nvif_device *device)
37{ 46{
38 int ret = nvif_object_init(parent, (void *)dtor, handle, oclass, 47 int ret = nvif_object_init(parent, handle, oclass, data, size,
39 data, size, &device->base); 48 &device->object);
40 if (ret == 0) { 49 if (ret == 0) {
41 device->object = &device->base;
42 device->info.version = 0; 50 device->info.version = 0;
43 ret = nvif_object_mthd(&device->base, NV_DEVICE_V0_INFO, 51 ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_INFO,
44 &device->info, sizeof(device->info)); 52 &device->info, sizeof(device->info));
45 } 53 }
46 return ret; 54 return ret;
47} 55}
48
49static void
50nvif_device_del(struct nvif_device *device)
51{
52 nvif_device_fini(device);
53 kfree(device);
54}
55
56int
57nvif_device_new(struct nvif_object *parent, u32 handle, u32 oclass,
58 void *data, u32 size, struct nvif_device **pdevice)
59{
60 struct nvif_device *device = kzalloc(sizeof(*device), GFP_KERNEL);
61 if (device) {
62 int ret = nvif_device_init(parent, nvif_device_del, handle,
63 oclass, data, size, device);
64 if (ret) {
65 kfree(device);
66 device = NULL;
67 }
68 *pdevice = device;
69 return ret;
70 }
71 return -ENOMEM;
72}
73
74void
75nvif_device_ref(struct nvif_device *device, struct nvif_device **pdevice)
76{
77 nvif_object_ref(&device->base, (struct nvif_object **)pdevice);
78}
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c
index 8e34748709a0..b0787ff833ef 100644
--- a/drivers/gpu/drm/nouveau/nvif/notify.c
+++ b/drivers/gpu/drm/nouveau/nvif/notify.c
@@ -124,7 +124,7 @@ nvif_notify(const void *header, u32 length, const void *data, u32 size)
124 } 124 }
125 125
126 if (!WARN_ON(notify == NULL)) { 126 if (!WARN_ON(notify == NULL)) {
127 struct nvif_client *client = nvif_client(notify->object); 127 struct nvif_client *client = notify->object->client;
128 if (!WARN_ON(notify->size != size)) { 128 if (!WARN_ON(notify->size != size)) {
129 atomic_inc(&notify->putcnt); 129 atomic_inc(&notify->putcnt);
130 if (test_bit(NVIF_NOTIFY_WORK, &notify->flags)) { 130 if (test_bit(NVIF_NOTIFY_WORK, &notify->flags)) {
@@ -156,7 +156,7 @@ nvif_notify_fini(struct nvif_notify *notify)
156 if (ret >= 0 && object) { 156 if (ret >= 0 && object) {
157 ret = nvif_object_ioctl(object, &args, sizeof(args), NULL); 157 ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
158 if (ret == 0) { 158 if (ret == 0) {
159 nvif_object_ref(NULL, &notify->object); 159 notify->object = NULL;
160 kfree((void *)notify->data); 160 kfree((void *)notify->data);
161 } 161 }
162 } 162 }
@@ -164,9 +164,9 @@ nvif_notify_fini(struct nvif_notify *notify)
164} 164}
165 165
166int 166int
167nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *), 167nvif_notify_init(struct nvif_object *object, int (*func)(struct nvif_notify *),
168 int (*func)(struct nvif_notify *), bool work, u8 event, 168 bool work, u8 event, void *data, u32 size, u32 reply,
169 void *data, u32 size, u32 reply, struct nvif_notify *notify) 169 struct nvif_notify *notify)
170{ 170{
171 struct { 171 struct {
172 struct nvif_ioctl_v0 ioctl; 172 struct nvif_ioctl_v0 ioctl;
@@ -175,11 +175,9 @@ nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *),
175 } *args; 175 } *args;
176 int ret = -ENOMEM; 176 int ret = -ENOMEM;
177 177
178 notify->object = NULL; 178 notify->object = object;
179 nvif_object_ref(object, &notify->object);
180 notify->flags = 0; 179 notify->flags = 0;
181 atomic_set(&notify->putcnt, 1); 180 atomic_set(&notify->putcnt, 1);
182 notify->dtor = dtor;
183 notify->func = func; 181 notify->func = func;
184 notify->data = NULL; 182 notify->data = NULL;
185 notify->size = reply; 183 notify->size = reply;
@@ -211,38 +209,3 @@ done:
211 nvif_notify_fini(notify); 209 nvif_notify_fini(notify);
212 return ret; 210 return ret;
213} 211}
214
215static void
216nvif_notify_del(struct nvif_notify *notify)
217{
218 nvif_notify_fini(notify);
219 kfree(notify);
220}
221
222void
223nvif_notify_ref(struct nvif_notify *notify, struct nvif_notify **pnotify)
224{
225 BUG_ON(notify != NULL);
226 if (*pnotify)
227 (*pnotify)->dtor(*pnotify);
228 *pnotify = notify;
229}
230
231int
232nvif_notify_new(struct nvif_object *object, int (*func)(struct nvif_notify *),
233 bool work, u8 type, void *data, u32 size, u32 reply,
234 struct nvif_notify **pnotify)
235{
236 struct nvif_notify *notify = kzalloc(sizeof(*notify), GFP_KERNEL);
237 if (notify) {
238 int ret = nvif_notify_init(object, nvif_notify_del, func, work,
239 type, data, size, reply, notify);
240 if (ret) {
241 kfree(notify);
242 notify = NULL;
243 }
244 *pnotify = notify;
245 return ret;
246 }
247 return -ENOMEM;
248}
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 3ab4e2f8cc12..c3fb6a20f567 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -30,47 +30,71 @@
30int 30int
31nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack) 31nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
32{ 32{
33 struct nvif_client *client = nvif_client(object); 33 struct nvif_client *client = object->client;
34 union { 34 union {
35 struct nvif_ioctl_v0 v0; 35 struct nvif_ioctl_v0 v0;
36 } *args = data; 36 } *args = data;
37 37
38 if (size >= sizeof(*args) && args->v0.version == 0) { 38 if (size >= sizeof(*args) && args->v0.version == 0) {
39 if (object != &client->object)
40 args->v0.object = nvif_handle(object);
41 else
42 args->v0.object = 0;
39 args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY; 43 args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
40 args->v0.path_nr = 0;
41 while (args->v0.path_nr < ARRAY_SIZE(args->v0.path)) {
42 args->v0.path[args->v0.path_nr++] = object->handle;
43 if (object->parent == object)
44 break;
45 object = object->parent;
46 }
47 } else 44 } else
48 return -ENOSYS; 45 return -ENOSYS;
49 46
50 return client->driver->ioctl(client->base.priv, client->super, data, size, hack); 47 return client->driver->ioctl(client->object.priv, client->super,
48 data, size, hack);
49}
50
51void
52nvif_object_sclass_put(struct nvif_sclass **psclass)
53{
54 kfree(*psclass);
55 *psclass = NULL;
51} 56}
52 57
53int 58int
54nvif_object_sclass(struct nvif_object *object, u32 *oclass, int count) 59nvif_object_sclass_get(struct nvif_object *object, struct nvif_sclass **psclass)
55{ 60{
56 struct { 61 struct {
57 struct nvif_ioctl_v0 ioctl; 62 struct nvif_ioctl_v0 ioctl;
58 struct nvif_ioctl_sclass_v0 sclass; 63 struct nvif_ioctl_sclass_v0 sclass;
59 } *args; 64 } *args = NULL;
60 u32 size = count * sizeof(args->sclass.oclass[0]); 65 int ret, cnt = 0, i;
61 int ret; 66 u32 size;
62 67
63 if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) 68 while (1) {
64 return -ENOMEM; 69 size = sizeof(*args) + cnt * sizeof(args->sclass.oclass[0]);
65 args->ioctl.version = 0; 70 if (!(args = kmalloc(size, GFP_KERNEL)))
66 args->ioctl.type = NVIF_IOCTL_V0_SCLASS; 71 return -ENOMEM;
67 args->sclass.version = 0; 72 args->ioctl.version = 0;
68 args->sclass.count = count; 73 args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
74 args->sclass.version = 0;
75 args->sclass.count = cnt;
76
77 ret = nvif_object_ioctl(object, args, size, NULL);
78 if (ret == 0 && args->sclass.count <= cnt)
79 break;
80 cnt = args->sclass.count;
81 kfree(args);
82 if (ret != 0)
83 return ret;
84 }
85
86 *psclass = kzalloc(sizeof(**psclass) * args->sclass.count, GFP_KERNEL);
87 if (*psclass) {
88 for (i = 0; i < args->sclass.count; i++) {
89 (*psclass)[i].oclass = args->sclass.oclass[i].oclass;
90 (*psclass)[i].minver = args->sclass.oclass[i].minver;
91 (*psclass)[i].maxver = args->sclass.oclass[i].maxver;
92 }
93 ret = args->sclass.count;
94 } else {
95 ret = -ENOMEM;
96 }
69 97
70 memcpy(args->sclass.oclass, oclass, size);
71 ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
72 ret = ret ? ret : args->sclass.count;
73 memcpy(oclass, args->sclass.oclass, size);
74 kfree(args); 98 kfree(args);
75 return ret; 99 return ret;
76} 100}
@@ -145,7 +169,7 @@ void
145nvif_object_unmap(struct nvif_object *object) 169nvif_object_unmap(struct nvif_object *object)
146{ 170{
147 if (object->map.size) { 171 if (object->map.size) {
148 struct nvif_client *client = nvif_client(object); 172 struct nvif_client *client = object->client;
149 struct { 173 struct {
150 struct nvif_ioctl_v0 ioctl; 174 struct nvif_ioctl_v0 ioctl;
151 struct nvif_ioctl_unmap unmap; 175 struct nvif_ioctl_unmap unmap;
@@ -167,7 +191,7 @@ nvif_object_unmap(struct nvif_object *object)
167int 191int
168nvif_object_map(struct nvif_object *object) 192nvif_object_map(struct nvif_object *object)
169{ 193{
170 struct nvif_client *client = nvif_client(object); 194 struct nvif_client *client = object->client;
171 struct { 195 struct {
172 struct nvif_ioctl_v0 ioctl; 196 struct nvif_ioctl_v0 ioctl;
173 struct nvif_ioctl_map_v0 map; 197 struct nvif_ioctl_map_v0 map;
@@ -186,119 +210,65 @@ nvif_object_map(struct nvif_object *object)
186 return ret; 210 return ret;
187} 211}
188 212
189struct ctor {
190 struct nvif_ioctl_v0 ioctl;
191 struct nvif_ioctl_new_v0 new;
192};
193
194void 213void
195nvif_object_fini(struct nvif_object *object) 214nvif_object_fini(struct nvif_object *object)
196{ 215{
197 struct ctor *ctor = container_of(object->data, typeof(*ctor), new.data); 216 struct {
198 if (object->parent) { 217 struct nvif_ioctl_v0 ioctl;
199 struct { 218 struct nvif_ioctl_del del;
200 struct nvif_ioctl_v0 ioctl; 219 } args = {
201 struct nvif_ioctl_del del; 220 .ioctl.type = NVIF_IOCTL_V0_DEL,
202 } args = { 221 };
203 .ioctl.type = NVIF_IOCTL_V0_DEL,
204 };
205 222
206 nvif_object_unmap(object); 223 if (!object->client)
207 nvif_object_ioctl(object, &args, sizeof(args), NULL); 224 return;
208 if (object->data) { 225
209 object->size = 0; 226 nvif_object_unmap(object);
210 object->data = NULL; 227 nvif_object_ioctl(object, &args, sizeof(args), NULL);
211 kfree(ctor); 228 object->client = NULL;
212 }
213 nvif_object_ref(NULL, &object->parent);
214 }
215} 229}
216 230
217int 231int
218nvif_object_init(struct nvif_object *parent, void (*dtor)(struct nvif_object *), 232nvif_object_init(struct nvif_object *parent, u32 handle, s32 oclass,
219 u32 handle, u32 oclass, void *data, u32 size, 233 void *data, u32 size, struct nvif_object *object)
220 struct nvif_object *object)
221{ 234{
222 struct ctor *ctor; 235 struct {
236 struct nvif_ioctl_v0 ioctl;
237 struct nvif_ioctl_new_v0 new;
238 } *args;
223 int ret = 0; 239 int ret = 0;
224 240
225 object->parent = NULL; 241 object->client = NULL;
226 object->object = object;
227 nvif_object_ref(parent, &object->parent);
228 kref_init(&object->refcount);
229 object->handle = handle; 242 object->handle = handle;
230 object->oclass = oclass; 243 object->oclass = oclass;
231 object->data = NULL;
232 object->size = 0;
233 object->dtor = dtor;
234 object->map.ptr = NULL; 244 object->map.ptr = NULL;
235 object->map.size = 0; 245 object->map.size = 0;
236 246
237 if (object->parent) { 247 if (parent) {
238 if (!(ctor = kmalloc(sizeof(*ctor) + size, GFP_KERNEL))) { 248 if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) {
239 nvif_object_fini(object); 249 nvif_object_fini(object);
240 return -ENOMEM; 250 return -ENOMEM;
241 } 251 }
242 object->data = ctor->new.data;
243 object->size = size;
244 memcpy(object->data, data, size);
245 252
246 ctor->ioctl.version = 0; 253 args->ioctl.version = 0;
247 ctor->ioctl.type = NVIF_IOCTL_V0_NEW; 254 args->ioctl.type = NVIF_IOCTL_V0_NEW;
248 ctor->new.version = 0; 255 args->new.version = 0;
249 ctor->new.route = NVIF_IOCTL_V0_ROUTE_NVIF; 256 args->new.route = parent->client->route;
250 ctor->new.token = (unsigned long)(void *)object; 257 args->new.token = nvif_handle(object);
251 ctor->new.handle = handle; 258 args->new.object = nvif_handle(object);
252 ctor->new.oclass = oclass; 259 args->new.handle = handle;
260 args->new.oclass = oclass;
253 261
254 ret = nvif_object_ioctl(parent, ctor, sizeof(*ctor) + 262 memcpy(args->new.data, data, size);
255 object->size, &object->priv); 263 ret = nvif_object_ioctl(parent, args, sizeof(*args) + size,
264 &object->priv);
265 memcpy(data, args->new.data, size);
266 kfree(args);
267 if (ret == 0)
268 object->client = parent->client;
256 } 269 }
257 270
258 if (ret) 271 if (ret)
259 nvif_object_fini(object); 272 nvif_object_fini(object);
260 return ret; 273 return ret;
261} 274}
262
263static void
264nvif_object_del(struct nvif_object *object)
265{
266 nvif_object_fini(object);
267 kfree(object);
268}
269
270int
271nvif_object_new(struct nvif_object *parent, u32 handle, u32 oclass,
272 void *data, u32 size, struct nvif_object **pobject)
273{
274 struct nvif_object *object = kzalloc(sizeof(*object), GFP_KERNEL);
275 if (object) {
276 int ret = nvif_object_init(parent, nvif_object_del, handle,
277 oclass, data, size, object);
278 if (ret) {
279 kfree(object);
280 object = NULL;
281 }
282 *pobject = object;
283 return ret;
284 }
285 return -ENOMEM;
286}
287
288static void
289nvif_object_put(struct kref *kref)
290{
291 struct nvif_object *object =
292 container_of(kref, typeof(*object), refcount);
293 object->dtor(object);
294}
295
296void
297nvif_object_ref(struct nvif_object *object, struct nvif_object **pobject)
298{
299 if (object)
300 kref_get(&object->refcount);
301 if (*pobject)
302 kref_put(&(*pobject)->refcount, nvif_object_put);
303 *pobject = object;
304}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
index a2bdb2069113..7f66963f305c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
@@ -1,17 +1,14 @@
1nvkm-y := nvkm/core/client.o 1nvkm-y := nvkm/core/client.o
2nvkm-y += nvkm/core/engctx.o
3nvkm-y += nvkm/core/engine.o 2nvkm-y += nvkm/core/engine.o
4nvkm-y += nvkm/core/enum.o 3nvkm-y += nvkm/core/enum.o
5nvkm-y += nvkm/core/event.o 4nvkm-y += nvkm/core/event.o
6nvkm-y += nvkm/core/gpuobj.o 5nvkm-y += nvkm/core/gpuobj.o
7nvkm-y += nvkm/core/handle.o
8nvkm-y += nvkm/core/ioctl.o 6nvkm-y += nvkm/core/ioctl.o
7nvkm-y += nvkm/core/memory.o
9nvkm-y += nvkm/core/mm.o 8nvkm-y += nvkm/core/mm.o
10nvkm-y += nvkm/core/namedb.o
11nvkm-y += nvkm/core/notify.o 9nvkm-y += nvkm/core/notify.o
12nvkm-y += nvkm/core/object.o 10nvkm-y += nvkm/core/object.o
11nvkm-y += nvkm/core/oproxy.o
13nvkm-y += nvkm/core/option.o 12nvkm-y += nvkm/core/option.o
14nvkm-y += nvkm/core/parent.o
15nvkm-y += nvkm/core/printk.o
16nvkm-y += nvkm/core/ramht.o 13nvkm-y += nvkm/core/ramht.o
17nvkm-y += nvkm/core/subdev.o 14nvkm-y += nvkm/core/subdev.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index 878a82f8f295..297e1e953fa6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -23,7 +23,6 @@
23 */ 23 */
24#include <core/client.h> 24#include <core/client.h>
25#include <core/device.h> 25#include <core/device.h>
26#include <core/handle.h>
27#include <core/notify.h> 26#include <core/notify.h>
28#include <core/option.h> 27#include <core/option.h>
29 28
@@ -91,7 +90,7 @@ int
91nvkm_client_notify_new(struct nvkm_object *object, 90nvkm_client_notify_new(struct nvkm_object *object,
92 struct nvkm_event *event, void *data, u32 size) 91 struct nvkm_event *event, void *data, u32 size)
93{ 92{
94 struct nvkm_client *client = nvkm_client(object); 93 struct nvkm_client *client = object->client;
95 struct nvkm_client_notify *notify; 94 struct nvkm_client_notify *notify;
96 union { 95 union {
97 struct nvif_notify_req_v0 v0; 96 struct nvif_notify_req_v0 v0;
@@ -111,11 +110,11 @@ nvkm_client_notify_new(struct nvkm_object *object,
111 if (!notify) 110 if (!notify)
112 return -ENOMEM; 111 return -ENOMEM;
113 112
114 nv_ioctl(client, "notify new size %d\n", size); 113 nvif_ioctl(object, "notify new size %d\n", size);
115 if (nvif_unpack(req->v0, 0, 0, true)) { 114 if (nvif_unpack(req->v0, 0, 0, true)) {
116 nv_ioctl(client, "notify new vers %d reply %d route %02x " 115 nvif_ioctl(object, "notify new vers %d reply %d route %02x "
117 "token %llx\n", req->v0.version, 116 "token %llx\n", req->v0.version,
118 req->v0.reply, req->v0.route, req->v0.token); 117 req->v0.reply, req->v0.route, req->v0.token);
119 notify->version = req->v0.version; 118 notify->version = req->v0.version;
120 notify->size = sizeof(notify->rep.v0); 119 notify->size = sizeof(notify->rep.v0);
121 notify->rep.v0.version = req->v0.version; 120 notify->rep.v0.version = req->v0.version;
@@ -146,10 +145,10 @@ nvkm_client_mthd_devlist(struct nvkm_object *object, void *data, u32 size)
146 } *args = data; 145 } *args = data;
147 int ret; 146 int ret;
148 147
149 nv_ioctl(object, "client devlist size %d\n", size); 148 nvif_ioctl(object, "client devlist size %d\n", size);
150 if (nvif_unpack(args->v0, 0, 0, true)) { 149 if (nvif_unpack(args->v0, 0, 0, true)) {
151 nv_ioctl(object, "client devlist vers %d count %d\n", 150 nvif_ioctl(object, "client devlist vers %d count %d\n",
152 args->v0.version, args->v0.count); 151 args->v0.version, args->v0.count);
153 if (size == sizeof(args->v0.device[0]) * args->v0.count) { 152 if (size == sizeof(args->v0.device[0]) * args->v0.count) {
154 ret = nvkm_device_list(args->v0.device, args->v0.count); 153 ret = nvkm_device_list(args->v0.device, args->v0.count);
155 if (ret >= 0) { 154 if (ret >= 0) {
@@ -176,91 +175,134 @@ nvkm_client_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
176 return -EINVAL; 175 return -EINVAL;
177} 176}
178 177
179static void 178static int
180nvkm_client_dtor(struct nvkm_object *object) 179nvkm_client_child_new(const struct nvkm_oclass *oclass,
180 void *data, u32 size, struct nvkm_object **pobject)
181{ 181{
182 struct nvkm_client *client = (void *)object; 182 return oclass->base.ctor(oclass, data, size, pobject);
183 int i;
184 for (i = 0; i < ARRAY_SIZE(client->notify); i++)
185 nvkm_client_notify_del(client, i);
186 nvkm_object_ref(NULL, &client->device);
187 nvkm_handle_destroy(client->root);
188 nvkm_namedb_destroy(&client->namedb);
189} 183}
190 184
191static struct nvkm_oclass 185static int
192nvkm_client_oclass = { 186nvkm_client_child_get(struct nvkm_object *object, int index,
193 .ofuncs = &(struct nvkm_ofuncs) { 187 struct nvkm_oclass *oclass)
194 .dtor = nvkm_client_dtor,
195 .mthd = nvkm_client_mthd,
196 },
197};
198
199int
200nvkm_client_create_(const char *name, u64 devname, const char *cfg,
201 const char *dbg, int length, void **pobject)
202{ 188{
203 struct nvkm_object *device; 189 const struct nvkm_sclass *sclass;
204 struct nvkm_client *client; 190
205 int ret; 191 switch (index) {
192 case 0: sclass = &nvkm_udevice_sclass; break;
193 default:
194 return -EINVAL;
195 }
206 196
207 device = (void *)nvkm_device_find(devname); 197 oclass->ctor = nvkm_client_child_new;
208 if (!device) 198 oclass->base = *sclass;
209 return -ENODEV; 199 return 0;
200}
201
202static const struct nvkm_object_func
203nvkm_client_object_func = {
204 .mthd = nvkm_client_mthd,
205 .sclass = nvkm_client_child_get,
206};
210 207
211 ret = nvkm_namedb_create_(NULL, NULL, &nvkm_client_oclass, 208void
212 NV_CLIENT_CLASS, NULL, 209nvkm_client_remove(struct nvkm_client *client, struct nvkm_object *object)
213 (1ULL << NVDEV_ENGINE_DEVICE), 210{
214 length, pobject); 211 if (!RB_EMPTY_NODE(&object->node))
215 client = *pobject; 212 rb_erase(&object->node, &client->objroot);
216 if (ret) 213}
217 return ret;
218 214
219 ret = nvkm_handle_create(nv_object(client), ~0, ~0, nv_object(client), 215bool
220 &client->root); 216nvkm_client_insert(struct nvkm_client *client, struct nvkm_object *object)
221 if (ret) 217{
222 return ret; 218 struct rb_node **ptr = &client->objroot.rb_node;
219 struct rb_node *parent = NULL;
223 220
224 /* prevent init/fini being called, os in in charge of this */ 221 while (*ptr) {
225 atomic_set(&nv_object(client)->usecount, 2); 222 struct nvkm_object *this =
223 container_of(*ptr, typeof(*this), node);
224 parent = *ptr;
225 if (object->object < this->object)
226 ptr = &parent->rb_left;
227 else
228 if (object->object > this->object)
229 ptr = &parent->rb_right;
230 else
231 return false;
232 }
226 233
227 nvkm_object_ref(device, &client->device); 234 rb_link_node(&object->node, parent, ptr);
228 snprintf(client->name, sizeof(client->name), "%s", name); 235 rb_insert_color(&object->node, &client->objroot);
229 client->debug = nvkm_dbgopt(dbg, "CLIENT"); 236 return true;
230 return 0;
231} 237}
232 238
233int 239struct nvkm_object *
234nvkm_client_init(struct nvkm_client *client) 240nvkm_client_search(struct nvkm_client *client, u64 handle)
235{ 241{
236 int ret; 242 struct rb_node *node = client->objroot.rb_node;
237 nv_debug(client, "init running\n"); 243 while (node) {
238 ret = nvkm_handle_init(client->root); 244 struct nvkm_object *object =
239 nv_debug(client, "init completed with %d\n", ret); 245 container_of(node, typeof(*object), node);
240 return ret; 246 if (handle < object->object)
247 node = node->rb_left;
248 else
249 if (handle > object->object)
250 node = node->rb_right;
251 else
252 return object;
253 }
254 return NULL;
241} 255}
242 256
243int 257int
244nvkm_client_fini(struct nvkm_client *client, bool suspend) 258nvkm_client_fini(struct nvkm_client *client, bool suspend)
245{ 259{
260 struct nvkm_object *object = &client->object;
246 const char *name[2] = { "fini", "suspend" }; 261 const char *name[2] = { "fini", "suspend" };
247 int ret, i; 262 int i;
248 nv_debug(client, "%s running\n", name[suspend]); 263 nvif_debug(object, "%s notify\n", name[suspend]);
249 nv_debug(client, "%s notify\n", name[suspend]);
250 for (i = 0; i < ARRAY_SIZE(client->notify); i++) 264 for (i = 0; i < ARRAY_SIZE(client->notify); i++)
251 nvkm_client_notify_put(client, i); 265 nvkm_client_notify_put(client, i);
252 nv_debug(client, "%s object\n", name[suspend]); 266 return nvkm_object_fini(&client->object, suspend);
253 ret = nvkm_handle_fini(client->root, suspend); 267}
254 nv_debug(client, "%s completed with %d\n", name[suspend], ret); 268
255 return ret; 269int
270nvkm_client_init(struct nvkm_client *client)
271{
272 return nvkm_object_init(&client->object);
273}
274
275void
276nvkm_client_del(struct nvkm_client **pclient)
277{
278 struct nvkm_client *client = *pclient;
279 int i;
280 if (client) {
281 nvkm_client_fini(client, false);
282 for (i = 0; i < ARRAY_SIZE(client->notify); i++)
283 nvkm_client_notify_del(client, i);
284 nvkm_object_dtor(&client->object);
285 kfree(*pclient);
286 *pclient = NULL;
287 }
256} 288}
257 289
258const char * 290int
259nvkm_client_name(void *obj) 291nvkm_client_new(const char *name, u64 device, const char *cfg,
292 const char *dbg, struct nvkm_client **pclient)
260{ 293{
261 const char *client_name = "unknown"; 294 struct nvkm_oclass oclass = {};
262 struct nvkm_client *client = nvkm_client(obj); 295 struct nvkm_client *client;
263 if (client) 296
264 client_name = client->name; 297 if (!(client = *pclient = kzalloc(sizeof(*client), GFP_KERNEL)))
265 return client_name; 298 return -ENOMEM;
299 oclass.client = client;
300
301 nvkm_object_ctor(&nvkm_client_object_func, &oclass, &client->object);
302 snprintf(client->name, sizeof(client->name), "%s", name);
303 client->device = device;
304 client->debug = nvkm_dbgopt(dbg, "CLIENT");
305 client->objroot = RB_ROOT;
306 client->dmaroot = RB_ROOT;
307 return 0;
266} 308}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engctx.c b/drivers/gpu/drm/nouveau/nvkm/core/engctx.c
deleted file mode 100644
index fb2acbca75d9..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/engctx.c
+++ /dev/null
@@ -1,239 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include <core/engctx.h>
25#include <core/engine.h>
26#include <core/client.h>
27
28static inline int
29nvkm_engctx_exists(struct nvkm_object *parent,
30 struct nvkm_engine *engine, void **pobject)
31{
32 struct nvkm_engctx *engctx;
33 struct nvkm_object *parctx;
34
35 list_for_each_entry(engctx, &engine->contexts, head) {
36 parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS);
37 if (parctx == parent) {
38 atomic_inc(&nv_object(engctx)->refcount);
39 *pobject = engctx;
40 return 1;
41 }
42 }
43
44 return 0;
45}
46
47int
48nvkm_engctx_create_(struct nvkm_object *parent, struct nvkm_object *engobj,
49 struct nvkm_oclass *oclass, struct nvkm_object *pargpu,
50 u32 size, u32 align, u32 flags, int length, void **pobject)
51{
52 struct nvkm_client *client = nvkm_client(parent);
53 struct nvkm_engine *engine = nv_engine(engobj);
54 struct nvkm_object *engctx;
55 unsigned long save;
56 int ret;
57
58 /* check if this engine already has a context for the parent object,
59 * and reference it instead of creating a new one
60 */
61 spin_lock_irqsave(&engine->lock, save);
62 ret = nvkm_engctx_exists(parent, engine, pobject);
63 spin_unlock_irqrestore(&engine->lock, save);
64 if (ret)
65 return ret;
66
67 /* create the new context, supports creating both raw objects and
68 * objects backed by instance memory
69 */
70 if (size) {
71 ret = nvkm_gpuobj_create_(parent, engobj, oclass,
72 NV_ENGCTX_CLASS, pargpu, size,
73 align, flags, length, pobject);
74 } else {
75 ret = nvkm_object_create_(parent, engobj, oclass,
76 NV_ENGCTX_CLASS, length, pobject);
77 }
78
79 engctx = *pobject;
80 if (ret)
81 return ret;
82
83 /* must take the lock again and re-check a context doesn't already
84 * exist (in case of a race) - the lock had to be dropped before as
85 * it's not possible to allocate the object with it held.
86 */
87 spin_lock_irqsave(&engine->lock, save);
88 ret = nvkm_engctx_exists(parent, engine, pobject);
89 if (ret) {
90 spin_unlock_irqrestore(&engine->lock, save);
91 nvkm_object_ref(NULL, &engctx);
92 return ret;
93 }
94
95 if (client->vm)
96 atomic_inc(&client->vm->engref[nv_engidx(engine)]);
97 list_add(&nv_engctx(engctx)->head, &engine->contexts);
98 nv_engctx(engctx)->addr = ~0ULL;
99 spin_unlock_irqrestore(&engine->lock, save);
100 return 0;
101}
102
103void
104nvkm_engctx_destroy(struct nvkm_engctx *engctx)
105{
106 struct nvkm_engine *engine = engctx->gpuobj.object.engine;
107 struct nvkm_client *client = nvkm_client(engctx);
108 unsigned long save;
109
110 nvkm_gpuobj_unmap(&engctx->vma);
111 spin_lock_irqsave(&engine->lock, save);
112 list_del(&engctx->head);
113 spin_unlock_irqrestore(&engine->lock, save);
114
115 if (client->vm)
116 atomic_dec(&client->vm->engref[nv_engidx(engine)]);
117
118 if (engctx->gpuobj.size)
119 nvkm_gpuobj_destroy(&engctx->gpuobj);
120 else
121 nvkm_object_destroy(&engctx->gpuobj.object);
122}
123
124int
125nvkm_engctx_init(struct nvkm_engctx *engctx)
126{
127 struct nvkm_object *object = nv_object(engctx);
128 struct nvkm_subdev *subdev = nv_subdev(object->engine);
129 struct nvkm_object *parent;
130 struct nvkm_subdev *pardev;
131 int ret;
132
133 ret = nvkm_gpuobj_init(&engctx->gpuobj);
134 if (ret)
135 return ret;
136
137 parent = nv_pclass(object->parent, NV_PARENT_CLASS);
138 pardev = nv_subdev(parent->engine);
139 if (nv_parent(parent)->context_attach) {
140 mutex_lock(&pardev->mutex);
141 ret = nv_parent(parent)->context_attach(parent, object);
142 mutex_unlock(&pardev->mutex);
143 }
144
145 if (ret) {
146 nv_error(parent, "failed to attach %s context, %d\n",
147 subdev->name, ret);
148 return ret;
149 }
150
151 nv_debug(parent, "attached %s context\n", subdev->name);
152 return 0;
153}
154
155int
156nvkm_engctx_fini(struct nvkm_engctx *engctx, bool suspend)
157{
158 struct nvkm_object *object = nv_object(engctx);
159 struct nvkm_subdev *subdev = nv_subdev(object->engine);
160 struct nvkm_object *parent;
161 struct nvkm_subdev *pardev;
162 int ret = 0;
163
164 parent = nv_pclass(object->parent, NV_PARENT_CLASS);
165 pardev = nv_subdev(parent->engine);
166 if (nv_parent(parent)->context_detach) {
167 mutex_lock(&pardev->mutex);
168 ret = nv_parent(parent)->context_detach(parent, suspend, object);
169 mutex_unlock(&pardev->mutex);
170 }
171
172 if (ret) {
173 nv_error(parent, "failed to detach %s context, %d\n",
174 subdev->name, ret);
175 return ret;
176 }
177
178 nv_debug(parent, "detached %s context\n", subdev->name);
179 return nvkm_gpuobj_fini(&engctx->gpuobj, suspend);
180}
181
182int
183_nvkm_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
184 struct nvkm_oclass *oclass, void *data, u32 size,
185 struct nvkm_object **pobject)
186{
187 struct nvkm_engctx *engctx;
188 int ret;
189
190 ret = nvkm_engctx_create(parent, engine, oclass, NULL, 256, 256,
191 NVOBJ_FLAG_ZERO_ALLOC, &engctx);
192 *pobject = nv_object(engctx);
193 return ret;
194}
195
196void
197_nvkm_engctx_dtor(struct nvkm_object *object)
198{
199 nvkm_engctx_destroy(nv_engctx(object));
200}
201
202int
203_nvkm_engctx_init(struct nvkm_object *object)
204{
205 return nvkm_engctx_init(nv_engctx(object));
206}
207
208int
209_nvkm_engctx_fini(struct nvkm_object *object, bool suspend)
210{
211 return nvkm_engctx_fini(nv_engctx(object), suspend);
212}
213
214struct nvkm_object *
215nvkm_engctx_get(struct nvkm_engine *engine, u64 addr)
216{
217 struct nvkm_engctx *engctx;
218 unsigned long flags;
219
220 spin_lock_irqsave(&engine->lock, flags);
221 list_for_each_entry(engctx, &engine->contexts, head) {
222 if (engctx->addr == addr) {
223 engctx->save = flags;
224 return nv_object(engctx);
225 }
226 }
227 spin_unlock_irqrestore(&engine->lock, flags);
228 return NULL;
229}
230
231void
232nvkm_engctx_put(struct nvkm_object *object)
233{
234 if (object) {
235 struct nvkm_engine *engine = nv_engine(object->engine);
236 struct nvkm_engctx *engctx = nv_engctx(object);
237 spin_unlock_irqrestore(&engine->lock, engctx->save);
238 }
239}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index 60820173c6aa..8a7bae7bd995 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -25,51 +25,141 @@
25#include <core/device.h> 25#include <core/device.h>
26#include <core/option.h> 26#include <core/option.h>
27 27
28#include <subdev/fb.h>
29
30void
31nvkm_engine_unref(struct nvkm_engine **pengine)
32{
33 struct nvkm_engine *engine = *pengine;
34 if (engine) {
35 mutex_lock(&engine->subdev.mutex);
36 if (--engine->usecount == 0)
37 nvkm_subdev_fini(&engine->subdev, false);
38 mutex_unlock(&engine->subdev.mutex);
39 *pengine = NULL;
40 }
41}
42
28struct nvkm_engine * 43struct nvkm_engine *
29nvkm_engine(void *obj, int idx) 44nvkm_engine_ref(struct nvkm_engine *engine)
30{ 45{
31 obj = nvkm_subdev(obj, idx); 46 if (engine) {
32 if (obj && nv_iclass(obj, NV_ENGINE_CLASS)) 47 mutex_lock(&engine->subdev.mutex);
33 return nv_engine(obj); 48 if (++engine->usecount == 1) {
34 return NULL; 49 int ret = nvkm_subdev_init(&engine->subdev);
50 if (ret) {
51 engine->usecount--;
52 mutex_unlock(&engine->subdev.mutex);
53 return ERR_PTR(ret);
54 }
55 }
56 mutex_unlock(&engine->subdev.mutex);
57 }
58 return engine;
35} 59}
36 60
37int 61void
38nvkm_engine_create_(struct nvkm_object *parent, struct nvkm_object *engobj, 62nvkm_engine_tile(struct nvkm_engine *engine, int region)
39 struct nvkm_oclass *oclass, bool enable,
40 const char *iname, const char *fname,
41 int length, void **pobject)
42{ 63{
43 struct nvkm_engine *engine; 64 struct nvkm_fb *fb = engine->subdev.device->fb;
44 int ret; 65 if (engine->func->tile)
66 engine->func->tile(engine, region, &fb->tile.region[region]);
67}
45 68
46 ret = nvkm_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS, 69static void
47 iname, fname, length, pobject); 70nvkm_engine_intr(struct nvkm_subdev *subdev)
48 engine = *pobject; 71{
49 if (ret) 72 struct nvkm_engine *engine = nvkm_engine(subdev);
50 return ret; 73 if (engine->func->intr)
74 engine->func->intr(engine);
75}
51 76
52 if (parent) { 77static int
53 struct nvkm_device *device = nv_device(parent); 78nvkm_engine_fini(struct nvkm_subdev *subdev, bool suspend)
54 int engidx = nv_engidx(engine); 79{
80 struct nvkm_engine *engine = nvkm_engine(subdev);
81 if (engine->func->fini)
82 return engine->func->fini(engine, suspend);
83 return 0;
84}
55 85
56 if (device->disable_mask & (1ULL << engidx)) { 86static int
57 if (!nvkm_boolopt(device->cfgopt, iname, false)) { 87nvkm_engine_init(struct nvkm_subdev *subdev)
58 nv_debug(engine, "engine disabled by hw/fw\n"); 88{
59 return -ENODEV; 89 struct nvkm_engine *engine = nvkm_engine(subdev);
60 } 90 struct nvkm_fb *fb = subdev->device->fb;
91 int ret = 0, i;
92 s64 time;
61 93
62 nv_warn(engine, "ignoring hw/fw engine disable\n"); 94 if (!engine->usecount) {
63 } 95 nvkm_trace(subdev, "init skipped, engine has no users\n");
96 return ret;
97 }
64 98
65 if (!nvkm_boolopt(device->cfgopt, iname, enable)) { 99 if (engine->func->oneinit && !engine->subdev.oneinit) {
66 if (!enable) 100 nvkm_trace(subdev, "one-time init running...\n");
67 nv_warn(engine, "disabled, %s=1 to enable\n", iname); 101 time = ktime_to_us(ktime_get());
68 return -ENODEV; 102 ret = engine->func->oneinit(engine);
103 if (ret) {
104 nvkm_trace(subdev, "one-time init failed, %d\n", ret);
105 return ret;
69 } 106 }
107
108 engine->subdev.oneinit = true;
109 time = ktime_to_us(ktime_get()) - time;
110 nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
111 }
112
113 if (engine->func->init)
114 ret = engine->func->init(engine);
115
116 for (i = 0; fb && i < fb->tile.regions; i++)
117 nvkm_engine_tile(engine, i);
118 return ret;
119}
120
121static void *
122nvkm_engine_dtor(struct nvkm_subdev *subdev)
123{
124 struct nvkm_engine *engine = nvkm_engine(subdev);
125 if (engine->func->dtor)
126 return engine->func->dtor(engine);
127 return engine;
128}
129
130static const struct nvkm_subdev_func
131nvkm_engine_func = {
132 .dtor = nvkm_engine_dtor,
133 .init = nvkm_engine_init,
134 .fini = nvkm_engine_fini,
135 .intr = nvkm_engine_intr,
136};
137
138int
139nvkm_engine_ctor(const struct nvkm_engine_func *func,
140 struct nvkm_device *device, int index, u32 pmc_enable,
141 bool enable, struct nvkm_engine *engine)
142{
143 nvkm_subdev_ctor(&nvkm_engine_func, device, index,
144 pmc_enable, &engine->subdev);
145 engine->func = func;
146
147 if (!nvkm_boolopt(device->cfgopt, nvkm_subdev_name[index], enable)) {
148 nvkm_debug(&engine->subdev, "disabled\n");
149 return -ENODEV;
70 } 150 }
71 151
72 INIT_LIST_HEAD(&engine->contexts);
73 spin_lock_init(&engine->lock); 152 spin_lock_init(&engine->lock);
74 return 0; 153 return 0;
75} 154}
155
156int
157nvkm_engine_new_(const struct nvkm_engine_func *func,
158 struct nvkm_device *device, int index, u32 pmc_enable,
159 bool enable, struct nvkm_engine **pengine)
160{
161 if (!(*pengine = kzalloc(sizeof(**pengine), GFP_KERNEL)))
162 return -ENOMEM;
163 return nvkm_engine_ctor(func, device, index, pmc_enable,
164 enable, *pengine);
165}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/enum.c b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
index 4f92bfc13d6b..b9581feb24cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/enum.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
@@ -38,29 +38,19 @@ nvkm_enum_find(const struct nvkm_enum *en, u32 value)
38 return NULL; 38 return NULL;
39} 39}
40 40
41const struct nvkm_enum *
42nvkm_enum_print(const struct nvkm_enum *en, u32 value)
43{
44 en = nvkm_enum_find(en, value);
45 if (en)
46 pr_cont("%s", en->name);
47 else
48 pr_cont("(unknown enum 0x%08x)", value);
49 return en;
50}
51
52void 41void
53nvkm_bitfield_print(const struct nvkm_bitfield *bf, u32 value) 42nvkm_snprintbf(char *data, int size, const struct nvkm_bitfield *bf, u32 value)
54{ 43{
55 while (bf->name) { 44 bool space = false;
45 while (size >= 1 && bf->name) {
56 if (value & bf->mask) { 46 if (value & bf->mask) {
57 pr_cont(" %s", bf->name); 47 int this = snprintf(data, size, "%s%s",
58 value &= ~bf->mask; 48 space ? " " : "", bf->name);
49 size -= this;
50 data += this;
51 space = true;
59 } 52 }
60
61 bf++; 53 bf++;
62 } 54 }
63 55 data[0] = '\0';
64 if (value)
65 pr_cont(" (unknown bits 0x%08x)", value);
66} 56}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
index 2eba801aae6f..c3a790eb8d6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
@@ -28,240 +28,205 @@
28#include <subdev/bar.h> 28#include <subdev/bar.h>
29#include <subdev/mmu.h> 29#include <subdev/mmu.h>
30 30
31void 31/* fast-path, where backend is able to provide direct pointer to memory */
32nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj) 32static u32
33nvkm_gpuobj_rd32_fast(struct nvkm_gpuobj *gpuobj, u32 offset)
33{ 34{
34 int i; 35 return ioread32_native(gpuobj->map + offset);
35 36}
36 if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
37 for (i = 0; i < gpuobj->size; i += 4)
38 nv_wo32(gpuobj, i, 0x00000000);
39 }
40
41 if (gpuobj->node)
42 nvkm_mm_free(&nv_gpuobj(gpuobj->parent)->heap, &gpuobj->node);
43 37
44 if (gpuobj->heap.block_size) 38static void
45 nvkm_mm_fini(&gpuobj->heap); 39nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
40{
41 iowrite32_native(data, gpuobj->map + offset);
42}
46 43
47 nvkm_object_destroy(&gpuobj->object); 44/* accessor functions for gpuobjs allocated directly from instmem */
45static u32
46nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
47{
48 return nvkm_ro32(gpuobj->memory, offset);
48} 49}
49 50
50int 51static void
51nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine, 52nvkm_gpuobj_heap_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
52 struct nvkm_oclass *oclass, u32 pclass,
53 struct nvkm_object *pargpu, u32 size, u32 align, u32 flags,
54 int length, void **pobject)
55{ 53{
56 struct nvkm_instmem *imem = nvkm_instmem(parent); 54 nvkm_wo32(gpuobj->memory, offset, data);
57 struct nvkm_bar *bar = nvkm_bar(parent); 55}
58 struct nvkm_gpuobj *gpuobj;
59 struct nvkm_mm *heap = NULL;
60 int ret, i;
61 u64 addr;
62 56
63 *pobject = NULL; 57static const struct nvkm_gpuobj_func nvkm_gpuobj_heap;
58static void
59nvkm_gpuobj_heap_release(struct nvkm_gpuobj *gpuobj)
60{
61 gpuobj->func = &nvkm_gpuobj_heap;
62 nvkm_done(gpuobj->memory);
63}
64 64
65 if (pargpu) { 65static const struct nvkm_gpuobj_func
66 while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) { 66nvkm_gpuobj_heap_fast = {
67 if (nv_gpuobj(pargpu)->heap.block_size) 67 .release = nvkm_gpuobj_heap_release,
68 break; 68 .rd32 = nvkm_gpuobj_rd32_fast,
69 pargpu = pargpu->parent; 69 .wr32 = nvkm_gpuobj_wr32_fast,
70 } 70};
71 71
72 if (unlikely(pargpu == NULL)) { 72static const struct nvkm_gpuobj_func
73 nv_error(parent, "no gpuobj heap\n"); 73nvkm_gpuobj_heap_slow = {
74 return -EINVAL; 74 .release = nvkm_gpuobj_heap_release,
75 } 75 .rd32 = nvkm_gpuobj_heap_rd32,
76 .wr32 = nvkm_gpuobj_heap_wr32,
77};
76 78
77 addr = nv_gpuobj(pargpu)->addr; 79static void *
78 heap = &nv_gpuobj(pargpu)->heap; 80nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
79 atomic_inc(&parent->refcount); 81{
80 } else { 82 gpuobj->map = nvkm_kmap(gpuobj->memory);
81 ret = imem->alloc(imem, parent, size, align, &parent); 83 if (likely(gpuobj->map))
82 pargpu = parent; 84 gpuobj->func = &nvkm_gpuobj_heap_fast;
83 if (ret) 85 else
84 return ret; 86 gpuobj->func = &nvkm_gpuobj_heap_slow;
87 return gpuobj->map;
88}
85 89
86 addr = nv_memobj(pargpu)->addr; 90static const struct nvkm_gpuobj_func
87 size = nv_memobj(pargpu)->size; 91nvkm_gpuobj_heap = {
88 92 .acquire = nvkm_gpuobj_heap_acquire,
89 if (bar && bar->alloc) { 93};
90 struct nvkm_instobj *iobj = (void *)parent;
91 struct nvkm_mem **mem = (void *)(iobj + 1);
92 struct nvkm_mem *node = *mem;
93 if (!bar->alloc(bar, parent, node, &pargpu)) {
94 nvkm_object_ref(NULL, &parent);
95 parent = pargpu;
96 }
97 }
98 }
99 94
100 ret = nvkm_object_create_(parent, engine, oclass, pclass | 95/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
101 NV_GPUOBJ_CLASS, length, pobject); 96static u32
102 nvkm_object_ref(NULL, &parent); 97nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
103 gpuobj = *pobject; 98{
104 if (ret) 99 return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset);
105 return ret; 100}
106 101
107 gpuobj->parent = pargpu; 102static void
108 gpuobj->flags = flags; 103nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
109 gpuobj->addr = addr; 104{
110 gpuobj->size = size; 105 nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data);
106}
111 107
112 if (heap) { 108static const struct nvkm_gpuobj_func nvkm_gpuobj_func;
113 ret = nvkm_mm_head(heap, 0, 1, size, size, max(align, (u32)1), 109static void
114 &gpuobj->node); 110nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj)
115 if (ret) 111{
116 return ret; 112 gpuobj->func = &nvkm_gpuobj_func;
113 nvkm_done(gpuobj->parent);
114}
117 115
118 gpuobj->addr += gpuobj->node->offset; 116static const struct nvkm_gpuobj_func
119 } 117nvkm_gpuobj_fast = {
118 .release = nvkm_gpuobj_release,
119 .rd32 = nvkm_gpuobj_rd32_fast,
120 .wr32 = nvkm_gpuobj_wr32_fast,
121};
120 122
121 if (gpuobj->flags & NVOBJ_FLAG_HEAP) { 123static const struct nvkm_gpuobj_func
122 ret = nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1); 124nvkm_gpuobj_slow = {
123 if (ret) 125 .release = nvkm_gpuobj_release,
124 return ret; 126 .rd32 = nvkm_gpuobj_rd32,
125 } 127 .wr32 = nvkm_gpuobj_wr32,
128};
126 129
127 if (flags & NVOBJ_FLAG_ZERO_ALLOC) { 130static void *
128 for (i = 0; i < gpuobj->size; i += 4) 131nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
129 nv_wo32(gpuobj, i, 0x00000000); 132{
133 gpuobj->map = nvkm_kmap(gpuobj->parent);
134 if (likely(gpuobj->map)) {
135 gpuobj->map = (u8 *)gpuobj->map + gpuobj->node->offset;
136 gpuobj->func = &nvkm_gpuobj_fast;
137 } else {
138 gpuobj->func = &nvkm_gpuobj_slow;
130 } 139 }
131 140 return gpuobj->map;
132 return ret;
133} 141}
134 142
135struct nvkm_gpuobj_class { 143static const struct nvkm_gpuobj_func
136 struct nvkm_object *pargpu; 144nvkm_gpuobj_func = {
137 u64 size; 145 .acquire = nvkm_gpuobj_acquire,
138 u32 align;
139 u32 flags;
140}; 146};
141 147
142static int 148static int
143_nvkm_gpuobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 149nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
144 struct nvkm_oclass *oclass, void *data, u32 size, 150 struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj)
145 struct nvkm_object **pobject)
146{ 151{
147 struct nvkm_gpuobj_class *args = data; 152 u32 offset;
148 struct nvkm_gpuobj *object;
149 int ret; 153 int ret;
150 154
151 ret = nvkm_gpuobj_create(parent, engine, oclass, 0, args->pargpu, 155 if (parent) {
152 args->size, args->align, args->flags, 156 if (align >= 0) {
153 &object); 157 ret = nvkm_mm_head(&parent->heap, 0, 1, size, size,
154 *pobject = nv_object(object); 158 max(align, 1), &gpuobj->node);
155 if (ret) 159 } else {
156 return ret; 160 ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size,
161 -align, &gpuobj->node);
162 }
163 if (ret)
164 return ret;
157 165
158 return 0; 166 gpuobj->parent = parent;
159} 167 gpuobj->func = &nvkm_gpuobj_func;
168 gpuobj->addr = parent->addr + gpuobj->node->offset;
169 gpuobj->size = gpuobj->node->length;
160 170
161void 171 if (zero) {
162_nvkm_gpuobj_dtor(struct nvkm_object *object) 172 nvkm_kmap(gpuobj);
163{ 173 for (offset = 0; offset < gpuobj->size; offset += 4)
164 nvkm_gpuobj_destroy(nv_gpuobj(object)); 174 nvkm_wo32(gpuobj, offset, 0x00000000);
165} 175 nvkm_done(gpuobj);
166 176 }
167int 177 } else {
168_nvkm_gpuobj_init(struct nvkm_object *object) 178 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size,
169{ 179 abs(align), zero, &gpuobj->memory);
170 return nvkm_gpuobj_init(nv_gpuobj(object)); 180 if (ret)
171} 181 return ret;
172 182
173int 183 gpuobj->func = &nvkm_gpuobj_heap;
174_nvkm_gpuobj_fini(struct nvkm_object *object, bool suspend) 184 gpuobj->addr = nvkm_memory_addr(gpuobj->memory);
175{ 185 gpuobj->size = nvkm_memory_size(gpuobj->memory);
176 return nvkm_gpuobj_fini(nv_gpuobj(object), suspend); 186 }
177}
178 187
179u32 188 return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
180_nvkm_gpuobj_rd32(struct nvkm_object *object, u64 addr)
181{
182 struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
183 struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
184 if (gpuobj->node)
185 addr += gpuobj->node->offset;
186 return pfuncs->rd32(gpuobj->parent, addr);
187} 189}
188 190
189void 191void
190_nvkm_gpuobj_wr32(struct nvkm_object *object, u64 addr, u32 data) 192nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
191{ 193{
192 struct nvkm_gpuobj *gpuobj = nv_gpuobj(object); 194 struct nvkm_gpuobj *gpuobj = *pgpuobj;
193 struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); 195 if (gpuobj) {
194 if (gpuobj->node) 196 if (gpuobj->parent)
195 addr += gpuobj->node->offset; 197 nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
196 pfuncs->wr32(gpuobj->parent, addr, data); 198 nvkm_mm_fini(&gpuobj->heap);
199 nvkm_memory_del(&gpuobj->memory);
200 kfree(*pgpuobj);
201 *pgpuobj = NULL;
202 }
197} 203}
198 204
199static struct nvkm_oclass
200_nvkm_gpuobj_oclass = {
201 .handle = 0x00000000,
202 .ofuncs = &(struct nvkm_ofuncs) {
203 .ctor = _nvkm_gpuobj_ctor,
204 .dtor = _nvkm_gpuobj_dtor,
205 .init = _nvkm_gpuobj_init,
206 .fini = _nvkm_gpuobj_fini,
207 .rd32 = _nvkm_gpuobj_rd32,
208 .wr32 = _nvkm_gpuobj_wr32,
209 },
210};
211
212int 205int
213nvkm_gpuobj_new(struct nvkm_object *parent, struct nvkm_object *pargpu, 206nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
214 u32 size, u32 align, u32 flags, 207 struct nvkm_gpuobj *parent, struct nvkm_gpuobj **pgpuobj)
215 struct nvkm_gpuobj **pgpuobj)
216{ 208{
217 struct nvkm_object *engine = parent; 209 struct nvkm_gpuobj *gpuobj;
218 struct nvkm_gpuobj_class args = { 210 int ret;
219 .pargpu = pargpu,
220 .size = size,
221 .align = align,
222 .flags = flags,
223 };
224
225 if (!nv_iclass(engine, NV_SUBDEV_CLASS))
226 engine = &engine->engine->subdev.object;
227 BUG_ON(engine == NULL);
228
229 return nvkm_object_ctor(parent, engine, &_nvkm_gpuobj_oclass,
230 &args, sizeof(args),
231 (struct nvkm_object **)pgpuobj);
232}
233 211
234int 212 if (!(gpuobj = *pgpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL)))
235nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u32 access, struct nvkm_vma *vma) 213 return -ENOMEM;
236{
237 struct nvkm_bar *bar = nvkm_bar(gpuobj);
238 int ret = -EINVAL;
239
240 if (bar && bar->umap) {
241 struct nvkm_instobj *iobj = (void *)
242 nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
243 struct nvkm_mem **mem = (void *)(iobj + 1);
244 ret = bar->umap(bar, *mem, access, vma);
245 }
246 214
215 ret = nvkm_gpuobj_ctor(device, size, align, zero, parent, gpuobj);
216 if (ret)
217 nvkm_gpuobj_del(pgpuobj);
247 return ret; 218 return ret;
248} 219}
249 220
250int 221int
251nvkm_gpuobj_map_vm(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm, 222nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
252 u32 access, struct nvkm_vma *vma) 223 u32 access, struct nvkm_vma *vma)
253{ 224{
254 struct nvkm_instobj *iobj = (void *) 225 struct nvkm_memory *memory = gpuobj->memory;
255 nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS); 226 int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
256 struct nvkm_mem **mem = (void *)(iobj + 1); 227 if (ret == 0)
257 int ret; 228 nvkm_memory_map(memory, vma, 0);
258 229 return ret;
259 ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
260 if (ret)
261 return ret;
262
263 nvkm_vm_map(vma, *mem);
264 return 0;
265} 230}
266 231
267void 232void
@@ -278,39 +243,13 @@ nvkm_gpuobj_unmap(struct nvkm_vma *vma)
278 * anywhere else. 243 * anywhere else.
279 */ 244 */
280 245
281static void
282nvkm_gpudup_dtor(struct nvkm_object *object)
283{
284 struct nvkm_gpuobj *gpuobj = (void *)object;
285 nvkm_object_ref(NULL, &gpuobj->parent);
286 nvkm_object_destroy(&gpuobj->object);
287}
288
289static struct nvkm_oclass
290nvkm_gpudup_oclass = {
291 .handle = NV_GPUOBJ_CLASS,
292 .ofuncs = &(struct nvkm_ofuncs) {
293 .dtor = nvkm_gpudup_dtor,
294 .init = nvkm_object_init,
295 .fini = nvkm_object_fini,
296 },
297};
298
299int 246int
300nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_gpuobj *base, 247nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj)
301 struct nvkm_gpuobj **pgpuobj)
302{ 248{
303 struct nvkm_gpuobj *gpuobj; 249 if (!(*pgpuobj = kzalloc(sizeof(**pgpuobj), GFP_KERNEL)))
304 int ret; 250 return -ENOMEM;
305
306 ret = nvkm_object_create(parent, &parent->engine->subdev.object,
307 &nvkm_gpudup_oclass, 0, &gpuobj);
308 *pgpuobj = gpuobj;
309 if (ret)
310 return ret;
311 251
312 nvkm_object_ref(nv_object(base), &gpuobj->parent); 252 (*pgpuobj)->addr = nvkm_memory_addr(memory);
313 gpuobj->addr = base->addr; 253 (*pgpuobj)->size = nvkm_memory_size(memory);
314 gpuobj->size = base->size;
315 return 0; 254 return 0;
316} 255}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/handle.c b/drivers/gpu/drm/nouveau/nvkm/core/handle.c
deleted file mode 100644
index dc7ff10ebe7b..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/handle.c
+++ /dev/null
@@ -1,221 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include <core/handle.h>
25#include <core/client.h>
26
27#define hprintk(h,l,f,a...) do { \
28 struct nvkm_client *c = nvkm_client((h)->object); \
29 struct nvkm_handle *p = (h)->parent; u32 n = p ? p->name : ~0; \
30 nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a); \
31} while(0)
32
33int
34nvkm_handle_init(struct nvkm_handle *handle)
35{
36 struct nvkm_handle *item;
37 int ret;
38
39 hprintk(handle, TRACE, "init running\n");
40 ret = nvkm_object_inc(handle->object);
41 if (ret)
42 return ret;
43
44 hprintk(handle, TRACE, "init children\n");
45 list_for_each_entry(item, &handle->tree, head) {
46 ret = nvkm_handle_init(item);
47 if (ret)
48 goto fail;
49 }
50
51 hprintk(handle, TRACE, "init completed\n");
52 return 0;
53fail:
54 hprintk(handle, ERROR, "init failed with %d\n", ret);
55 list_for_each_entry_continue_reverse(item, &handle->tree, head) {
56 nvkm_handle_fini(item, false);
57 }
58
59 nvkm_object_dec(handle->object, false);
60 return ret;
61}
62
63int
64nvkm_handle_fini(struct nvkm_handle *handle, bool suspend)
65{
66 static char *name[2] = { "fini", "suspend" };
67 struct nvkm_handle *item;
68 int ret;
69
70 hprintk(handle, TRACE, "%s children\n", name[suspend]);
71 list_for_each_entry(item, &handle->tree, head) {
72 ret = nvkm_handle_fini(item, suspend);
73 if (ret && suspend)
74 goto fail;
75 }
76
77 hprintk(handle, TRACE, "%s running\n", name[suspend]);
78 if (handle->object) {
79 ret = nvkm_object_dec(handle->object, suspend);
80 if (ret && suspend)
81 goto fail;
82 }
83
84 hprintk(handle, TRACE, "%s completed\n", name[suspend]);
85 return 0;
86fail:
87 hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret);
88 list_for_each_entry_continue_reverse(item, &handle->tree, head) {
89 int rret = nvkm_handle_init(item);
90 if (rret)
91 hprintk(handle, FATAL, "failed to restart, %d\n", rret);
92 }
93
94 return ret;
95}
96
97int
98nvkm_handle_create(struct nvkm_object *parent, u32 _parent, u32 _handle,
99 struct nvkm_object *object, struct nvkm_handle **phandle)
100{
101 struct nvkm_object *namedb;
102 struct nvkm_handle *handle;
103 int ret;
104
105 namedb = parent;
106 while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
107 namedb = namedb->parent;
108
109 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
110 if (!handle)
111 return -ENOMEM;
112
113 INIT_LIST_HEAD(&handle->head);
114 INIT_LIST_HEAD(&handle->tree);
115 handle->name = _handle;
116 handle->priv = ~0;
117
118 ret = nvkm_namedb_insert(nv_namedb(namedb), _handle, object, handle);
119 if (ret) {
120 kfree(handle);
121 return ret;
122 }
123
124 if (nv_parent(parent)->object_attach) {
125 ret = nv_parent(parent)->object_attach(parent, object, _handle);
126 if (ret < 0) {
127 nvkm_handle_destroy(handle);
128 return ret;
129 }
130
131 handle->priv = ret;
132 }
133
134 if (object != namedb) {
135 while (!nv_iclass(namedb, NV_CLIENT_CLASS))
136 namedb = namedb->parent;
137
138 handle->parent = nvkm_namedb_get(nv_namedb(namedb), _parent);
139 if (handle->parent) {
140 list_add(&handle->head, &handle->parent->tree);
141 nvkm_namedb_put(handle->parent);
142 }
143 }
144
145 hprintk(handle, TRACE, "created\n");
146 *phandle = handle;
147 return 0;
148}
149
150void
151nvkm_handle_destroy(struct nvkm_handle *handle)
152{
153 struct nvkm_handle *item, *temp;
154
155 hprintk(handle, TRACE, "destroy running\n");
156 list_for_each_entry_safe(item, temp, &handle->tree, head) {
157 nvkm_handle_destroy(item);
158 }
159 list_del(&handle->head);
160
161 if (handle->priv != ~0) {
162 struct nvkm_object *parent = handle->parent->object;
163 nv_parent(parent)->object_detach(parent, handle->priv);
164 }
165
166 hprintk(handle, TRACE, "destroy completed\n");
167 nvkm_namedb_remove(handle);
168 kfree(handle);
169}
170
171struct nvkm_object *
172nvkm_handle_ref(struct nvkm_object *parent, u32 name)
173{
174 struct nvkm_object *object = NULL;
175 struct nvkm_handle *handle;
176
177 while (!nv_iclass(parent, NV_NAMEDB_CLASS))
178 parent = parent->parent;
179
180 handle = nvkm_namedb_get(nv_namedb(parent), name);
181 if (handle) {
182 nvkm_object_ref(handle->object, &object);
183 nvkm_namedb_put(handle);
184 }
185
186 return object;
187}
188
189struct nvkm_handle *
190nvkm_handle_get_class(struct nvkm_object *engctx, u16 oclass)
191{
192 struct nvkm_namedb *namedb;
193 if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
194 return nvkm_namedb_get_class(namedb, oclass);
195 return NULL;
196}
197
198struct nvkm_handle *
199nvkm_handle_get_vinst(struct nvkm_object *engctx, u64 vinst)
200{
201 struct nvkm_namedb *namedb;
202 if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
203 return nvkm_namedb_get_vinst(namedb, vinst);
204 return NULL;
205}
206
207struct nvkm_handle *
208nvkm_handle_get_cinst(struct nvkm_object *engctx, u32 cinst)
209{
210 struct nvkm_namedb *namedb;
211 if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
212 return nvkm_namedb_get_cinst(namedb, cinst);
213 return NULL;
214}
215
216void
217nvkm_handle_put(struct nvkm_handle *handle)
218{
219 if (handle)
220 nvkm_namedb_put(handle);
221}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index 4459ff5f4cb8..d87d6ab03cc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -24,196 +24,154 @@
24#include <core/ioctl.h> 24#include <core/ioctl.h>
25#include <core/client.h> 25#include <core/client.h>
26#include <core/engine.h> 26#include <core/engine.h>
27#include <core/handle.h>
28#include <core/namedb.h>
29 27
30#include <nvif/unpack.h> 28#include <nvif/unpack.h>
31#include <nvif/ioctl.h> 29#include <nvif/ioctl.h>
32 30
33static int 31static int
34nvkm_ioctl_nop(struct nvkm_handle *handle, void *data, u32 size) 32nvkm_ioctl_nop(struct nvkm_object *object, void *data, u32 size)
35{ 33{
36 struct nvkm_object *object = handle->object;
37 union { 34 union {
38 struct nvif_ioctl_nop none; 35 struct nvif_ioctl_nop_v0 v0;
39 } *args = data; 36 } *args = data;
40 int ret; 37 int ret;
41 38
42 nv_ioctl(object, "nop size %d\n", size); 39 nvif_ioctl(object, "nop size %d\n", size);
43 if (nvif_unvers(args->none)) { 40 if (nvif_unpack(args->v0, 0, 0, false)) {
44 nv_ioctl(object, "nop\n"); 41 nvif_ioctl(object, "nop vers %lld\n", args->v0.version);
42 args->v0.version = NVIF_VERSION_LATEST;
45 } 43 }
46 44
47 return ret; 45 return ret;
48} 46}
49 47
50static int 48static int
51nvkm_ioctl_sclass(struct nvkm_handle *handle, void *data, u32 size) 49nvkm_ioctl_sclass(struct nvkm_object *object, void *data, u32 size)
52{ 50{
53 struct nvkm_object *object = handle->object;
54 union { 51 union {
55 struct nvif_ioctl_sclass_v0 v0; 52 struct nvif_ioctl_sclass_v0 v0;
56 } *args = data; 53 } *args = data;
57 int ret; 54 struct nvkm_oclass oclass;
55 int ret, i = 0;
58 56
59 if (!nv_iclass(object, NV_PARENT_CLASS)) { 57 nvif_ioctl(object, "sclass size %d\n", size);
60 nv_debug(object, "cannot have children (sclass)\n");
61 return -ENODEV;
62 }
63
64 nv_ioctl(object, "sclass size %d\n", size);
65 if (nvif_unpack(args->v0, 0, 0, true)) { 58 if (nvif_unpack(args->v0, 0, 0, true)) {
66 nv_ioctl(object, "sclass vers %d count %d\n", 59 nvif_ioctl(object, "sclass vers %d count %d\n",
67 args->v0.version, args->v0.count); 60 args->v0.version, args->v0.count);
68 if (size == args->v0.count * sizeof(args->v0.oclass[0])) { 61 if (size != args->v0.count * sizeof(args->v0.oclass[0]))
69 ret = nvkm_parent_lclass(object, args->v0.oclass, 62 return -EINVAL;
70 args->v0.count); 63
71 if (ret >= 0) { 64 while (object->func->sclass &&
72 args->v0.count = ret; 65 object->func->sclass(object, i, &oclass) >= 0) {
73 ret = 0; 66 if (i < args->v0.count) {
67 args->v0.oclass[i].oclass = oclass.base.oclass;
68 args->v0.oclass[i].minver = oclass.base.minver;
69 args->v0.oclass[i].maxver = oclass.base.maxver;
74 } 70 }
75 } else { 71 i++;
76 ret = -EINVAL;
77 } 72 }
73
74 args->v0.count = i;
78 } 75 }
79 76
80 return ret; 77 return ret;
81} 78}
82 79
83static int 80static int
84nvkm_ioctl_new(struct nvkm_handle *handle, void *data, u32 size) 81nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
85{ 82{
86 union { 83 union {
87 struct nvif_ioctl_new_v0 v0; 84 struct nvif_ioctl_new_v0 v0;
88 } *args = data; 85 } *args = data;
89 struct nvkm_client *client = nvkm_client(handle->object); 86 struct nvkm_client *client = parent->client;
90 struct nvkm_object *engctx = NULL;
91 struct nvkm_object *object = NULL; 87 struct nvkm_object *object = NULL;
92 struct nvkm_parent *parent; 88 struct nvkm_oclass oclass;
93 struct nvkm_object *engine; 89 int ret, i = 0;
94 struct nvkm_oclass *oclass;
95 u32 _handle, _oclass;
96 int ret;
97 90
98 nv_ioctl(client, "new size %d\n", size); 91 nvif_ioctl(parent, "new size %d\n", size);
99 if (nvif_unpack(args->v0, 0, 0, true)) { 92 if (nvif_unpack(args->v0, 0, 0, true)) {
100 _handle = args->v0.handle; 93 nvif_ioctl(parent, "new vers %d handle %08x class %08x "
101 _oclass = args->v0.oclass; 94 "route %02x token %llx object %016llx\n",
95 args->v0.version, args->v0.handle, args->v0.oclass,
96 args->v0.route, args->v0.token, args->v0.object);
102 } else 97 } else
103 return ret; 98 return ret;
104 99
105 nv_ioctl(client, "new vers %d handle %08x class %08x " 100 if (!parent->func->sclass) {
106 "route %02x token %llx\n", 101 nvif_ioctl(parent, "cannot have children\n");
107 args->v0.version, _handle, _oclass, 102 return -EINVAL;
108 args->v0.route, args->v0.token);
109
110 if (!nv_iclass(handle->object, NV_PARENT_CLASS)) {
111 nv_debug(handle->object, "cannot have children (ctor)\n");
112 ret = -ENODEV;
113 goto fail_class;
114 } 103 }
115 104
116 parent = nv_parent(handle->object); 105 do {
117 106 memset(&oclass, 0x00, sizeof(oclass));
118 /* check that parent supports the requested subclass */ 107 oclass.client = client;
119 ret = nvkm_parent_sclass(&parent->object, _oclass, &engine, &oclass); 108 oclass.handle = args->v0.handle;
120 if (ret) { 109 oclass.object = args->v0.object;
121 nv_debug(parent, "illegal class 0x%04x\n", _oclass); 110 oclass.parent = parent;
122 goto fail_class; 111 ret = parent->func->sclass(parent, i++, &oclass);
123 }
124
125 /* make sure engine init has been completed *before* any objects
126 * it controls are created - the constructors may depend on
127 * state calculated at init (ie. default context construction)
128 */
129 if (engine) {
130 ret = nvkm_object_inc(engine);
131 if (ret) 112 if (ret)
132 goto fail_class; 113 return ret;
114 } while (oclass.base.oclass != args->v0.oclass);
115
116 if (oclass.engine) {
117 oclass.engine = nvkm_engine_ref(oclass.engine);
118 if (IS_ERR(oclass.engine))
119 return PTR_ERR(oclass.engine);
133 } 120 }
134 121
135 /* if engine requires it, create a context object to insert 122 ret = oclass.ctor(&oclass, data, size, &object);
136 * between the parent and its children (eg. PGRAPH context) 123 nvkm_engine_unref(&oclass.engine);
137 */ 124 if (ret == 0) {
138 if (engine && nv_engine(engine)->cclass) { 125 ret = nvkm_object_init(object);
139 ret = nvkm_object_ctor(&parent->object, engine, 126 if (ret == 0) {
140 nv_engine(engine)->cclass, 127 list_add(&object->head, &parent->tree);
141 data, size, &engctx); 128 object->route = args->v0.route;
142 if (ret) 129 object->token = args->v0.token;
143 goto fail_engctx; 130 object->object = args->v0.object;
144 } else { 131 if (nvkm_client_insert(client, object)) {
145 nvkm_object_ref(&parent->object, &engctx); 132 client->data = object;
133 return 0;
134 }
135 ret = -EEXIST;
136 }
137 nvkm_object_fini(object, false);
146 } 138 }
147 139
148 /* finally, create new object and bind it to its handle */ 140 nvkm_object_del(&object);
149 ret = nvkm_object_ctor(engctx, engine, oclass, data, size, &object);
150 client->data = object;
151 if (ret)
152 goto fail_ctor;
153
154 ret = nvkm_object_inc(object);
155 if (ret)
156 goto fail_init;
157
158 ret = nvkm_handle_create(&parent->object, handle->name,
159 _handle, object, &handle);
160 if (ret)
161 goto fail_handle;
162
163 ret = nvkm_handle_init(handle);
164 handle->route = args->v0.route;
165 handle->token = args->v0.token;
166 if (ret)
167 nvkm_handle_destroy(handle);
168
169fail_handle:
170 nvkm_object_dec(object, false);
171fail_init:
172 nvkm_object_ref(NULL, &object);
173fail_ctor:
174 nvkm_object_ref(NULL, &engctx);
175fail_engctx:
176 if (engine)
177 nvkm_object_dec(engine, false);
178fail_class:
179 return ret; 141 return ret;
180} 142}
181 143
182static int 144static int
183nvkm_ioctl_del(struct nvkm_handle *handle, void *data, u32 size) 145nvkm_ioctl_del(struct nvkm_object *object, void *data, u32 size)
184{ 146{
185 struct nvkm_object *object = handle->object;
186 union { 147 union {
187 struct nvif_ioctl_del none; 148 struct nvif_ioctl_del none;
188 } *args = data; 149 } *args = data;
189 int ret; 150 int ret;
190 151
191 nv_ioctl(object, "delete size %d\n", size); 152 nvif_ioctl(object, "delete size %d\n", size);
192 if (nvif_unvers(args->none)) { 153 if (nvif_unvers(args->none)) {
193 nv_ioctl(object, "delete\n"); 154 nvif_ioctl(object, "delete\n");
194 nvkm_handle_fini(handle, false); 155 nvkm_object_fini(object, false);
195 nvkm_handle_destroy(handle); 156 nvkm_object_del(&object);
196 } 157 }
197 158
198 return ret; 159 return ret;
199} 160}
200 161
201static int 162static int
202nvkm_ioctl_mthd(struct nvkm_handle *handle, void *data, u32 size) 163nvkm_ioctl_mthd(struct nvkm_object *object, void *data, u32 size)
203{ 164{
204 struct nvkm_object *object = handle->object;
205 struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
206 union { 165 union {
207 struct nvif_ioctl_mthd_v0 v0; 166 struct nvif_ioctl_mthd_v0 v0;
208 } *args = data; 167 } *args = data;
209 int ret; 168 int ret;
210 169
211 nv_ioctl(object, "mthd size %d\n", size); 170 nvif_ioctl(object, "mthd size %d\n", size);
212 if (nvif_unpack(args->v0, 0, 0, true)) { 171 if (nvif_unpack(args->v0, 0, 0, true)) {
213 nv_ioctl(object, "mthd vers %d mthd %02x\n", 172 nvif_ioctl(object, "mthd vers %d mthd %02x\n",
214 args->v0.version, args->v0.method); 173 args->v0.version, args->v0.method);
215 if (ret = -ENODEV, ofuncs->mthd) 174 ret = nvkm_object_mthd(object, args->v0.method, data, size);
216 ret = ofuncs->mthd(object, args->v0.method, data, size);
217 } 175 }
218 176
219 return ret; 177 return ret;
@@ -221,37 +179,34 @@ nvkm_ioctl_mthd(struct nvkm_handle *handle, void *data, u32 size)
221 179
222 180
223static int 181static int
224nvkm_ioctl_rd(struct nvkm_handle *handle, void *data, u32 size) 182nvkm_ioctl_rd(struct nvkm_object *object, void *data, u32 size)
225{ 183{
226 struct nvkm_object *object = handle->object;
227 struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
228 union { 184 union {
229 struct nvif_ioctl_rd_v0 v0; 185 struct nvif_ioctl_rd_v0 v0;
230 } *args = data; 186 } *args = data;
187 union {
188 u8 b08;
189 u16 b16;
190 u32 b32;
191 } v;
231 int ret; 192 int ret;
232 193
233 nv_ioctl(object, "rd size %d\n", size); 194 nvif_ioctl(object, "rd size %d\n", size);
234 if (nvif_unpack(args->v0, 0, 0, false)) { 195 if (nvif_unpack(args->v0, 0, 0, false)) {
235 nv_ioctl(object, "rd vers %d size %d addr %016llx\n", 196 nvif_ioctl(object, "rd vers %d size %d addr %016llx\n",
236 args->v0.version, args->v0.size, args->v0.addr); 197 args->v0.version, args->v0.size, args->v0.addr);
237 switch (args->v0.size) { 198 switch (args->v0.size) {
238 case 1: 199 case 1:
239 if (ret = -ENODEV, ofuncs->rd08) { 200 ret = nvkm_object_rd08(object, args->v0.addr, &v.b08);
240 args->v0.data = nv_ro08(object, args->v0.addr); 201 args->v0.data = v.b08;
241 ret = 0;
242 }
243 break; 202 break;
244 case 2: 203 case 2:
245 if (ret = -ENODEV, ofuncs->rd16) { 204 ret = nvkm_object_rd16(object, args->v0.addr, &v.b16);
246 args->v0.data = nv_ro16(object, args->v0.addr); 205 args->v0.data = v.b16;
247 ret = 0;
248 }
249 break; 206 break;
250 case 4: 207 case 4:
251 if (ret = -ENODEV, ofuncs->rd32) { 208 ret = nvkm_object_rd32(object, args->v0.addr, &v.b32);
252 args->v0.data = nv_ro32(object, args->v0.addr); 209 args->v0.data = v.b32;
253 ret = 0;
254 }
255 break; 210 break;
256 default: 211 default:
257 ret = -EINVAL; 212 ret = -EINVAL;
@@ -263,104 +218,81 @@ nvkm_ioctl_rd(struct nvkm_handle *handle, void *data, u32 size)
263} 218}
264 219
265static int 220static int
266nvkm_ioctl_wr(struct nvkm_handle *handle, void *data, u32 size) 221nvkm_ioctl_wr(struct nvkm_object *object, void *data, u32 size)
267{ 222{
268 struct nvkm_object *object = handle->object;
269 struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
270 union { 223 union {
271 struct nvif_ioctl_wr_v0 v0; 224 struct nvif_ioctl_wr_v0 v0;
272 } *args = data; 225 } *args = data;
273 int ret; 226 int ret;
274 227
275 nv_ioctl(object, "wr size %d\n", size); 228 nvif_ioctl(object, "wr size %d\n", size);
276 if (nvif_unpack(args->v0, 0, 0, false)) { 229 if (nvif_unpack(args->v0, 0, 0, false)) {
277 nv_ioctl(object, "wr vers %d size %d addr %016llx data %08x\n", 230 nvif_ioctl(object,
278 args->v0.version, args->v0.size, args->v0.addr, 231 "wr vers %d size %d addr %016llx data %08x\n",
279 args->v0.data); 232 args->v0.version, args->v0.size, args->v0.addr,
280 switch (args->v0.size) { 233 args->v0.data);
281 case 1: 234 } else
282 if (ret = -ENODEV, ofuncs->wr08) { 235 return ret;
283 nv_wo08(object, args->v0.addr, args->v0.data); 236
284 ret = 0; 237 switch (args->v0.size) {
285 } 238 case 1: return nvkm_object_wr08(object, args->v0.addr, args->v0.data);
286 break; 239 case 2: return nvkm_object_wr16(object, args->v0.addr, args->v0.data);
287 case 2: 240 case 4: return nvkm_object_wr32(object, args->v0.addr, args->v0.data);
288 if (ret = -ENODEV, ofuncs->wr16) { 241 default:
289 nv_wo16(object, args->v0.addr, args->v0.data); 242 break;
290 ret = 0;
291 }
292 break;
293 case 4:
294 if (ret = -ENODEV, ofuncs->wr32) {
295 nv_wo32(object, args->v0.addr, args->v0.data);
296 ret = 0;
297 }
298 break;
299 default:
300 ret = -EINVAL;
301 break;
302 }
303 } 243 }
304 244
305 return ret; 245 return -EINVAL;
306} 246}
307 247
308static int 248static int
309nvkm_ioctl_map(struct nvkm_handle *handle, void *data, u32 size) 249nvkm_ioctl_map(struct nvkm_object *object, void *data, u32 size)
310{ 250{
311 struct nvkm_object *object = handle->object;
312 struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
313 union { 251 union {
314 struct nvif_ioctl_map_v0 v0; 252 struct nvif_ioctl_map_v0 v0;
315 } *args = data; 253 } *args = data;
316 int ret; 254 int ret;
317 255
318 nv_ioctl(object, "map size %d\n", size); 256 nvif_ioctl(object, "map size %d\n", size);
319 if (nvif_unpack(args->v0, 0, 0, false)) { 257 if (nvif_unpack(args->v0, 0, 0, false)) {
320 nv_ioctl(object, "map vers %d\n", args->v0.version); 258 nvif_ioctl(object, "map vers %d\n", args->v0.version);
321 if (ret = -ENODEV, ofuncs->map) { 259 ret = nvkm_object_map(object, &args->v0.handle,
322 ret = ofuncs->map(object, &args->v0.handle, 260 &args->v0.length);
323 &args->v0.length);
324 }
325 } 261 }
326 262
327 return ret; 263 return ret;
328} 264}
329 265
330static int 266static int
331nvkm_ioctl_unmap(struct nvkm_handle *handle, void *data, u32 size) 267nvkm_ioctl_unmap(struct nvkm_object *object, void *data, u32 size)
332{ 268{
333 struct nvkm_object *object = handle->object;
334 union { 269 union {
335 struct nvif_ioctl_unmap none; 270 struct nvif_ioctl_unmap none;
336 } *args = data; 271 } *args = data;
337 int ret; 272 int ret;
338 273
339 nv_ioctl(object, "unmap size %d\n", size); 274 nvif_ioctl(object, "unmap size %d\n", size);
340 if (nvif_unvers(args->none)) { 275 if (nvif_unvers(args->none)) {
341 nv_ioctl(object, "unmap\n"); 276 nvif_ioctl(object, "unmap\n");
342 } 277 }
343 278
344 return ret; 279 return ret;
345} 280}
346 281
347static int 282static int
348nvkm_ioctl_ntfy_new(struct nvkm_handle *handle, void *data, u32 size) 283nvkm_ioctl_ntfy_new(struct nvkm_object *object, void *data, u32 size)
349{ 284{
350 struct nvkm_object *object = handle->object;
351 struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
352 union { 285 union {
353 struct nvif_ioctl_ntfy_new_v0 v0; 286 struct nvif_ioctl_ntfy_new_v0 v0;
354 } *args = data; 287 } *args = data;
355 struct nvkm_event *event; 288 struct nvkm_event *event;
356 int ret; 289 int ret;
357 290
358 nv_ioctl(object, "ntfy new size %d\n", size); 291 nvif_ioctl(object, "ntfy new size %d\n", size);
359 if (nvif_unpack(args->v0, 0, 0, true)) { 292 if (nvif_unpack(args->v0, 0, 0, true)) {
360 nv_ioctl(object, "ntfy new vers %d event %02x\n", 293 nvif_ioctl(object, "ntfy new vers %d event %02x\n",
361 args->v0.version, args->v0.event); 294 args->v0.version, args->v0.event);
362 if (ret = -ENODEV, ofuncs->ntfy) 295 ret = nvkm_object_ntfy(object, args->v0.event, &event);
363 ret = ofuncs->ntfy(object, args->v0.event, &event);
364 if (ret == 0) { 296 if (ret == 0) {
365 ret = nvkm_client_notify_new(object, event, data, size); 297 ret = nvkm_client_notify_new(object, event, data, size);
366 if (ret >= 0) { 298 if (ret >= 0) {
@@ -374,19 +306,18 @@ nvkm_ioctl_ntfy_new(struct nvkm_handle *handle, void *data, u32 size)
374} 306}
375 307
376static int 308static int
377nvkm_ioctl_ntfy_del(struct nvkm_handle *handle, void *data, u32 size) 309nvkm_ioctl_ntfy_del(struct nvkm_object *object, void *data, u32 size)
378{ 310{
379 struct nvkm_client *client = nvkm_client(handle->object); 311 struct nvkm_client *client = object->client;
380 struct nvkm_object *object = handle->object;
381 union { 312 union {
382 struct nvif_ioctl_ntfy_del_v0 v0; 313 struct nvif_ioctl_ntfy_del_v0 v0;
383 } *args = data; 314 } *args = data;
384 int ret; 315 int ret;
385 316
386 nv_ioctl(object, "ntfy del size %d\n", size); 317 nvif_ioctl(object, "ntfy del size %d\n", size);
387 if (nvif_unpack(args->v0, 0, 0, false)) { 318 if (nvif_unpack(args->v0, 0, 0, false)) {
388 nv_ioctl(object, "ntfy del vers %d index %d\n", 319 nvif_ioctl(object, "ntfy del vers %d index %d\n",
389 args->v0.version, args->v0.index); 320 args->v0.version, args->v0.index);
390 ret = nvkm_client_notify_del(client, args->v0.index); 321 ret = nvkm_client_notify_del(client, args->v0.index);
391 } 322 }
392 323
@@ -394,19 +325,18 @@ nvkm_ioctl_ntfy_del(struct nvkm_handle *handle, void *data, u32 size)
394} 325}
395 326
396static int 327static int
397nvkm_ioctl_ntfy_get(struct nvkm_handle *handle, void *data, u32 size) 328nvkm_ioctl_ntfy_get(struct nvkm_object *object, void *data, u32 size)
398{ 329{
399 struct nvkm_client *client = nvkm_client(handle->object); 330 struct nvkm_client *client = object->client;
400 struct nvkm_object *object = handle->object;
401 union { 331 union {
402 struct nvif_ioctl_ntfy_get_v0 v0; 332 struct nvif_ioctl_ntfy_get_v0 v0;
403 } *args = data; 333 } *args = data;
404 int ret; 334 int ret;
405 335
406 nv_ioctl(object, "ntfy get size %d\n", size); 336 nvif_ioctl(object, "ntfy get size %d\n", size);
407 if (nvif_unpack(args->v0, 0, 0, false)) { 337 if (nvif_unpack(args->v0, 0, 0, false)) {
408 nv_ioctl(object, "ntfy get vers %d index %d\n", 338 nvif_ioctl(object, "ntfy get vers %d index %d\n",
409 args->v0.version, args->v0.index); 339 args->v0.version, args->v0.index);
410 ret = nvkm_client_notify_get(client, args->v0.index); 340 ret = nvkm_client_notify_get(client, args->v0.index);
411 } 341 }
412 342
@@ -414,19 +344,18 @@ nvkm_ioctl_ntfy_get(struct nvkm_handle *handle, void *data, u32 size)
414} 344}
415 345
416static int 346static int
417nvkm_ioctl_ntfy_put(struct nvkm_handle *handle, void *data, u32 size) 347nvkm_ioctl_ntfy_put(struct nvkm_object *object, void *data, u32 size)
418{ 348{
419 struct nvkm_client *client = nvkm_client(handle->object); 349 struct nvkm_client *client = object->client;
420 struct nvkm_object *object = handle->object;
421 union { 350 union {
422 struct nvif_ioctl_ntfy_put_v0 v0; 351 struct nvif_ioctl_ntfy_put_v0 v0;
423 } *args = data; 352 } *args = data;
424 int ret; 353 int ret;
425 354
426 nv_ioctl(object, "ntfy put size %d\n", size); 355 nvif_ioctl(object, "ntfy put size %d\n", size);
427 if (nvif_unpack(args->v0, 0, 0, false)) { 356 if (nvif_unpack(args->v0, 0, 0, false)) {
428 nv_ioctl(object, "ntfy put vers %d index %d\n", 357 nvif_ioctl(object, "ntfy put vers %d index %d\n",
429 args->v0.version, args->v0.index); 358 args->v0.version, args->v0.index);
430 ret = nvkm_client_notify_put(client, args->v0.index); 359 ret = nvkm_client_notify_put(client, args->v0.index);
431 } 360 }
432 361
@@ -435,7 +364,7 @@ nvkm_ioctl_ntfy_put(struct nvkm_handle *handle, void *data, u32 size)
435 364
436static struct { 365static struct {
437 int version; 366 int version;
438 int (*func)(struct nvkm_handle *, void *, u32); 367 int (*func)(struct nvkm_object *, void *, u32);
439} 368}
440nvkm_ioctl_v0[] = { 369nvkm_ioctl_v0[] = {
441 { 0x00, nvkm_ioctl_nop }, 370 { 0x00, nvkm_ioctl_nop },
@@ -454,40 +383,31 @@ nvkm_ioctl_v0[] = {
454}; 383};
455 384
456static int 385static int
457nvkm_ioctl_path(struct nvkm_handle *parent, u32 type, u32 nr, u32 *path, 386nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
458 void *data, u32 size, u8 owner, u8 *route, u64 *token) 387 void *data, u32 size, u8 owner, u8 *route, u64 *token)
459{ 388{
460 struct nvkm_handle *handle = parent;
461 struct nvkm_namedb *namedb;
462 struct nvkm_object *object; 389 struct nvkm_object *object;
463 int ret; 390 int ret;
464 391
465 while ((object = parent->object), nr--) { 392 if (handle)
466 nv_ioctl(object, "path 0x%08x\n", path[nr]); 393 object = nvkm_client_search(client, handle);
467 if (!nv_iclass(object, NV_PARENT_CLASS)) { 394 else
468 nv_debug(object, "cannot have children (path)\n"); 395 object = &client->object;
469 return -EINVAL; 396 if (unlikely(!object)) {
470 } 397 nvif_ioctl(&client->object, "object not found\n");
471 398 return -ENOENT;
472 if (!(namedb = (void *)nv_pclass(object, NV_NAMEDB_CLASS)) ||
473 !(handle = nvkm_namedb_get(namedb, path[nr]))) {
474 nv_debug(object, "handle 0x%08x not found\n", path[nr]);
475 return -ENOENT;
476 }
477 nvkm_namedb_put(handle);
478 parent = handle;
479 } 399 }
480 400
481 if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != handle->route) { 401 if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) {
482 nv_ioctl(object, "object route != owner\n"); 402 nvif_ioctl(&client->object, "route != owner\n");
483 return -EACCES; 403 return -EACCES;
484 } 404 }
485 *route = handle->route; 405 *route = object->route;
486 *token = handle->token; 406 *token = object->token;
487 407
488 if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) { 408 if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
489 if (nvkm_ioctl_v0[type].version == 0) 409 if (nvkm_ioctl_v0[type].version == 0)
490 ret = nvkm_ioctl_v0[type].func(handle, data, size); 410 ret = nvkm_ioctl_v0[type].func(object, data, size);
491 } 411 }
492 412
493 return ret; 413 return ret;
@@ -497,25 +417,26 @@ int
497nvkm_ioctl(struct nvkm_client *client, bool supervisor, 417nvkm_ioctl(struct nvkm_client *client, bool supervisor,
498 void *data, u32 size, void **hack) 418 void *data, u32 size, void **hack)
499{ 419{
420 struct nvkm_object *object = &client->object;
500 union { 421 union {
501 struct nvif_ioctl_v0 v0; 422 struct nvif_ioctl_v0 v0;
502 } *args = data; 423 } *args = data;
503 int ret; 424 int ret;
504 425
505 client->super = supervisor; 426 client->super = supervisor;
506 nv_ioctl(client, "size %d\n", size); 427 nvif_ioctl(object, "size %d\n", size);
507 428
508 if (nvif_unpack(args->v0, 0, 0, true)) { 429 if (nvif_unpack(args->v0, 0, 0, true)) {
509 nv_ioctl(client, "vers %d type %02x path %d owner %02x\n", 430 nvif_ioctl(object,
510 args->v0.version, args->v0.type, args->v0.path_nr, 431 "vers %d type %02x object %016llx owner %02x\n",
511 args->v0.owner); 432 args->v0.version, args->v0.type, args->v0.object,
512 ret = nvkm_ioctl_path(client->root, args->v0.type, 433 args->v0.owner);
513 args->v0.path_nr, args->v0.path, 434 ret = nvkm_ioctl_path(client, args->v0.object, args->v0.type,
514 data, size, args->v0.owner, 435 data, size, args->v0.owner,
515 &args->v0.route, &args->v0.token); 436 &args->v0.route, &args->v0.token);
516 } 437 }
517 438
518 nv_ioctl(client, "return %d\n", ret); 439 nvif_ioctl(object, "return %d\n", ret);
519 if (hack) { 440 if (hack) {
520 *hack = client->data; 441 *hack = client->data;
521 client->data = NULL; 442 client->data = NULL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
new file mode 100644
index 000000000000..8903c04c977e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -0,0 +1,64 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include <core/memory.h>
25#include <subdev/instmem.h>
26
27void
28nvkm_memory_ctor(const struct nvkm_memory_func *func,
29 struct nvkm_memory *memory)
30{
31 memory->func = func;
32}
33
34void
35nvkm_memory_del(struct nvkm_memory **pmemory)
36{
37 struct nvkm_memory *memory = *pmemory;
38 if (memory && !WARN_ON(!memory->func)) {
39 if (memory->func->dtor)
40 *pmemory = memory->func->dtor(memory);
41 kfree(*pmemory);
42 *pmemory = NULL;
43 }
44}
45
46int
47nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
48 u64 size, u32 align, bool zero,
49 struct nvkm_memory **pmemory)
50{
51 struct nvkm_instmem *imem = device->imem;
52 struct nvkm_memory *memory;
53 int ret = -ENOSYS;
54
55 if (unlikely(target != NVKM_MEM_TARGET_INST || !imem))
56 return -ENOSYS;
57
58 ret = nvkm_instobj_new(imem, size, align, zero, &memory);
59 if (ret)
60 return ret;
61
62 *pmemory = memory;
63 return 0;
64}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/mm.c b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
index 7f458dfd5608..09a1eee8fd33 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/mm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
@@ -26,7 +26,7 @@
26#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \ 26#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
27 list_entry((root)->nl_entry.dir, struct nvkm_mm_node, nl_entry) 27 list_entry((root)->nl_entry.dir, struct nvkm_mm_node, nl_entry)
28 28
29static void 29void
30nvkm_mm_dump(struct nvkm_mm *mm, const char *header) 30nvkm_mm_dump(struct nvkm_mm *mm, const char *header)
31{ 31{
32 struct nvkm_mm_node *node; 32 struct nvkm_mm_node *node;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/namedb.c b/drivers/gpu/drm/nouveau/nvkm/core/namedb.c
deleted file mode 100644
index 6400767c5dba..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/namedb.c
+++ /dev/null
@@ -1,199 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include <core/namedb.h>
25#include <core/gpuobj.h>
26#include <core/handle.h>
27
28static struct nvkm_handle *
29nvkm_namedb_lookup(struct nvkm_namedb *namedb, u32 name)
30{
31 struct nvkm_handle *handle;
32
33 list_for_each_entry(handle, &namedb->list, node) {
34 if (handle->name == name)
35 return handle;
36 }
37
38 return NULL;
39}
40
41static struct nvkm_handle *
42nvkm_namedb_lookup_class(struct nvkm_namedb *namedb, u16 oclass)
43{
44 struct nvkm_handle *handle;
45
46 list_for_each_entry(handle, &namedb->list, node) {
47 if (nv_mclass(handle->object) == oclass)
48 return handle;
49 }
50
51 return NULL;
52}
53
54static struct nvkm_handle *
55nvkm_namedb_lookup_vinst(struct nvkm_namedb *namedb, u64 vinst)
56{
57 struct nvkm_handle *handle;
58
59 list_for_each_entry(handle, &namedb->list, node) {
60 if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
61 if (nv_gpuobj(handle->object)->addr == vinst)
62 return handle;
63 }
64 }
65
66 return NULL;
67}
68
69static struct nvkm_handle *
70nvkm_namedb_lookup_cinst(struct nvkm_namedb *namedb, u32 cinst)
71{
72 struct nvkm_handle *handle;
73
74 list_for_each_entry(handle, &namedb->list, node) {
75 if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
76 if (nv_gpuobj(handle->object)->node &&
77 nv_gpuobj(handle->object)->node->offset == cinst)
78 return handle;
79 }
80 }
81
82 return NULL;
83}
84
85int
86nvkm_namedb_insert(struct nvkm_namedb *namedb, u32 name,
87 struct nvkm_object *object,
88 struct nvkm_handle *handle)
89{
90 int ret = -EEXIST;
91 write_lock_irq(&namedb->lock);
92 if (!nvkm_namedb_lookup(namedb, name)) {
93 nvkm_object_ref(object, &handle->object);
94 handle->namedb = namedb;
95 list_add(&handle->node, &namedb->list);
96 ret = 0;
97 }
98 write_unlock_irq(&namedb->lock);
99 return ret;
100}
101
102void
103nvkm_namedb_remove(struct nvkm_handle *handle)
104{
105 struct nvkm_namedb *namedb = handle->namedb;
106 struct nvkm_object *object = handle->object;
107 write_lock_irq(&namedb->lock);
108 list_del(&handle->node);
109 write_unlock_irq(&namedb->lock);
110 nvkm_object_ref(NULL, &object);
111}
112
113struct nvkm_handle *
114nvkm_namedb_get(struct nvkm_namedb *namedb, u32 name)
115{
116 struct nvkm_handle *handle;
117 read_lock(&namedb->lock);
118 handle = nvkm_namedb_lookup(namedb, name);
119 if (handle == NULL)
120 read_unlock(&namedb->lock);
121 return handle;
122}
123
124struct nvkm_handle *
125nvkm_namedb_get_class(struct nvkm_namedb *namedb, u16 oclass)
126{
127 struct nvkm_handle *handle;
128 read_lock(&namedb->lock);
129 handle = nvkm_namedb_lookup_class(namedb, oclass);
130 if (handle == NULL)
131 read_unlock(&namedb->lock);
132 return handle;
133}
134
135struct nvkm_handle *
136nvkm_namedb_get_vinst(struct nvkm_namedb *namedb, u64 vinst)
137{
138 struct nvkm_handle *handle;
139 read_lock(&namedb->lock);
140 handle = nvkm_namedb_lookup_vinst(namedb, vinst);
141 if (handle == NULL)
142 read_unlock(&namedb->lock);
143 return handle;
144}
145
146struct nvkm_handle *
147nvkm_namedb_get_cinst(struct nvkm_namedb *namedb, u32 cinst)
148{
149 struct nvkm_handle *handle;
150 read_lock(&namedb->lock);
151 handle = nvkm_namedb_lookup_cinst(namedb, cinst);
152 if (handle == NULL)
153 read_unlock(&namedb->lock);
154 return handle;
155}
156
157void
158nvkm_namedb_put(struct nvkm_handle *handle)
159{
160 if (handle)
161 read_unlock(&handle->namedb->lock);
162}
163
164int
165nvkm_namedb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
166 struct nvkm_oclass *oclass, u32 pclass,
167 struct nvkm_oclass *sclass, u64 engcls,
168 int length, void **pobject)
169{
170 struct nvkm_namedb *namedb;
171 int ret;
172
173 ret = nvkm_parent_create_(parent, engine, oclass, pclass |
174 NV_NAMEDB_CLASS, sclass, engcls,
175 length, pobject);
176 namedb = *pobject;
177 if (ret)
178 return ret;
179
180 rwlock_init(&namedb->lock);
181 INIT_LIST_HEAD(&namedb->list);
182 return 0;
183}
184
185int
186_nvkm_namedb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
187 struct nvkm_oclass *oclass, void *data, u32 size,
188 struct nvkm_object **pobject)
189{
190 struct nvkm_namedb *object;
191 int ret;
192
193 ret = nvkm_namedb_create(parent, engine, oclass, 0, NULL, 0, &object);
194 *pobject = nv_object(object);
195 if (ret)
196 return ret;
197
198 return 0;
199}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index 979f3627d395..67aa7223dcd7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -22,309 +22,243 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <core/object.h> 24#include <core/object.h>
25#include <core/client.h>
25#include <core/engine.h> 26#include <core/engine.h>
26 27
27#ifdef NVKM_OBJECT_MAGIC
28static struct list_head _objlist = LIST_HEAD_INIT(_objlist);
29static DEFINE_SPINLOCK(_objlist_lock);
30#endif
31
32int 28int
33nvkm_object_create_(struct nvkm_object *parent, struct nvkm_object *engine, 29nvkm_object_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
34 struct nvkm_oclass *oclass, u32 pclass,
35 int size, void **pobject)
36{ 30{
37 struct nvkm_object *object; 31 if (likely(object->func->mthd))
38 32 return object->func->mthd(object, mthd, data, size);
39 object = *pobject = kzalloc(size, GFP_KERNEL); 33 return -ENODEV;
40 if (!object)
41 return -ENOMEM;
42
43 nvkm_object_ref(parent, &object->parent);
44 nvkm_object_ref(engine, (struct nvkm_object **)&object->engine);
45 object->oclass = oclass;
46 object->oclass->handle |= pclass;
47 atomic_set(&object->refcount, 1);
48 atomic_set(&object->usecount, 0);
49
50#ifdef NVKM_OBJECT_MAGIC
51 object->_magic = NVKM_OBJECT_MAGIC;
52 spin_lock(&_objlist_lock);
53 list_add(&object->list, &_objlist);
54 spin_unlock(&_objlist_lock);
55#endif
56 return 0;
57} 34}
58 35
59int 36int
60_nvkm_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 37nvkm_object_ntfy(struct nvkm_object *object, u32 mthd,
61 struct nvkm_oclass *oclass, void *data, u32 size, 38 struct nvkm_event **pevent)
62 struct nvkm_object **pobject)
63{ 39{
64 if (size != 0) 40 if (likely(object->func->ntfy))
65 return -ENOSYS; 41 return object->func->ntfy(object, mthd, pevent);
66 return nvkm_object_create(parent, engine, oclass, 0, pobject); 42 return -ENODEV;
67} 43}
68 44
69void 45int
70nvkm_object_destroy(struct nvkm_object *object) 46nvkm_object_map(struct nvkm_object *object, u64 *addr, u32 *size)
71{ 47{
72#ifdef NVKM_OBJECT_MAGIC 48 if (likely(object->func->map))
73 spin_lock(&_objlist_lock); 49 return object->func->map(object, addr, size);
74 list_del(&object->list); 50 return -ENODEV;
75 spin_unlock(&_objlist_lock);
76#endif
77 nvkm_object_ref(NULL, (struct nvkm_object **)&object->engine);
78 nvkm_object_ref(NULL, &object->parent);
79 kfree(object);
80} 51}
81 52
82int 53int
83nvkm_object_init(struct nvkm_object *object) 54nvkm_object_rd08(struct nvkm_object *object, u64 addr, u8 *data)
84{ 55{
85 return 0; 56 if (likely(object->func->rd08))
57 return object->func->rd08(object, addr, data);
58 return -ENODEV;
86} 59}
87 60
88int 61int
89nvkm_object_fini(struct nvkm_object *object, bool suspend) 62nvkm_object_rd16(struct nvkm_object *object, u64 addr, u16 *data)
90{ 63{
91 return 0; 64 if (likely(object->func->rd16))
65 return object->func->rd16(object, addr, data);
66 return -ENODEV;
92} 67}
93 68
94struct nvkm_ofuncs
95nvkm_object_ofuncs = {
96 .ctor = _nvkm_object_ctor,
97 .dtor = nvkm_object_destroy,
98 .init = nvkm_object_init,
99 .fini = nvkm_object_fini,
100};
101
102int 69int
103nvkm_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 70nvkm_object_rd32(struct nvkm_object *object, u64 addr, u32 *data)
104 struct nvkm_oclass *oclass, void *data, u32 size,
105 struct nvkm_object **pobject)
106{ 71{
107 struct nvkm_ofuncs *ofuncs = oclass->ofuncs; 72 if (likely(object->func->rd32))
108 struct nvkm_object *object = NULL; 73 return object->func->rd32(object, addr, data);
109 int ret; 74 return -ENODEV;
110
111 ret = ofuncs->ctor(parent, engine, oclass, data, size, &object);
112 *pobject = object;
113 if (ret < 0) {
114 if (ret != -ENODEV) {
115 nv_error(parent, "failed to create 0x%08x, %d\n",
116 oclass->handle, ret);
117 }
118
119 if (object) {
120 ofuncs->dtor(object);
121 *pobject = NULL;
122 }
123
124 return ret;
125 }
126
127 if (ret == 0) {
128 nv_trace(object, "created\n");
129 atomic_set(&object->refcount, 1);
130 }
131
132 return 0;
133} 75}
134 76
135static void 77int
136nvkm_object_dtor(struct nvkm_object *object) 78nvkm_object_wr08(struct nvkm_object *object, u64 addr, u8 data)
137{ 79{
138 nv_trace(object, "destroying\n"); 80 if (likely(object->func->wr08))
139 nv_ofuncs(object)->dtor(object); 81 return object->func->wr08(object, addr, data);
82 return -ENODEV;
140} 83}
141 84
142void 85int
143nvkm_object_ref(struct nvkm_object *obj, struct nvkm_object **ref) 86nvkm_object_wr16(struct nvkm_object *object, u64 addr, u16 data)
144{ 87{
145 if (obj) { 88 if (likely(object->func->wr16))
146 atomic_inc(&obj->refcount); 89 return object->func->wr16(object, addr, data);
147 nv_trace(obj, "inc() == %d\n", atomic_read(&obj->refcount)); 90 return -ENODEV;
148 } 91}
149 92
150 if (*ref) { 93int
151 int dead = atomic_dec_and_test(&(*ref)->refcount); 94nvkm_object_wr32(struct nvkm_object *object, u64 addr, u32 data)
152 nv_trace(*ref, "dec() == %d\n", atomic_read(&(*ref)->refcount)); 95{
153 if (dead) 96 if (likely(object->func->wr32))
154 nvkm_object_dtor(*ref); 97 return object->func->wr32(object, addr, data);
155 } 98 return -ENODEV;
99}
156 100
157 *ref = obj; 101int
102nvkm_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *gpuobj,
103 int align, struct nvkm_gpuobj **pgpuobj)
104{
105 if (object->func->bind)
106 return object->func->bind(object, gpuobj, align, pgpuobj);
107 return -ENODEV;
158} 108}
159 109
160int 110int
161nvkm_object_inc(struct nvkm_object *object) 111nvkm_object_fini(struct nvkm_object *object, bool suspend)
162{ 112{
163 int ref = atomic_add_return(1, &object->usecount); 113 const char *action = suspend ? "suspend" : "fini";
114 struct nvkm_object *child;
115 s64 time;
164 int ret; 116 int ret;
165 117
166 nv_trace(object, "use(+1) == %d\n", atomic_read(&object->usecount)); 118 nvif_debug(object, "%s children...\n", action);
167 if (ref != 1) 119 time = ktime_to_us(ktime_get());
168 return 0; 120 list_for_each_entry(child, &object->tree, head) {
169 121 ret = nvkm_object_fini(child, suspend);
170 nv_trace(object, "initialising...\n"); 122 if (ret && suspend)
171 if (object->parent) { 123 goto fail_child;
172 ret = nvkm_object_inc(object->parent);
173 if (ret) {
174 nv_error(object, "parent failed, %d\n", ret);
175 goto fail_parent;
176 }
177 } 124 }
178 125
179 if (object->engine) { 126 nvif_debug(object, "%s running...\n", action);
180 mutex_lock(&nv_subdev(object->engine)->mutex); 127 if (object->func->fini) {
181 ret = nvkm_object_inc(&object->engine->subdev.object); 128 ret = object->func->fini(object, suspend);
182 mutex_unlock(&nv_subdev(object->engine)->mutex);
183 if (ret) { 129 if (ret) {
184 nv_error(object, "engine failed, %d\n", ret); 130 nvif_error(object, "%s failed with %d\n", action, ret);
185 goto fail_engine; 131 if (suspend)
132 goto fail;
186 } 133 }
187 } 134 }
188 135
189 ret = nv_ofuncs(object)->init(object); 136 time = ktime_to_us(ktime_get()) - time;
190 atomic_set(&object->usecount, 1); 137 nvif_debug(object, "%s completed in %lldus\n", action, time);
191 if (ret) {
192 nv_error(object, "init failed, %d\n", ret);
193 goto fail_self;
194 }
195
196 nv_trace(object, "initialised\n");
197 return 0; 138 return 0;
198 139
199fail_self: 140fail:
200 if (object->engine) { 141 if (object->func->init) {
201 mutex_lock(&nv_subdev(object->engine)->mutex); 142 int rret = object->func->init(object);
202 nvkm_object_dec(&object->engine->subdev.object, false); 143 if (rret)
203 mutex_unlock(&nv_subdev(object->engine)->mutex); 144 nvif_fatal(object, "failed to restart, %d\n", rret);
145 }
146fail_child:
147 list_for_each_entry_continue_reverse(child, &object->tree, head) {
148 nvkm_object_init(child);
204 } 149 }
205fail_engine:
206 if (object->parent)
207 nvkm_object_dec(object->parent, false);
208fail_parent:
209 atomic_dec(&object->usecount);
210 return ret; 150 return ret;
211} 151}
212 152
213static int 153int
214nvkm_object_decf(struct nvkm_object *object) 154nvkm_object_init(struct nvkm_object *object)
215{ 155{
156 struct nvkm_object *child;
157 s64 time;
216 int ret; 158 int ret;
217 159
218 nv_trace(object, "stopping...\n"); 160 nvif_debug(object, "init running...\n");
219 161 time = ktime_to_us(ktime_get());
220 ret = nv_ofuncs(object)->fini(object, false); 162 if (object->func->init) {
221 atomic_set(&object->usecount, 0); 163 ret = object->func->init(object);
222 if (ret) 164 if (ret)
223 nv_warn(object, "failed fini, %d\n", ret); 165 goto fail;
224
225 if (object->engine) {
226 mutex_lock(&nv_subdev(object->engine)->mutex);
227 nvkm_object_dec(&object->engine->subdev.object, false);
228 mutex_unlock(&nv_subdev(object->engine)->mutex);
229 } 166 }
230 167
231 if (object->parent) 168 nvif_debug(object, "init children...\n");
232 nvkm_object_dec(object->parent, false); 169 list_for_each_entry(child, &object->tree, head) {
170 ret = nvkm_object_init(child);
171 if (ret)
172 goto fail_child;
173 }
233 174
234 nv_trace(object, "stopped\n"); 175 time = ktime_to_us(ktime_get()) - time;
176 nvif_debug(object, "init completed in %lldus\n", time);
235 return 0; 177 return 0;
178
179fail_child:
180 list_for_each_entry_continue_reverse(child, &object->tree, head)
181 nvkm_object_fini(child, false);
182fail:
183 nvif_error(object, "init failed with %d\n", ret);
184 if (object->func->fini)
185 object->func->fini(object, false);
186 return ret;
236} 187}
237 188
238static int 189void *
239nvkm_object_decs(struct nvkm_object *object) 190nvkm_object_dtor(struct nvkm_object *object)
240{ 191{
241 int ret, rret; 192 struct nvkm_object *child, *ctemp;
242 193 void *data = object;
243 nv_trace(object, "suspending...\n"); 194 s64 time;
244 195
245 ret = nv_ofuncs(object)->fini(object, true); 196 nvif_debug(object, "destroy children...\n");
246 atomic_set(&object->usecount, 0); 197 time = ktime_to_us(ktime_get());
247 if (ret) { 198 list_for_each_entry_safe(child, ctemp, &object->tree, head) {
248 nv_error(object, "failed suspend, %d\n", ret); 199 nvkm_object_del(&child);
249 return ret;
250 } 200 }
251 201
252 if (object->engine) { 202 nvif_debug(object, "destroy running...\n");
253 mutex_lock(&nv_subdev(object->engine)->mutex); 203 if (object->func->dtor)
254 ret = nvkm_object_dec(&object->engine->subdev.object, true); 204 data = object->func->dtor(object);
255 mutex_unlock(&nv_subdev(object->engine)->mutex); 205 nvkm_engine_unref(&object->engine);
256 if (ret) { 206 time = ktime_to_us(ktime_get()) - time;
257 nv_warn(object, "engine failed suspend, %d\n", ret); 207 nvif_debug(object, "destroy completed in %lldus...\n", time);
258 goto fail_engine; 208 return data;
259 } 209}
260 }
261
262 if (object->parent) {
263 ret = nvkm_object_dec(object->parent, true);
264 if (ret) {
265 nv_warn(object, "parent failed suspend, %d\n", ret);
266 goto fail_parent;
267 }
268 }
269
270 nv_trace(object, "suspended\n");
271 return 0;
272 210
273fail_parent: 211void
274 if (object->engine) { 212nvkm_object_del(struct nvkm_object **pobject)
275 mutex_lock(&nv_subdev(object->engine)->mutex); 213{
276 rret = nvkm_object_inc(&object->engine->subdev.object); 214 struct nvkm_object *object = *pobject;
277 mutex_unlock(&nv_subdev(object->engine)->mutex); 215 if (object && !WARN_ON(!object->func)) {
278 if (rret) 216 *pobject = nvkm_object_dtor(object);
279 nv_fatal(object, "engine failed to reinit, %d\n", rret); 217 nvkm_client_remove(object->client, object);
218 list_del(&object->head);
219 kfree(*pobject);
220 *pobject = NULL;
280 } 221 }
222}
281 223
282fail_engine: 224void
283 rret = nv_ofuncs(object)->init(object); 225nvkm_object_ctor(const struct nvkm_object_func *func,
284 if (rret) 226 const struct nvkm_oclass *oclass, struct nvkm_object *object)
285 nv_fatal(object, "failed to reinit, %d\n", rret); 227{
286 228 object->func = func;
287 return ret; 229 object->client = oclass->client;
230 object->engine = nvkm_engine_ref(oclass->engine);
231 object->oclass = oclass->base.oclass;
232 object->handle = oclass->handle;
233 INIT_LIST_HEAD(&object->head);
234 INIT_LIST_HEAD(&object->tree);
235 RB_CLEAR_NODE(&object->node);
236 WARN_ON(oclass->engine && !object->engine);
288} 237}
289 238
290int 239int
291nvkm_object_dec(struct nvkm_object *object, bool suspend) 240nvkm_object_new_(const struct nvkm_object_func *func,
241 const struct nvkm_oclass *oclass, void *data, u32 size,
242 struct nvkm_object **pobject)
292{ 243{
293 int ref = atomic_add_return(-1, &object->usecount); 244 if (size == 0) {
294 int ret; 245 if (!(*pobject = kzalloc(sizeof(**pobject), GFP_KERNEL)))
295 246 return -ENOMEM;
296 nv_trace(object, "use(-1) == %d\n", atomic_read(&object->usecount)); 247 nvkm_object_ctor(func, oclass, *pobject);
297 248 return 0;
298 if (ref == 0) {
299 if (suspend)
300 ret = nvkm_object_decs(object);
301 else
302 ret = nvkm_object_decf(object);
303
304 if (ret) {
305 atomic_inc(&object->usecount);
306 return ret;
307 }
308 } 249 }
309 250 return -ENOSYS;
310 return 0;
311} 251}
312 252
313void 253static const struct nvkm_object_func
314nvkm_object_debug(void) 254nvkm_object_func = {
255};
256
257int
258nvkm_object_new(const struct nvkm_oclass *oclass, void *data, u32 size,
259 struct nvkm_object **pobject)
315{ 260{
316#ifdef NVKM_OBJECT_MAGIC 261 const struct nvkm_object_func *func =
317 struct nvkm_object *object; 262 oclass->base.func ? oclass->base.func : &nvkm_object_func;
318 if (!list_empty(&_objlist)) { 263 return nvkm_object_new_(func, oclass, data, size, pobject);
319 nv_fatal(NULL, "*******************************************\n");
320 nv_fatal(NULL, "* AIIIII! object(s) still exist!!!\n");
321 nv_fatal(NULL, "*******************************************\n");
322 list_for_each_entry(object, &_objlist, list) {
323 nv_fatal(object, "%p/%p/%d/%d\n",
324 object->parent, object->engine,
325 atomic_read(&object->refcount),
326 atomic_read(&object->usecount));
327 }
328 }
329#endif
330} 264}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
new file mode 100644
index 000000000000..e31a0479add0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
@@ -0,0 +1,200 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include <core/oproxy.h>
25
26static int
27nvkm_oproxy_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
28{
29 return nvkm_object_mthd(nvkm_oproxy(object)->object, mthd, data, size);
30}
31
32static int
33nvkm_oproxy_ntfy(struct nvkm_object *object, u32 mthd,
34 struct nvkm_event **pevent)
35{
36 return nvkm_object_ntfy(nvkm_oproxy(object)->object, mthd, pevent);
37}
38
39static int
40nvkm_oproxy_map(struct nvkm_object *object, u64 *addr, u32 *size)
41{
42 return nvkm_object_map(nvkm_oproxy(object)->object, addr, size);
43}
44
45static int
46nvkm_oproxy_rd08(struct nvkm_object *object, u64 addr, u8 *data)
47{
48 return nvkm_object_rd08(nvkm_oproxy(object)->object, addr, data);
49}
50
51static int
52nvkm_oproxy_rd16(struct nvkm_object *object, u64 addr, u16 *data)
53{
54 return nvkm_object_rd16(nvkm_oproxy(object)->object, addr, data);
55}
56
57static int
58nvkm_oproxy_rd32(struct nvkm_object *object, u64 addr, u32 *data)
59{
60 return nvkm_object_rd32(nvkm_oproxy(object)->object, addr, data);
61}
62
63static int
64nvkm_oproxy_wr08(struct nvkm_object *object, u64 addr, u8 data)
65{
66 return nvkm_object_wr08(nvkm_oproxy(object)->object, addr, data);
67}
68
69static int
70nvkm_oproxy_wr16(struct nvkm_object *object, u64 addr, u16 data)
71{
72 return nvkm_object_wr16(nvkm_oproxy(object)->object, addr, data);
73}
74
75static int
76nvkm_oproxy_wr32(struct nvkm_object *object, u64 addr, u32 data)
77{
78 return nvkm_object_wr32(nvkm_oproxy(object)->object, addr, data);
79}
80
81static int
82nvkm_oproxy_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
83 int align, struct nvkm_gpuobj **pgpuobj)
84{
85 return nvkm_object_bind(nvkm_oproxy(object)->object,
86 parent, align, pgpuobj);
87}
88
89static int
90nvkm_oproxy_sclass(struct nvkm_object *object, int index,
91 struct nvkm_oclass *oclass)
92{
93 struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
94 oclass->parent = oproxy->object;
95 if (!oproxy->object->func->sclass)
96 return -ENODEV;
97 return oproxy->object->func->sclass(oproxy->object, index, oclass);
98}
99
100static int
101nvkm_oproxy_fini(struct nvkm_object *object, bool suspend)
102{
103 struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
104 int ret;
105
106 if (oproxy->func->fini[0]) {
107 ret = oproxy->func->fini[0](oproxy, suspend);
108 if (ret && suspend)
109 return ret;
110 }
111
112 if (oproxy->object->func->fini) {
113 ret = oproxy->object->func->fini(oproxy->object, suspend);
114 if (ret && suspend)
115 return ret;
116 }
117
118 if (oproxy->func->fini[1]) {
119 ret = oproxy->func->fini[1](oproxy, suspend);
120 if (ret && suspend)
121 return ret;
122 }
123
124 return 0;
125}
126
127static int
128nvkm_oproxy_init(struct nvkm_object *object)
129{
130 struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
131 int ret;
132
133 if (oproxy->func->init[0]) {
134 ret = oproxy->func->init[0](oproxy);
135 if (ret)
136 return ret;
137 }
138
139 if (oproxy->object->func->init) {
140 ret = oproxy->object->func->init(oproxy->object);
141 if (ret)
142 return ret;
143 }
144
145 if (oproxy->func->init[1]) {
146 ret = oproxy->func->init[1](oproxy);
147 if (ret)
148 return ret;
149 }
150
151 return 0;
152}
153
154static void *
155nvkm_oproxy_dtor(struct nvkm_object *object)
156{
157 struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
158 if (oproxy->func->dtor[0])
159 oproxy->func->dtor[0](oproxy);
160 nvkm_object_del(&oproxy->object);
161 if (oproxy->func->dtor[1])
162 oproxy->func->dtor[1](oproxy);
163 return oproxy;
164}
165
166static const struct nvkm_object_func
167nvkm_oproxy_func = {
168 .dtor = nvkm_oproxy_dtor,
169 .init = nvkm_oproxy_init,
170 .fini = nvkm_oproxy_fini,
171 .mthd = nvkm_oproxy_mthd,
172 .ntfy = nvkm_oproxy_ntfy,
173 .map = nvkm_oproxy_map,
174 .rd08 = nvkm_oproxy_rd08,
175 .rd16 = nvkm_oproxy_rd16,
176 .rd32 = nvkm_oproxy_rd32,
177 .wr08 = nvkm_oproxy_wr08,
178 .wr16 = nvkm_oproxy_wr16,
179 .wr32 = nvkm_oproxy_wr32,
180 .bind = nvkm_oproxy_bind,
181 .sclass = nvkm_oproxy_sclass,
182};
183
184void
185nvkm_oproxy_ctor(const struct nvkm_oproxy_func *func,
186 const struct nvkm_oclass *oclass, struct nvkm_oproxy *oproxy)
187{
188 nvkm_object_ctor(&nvkm_oproxy_func, oclass, &oproxy->base);
189 oproxy->func = func;
190}
191
192int
193nvkm_oproxy_new_(const struct nvkm_oproxy_func *func,
194 const struct nvkm_oclass *oclass, struct nvkm_oproxy **poproxy)
195{
196 if (!(*poproxy = kzalloc(sizeof(**poproxy), GFP_KERNEL)))
197 return -ENOMEM;
198 nvkm_oproxy_ctor(func, oclass, *poproxy);
199 return 0;
200}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/option.c b/drivers/gpu/drm/nouveau/nvkm/core/option.c
index 19d153f8c8fd..3e62cf8cde08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/option.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/option.c
@@ -73,6 +73,24 @@ nvkm_boolopt(const char *optstr, const char *opt, bool value)
73 return value; 73 return value;
74} 74}
75 75
76long
77nvkm_longopt(const char *optstr, const char *opt, long value)
78{
79 long result = value;
80 int arglen;
81 char *s;
82
83 optstr = nvkm_stropt(optstr, opt, &arglen);
84 if (optstr && (s = kstrndup(optstr, arglen, GFP_KERNEL))) {
85 int ret = kstrtol(s, 0, &value);
86 if (ret == 0)
87 result = value;
88 kfree(s);
89 }
90
91 return result;
92}
93
76int 94int
77nvkm_dbgopt(const char *optstr, const char *sub) 95nvkm_dbgopt(const char *optstr, const char *sub)
78{ 96{
@@ -95,7 +113,7 @@ nvkm_dbgopt(const char *optstr, const char *sub)
95 else if (!strncasecmpz(optstr, "warn", len)) 113 else if (!strncasecmpz(optstr, "warn", len))
96 level = NV_DBG_WARN; 114 level = NV_DBG_WARN;
97 else if (!strncasecmpz(optstr, "info", len)) 115 else if (!strncasecmpz(optstr, "info", len))
98 level = NV_DBG_INFO_NORMAL; 116 level = NV_DBG_INFO;
99 else if (!strncasecmpz(optstr, "debug", len)) 117 else if (!strncasecmpz(optstr, "debug", len))
100 level = NV_DBG_DEBUG; 118 level = NV_DBG_DEBUG;
101 else if (!strncasecmpz(optstr, "trace", len)) 119 else if (!strncasecmpz(optstr, "trace", len))
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/parent.c b/drivers/gpu/drm/nouveau/nvkm/core/parent.c
deleted file mode 100644
index dd56cd1eeb38..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/parent.c
+++ /dev/null
@@ -1,159 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include <core/parent.h>
25#include <core/client.h>
26#include <core/engine.h>
27
28int
29nvkm_parent_sclass(struct nvkm_object *parent, u16 handle,
30 struct nvkm_object **pengine,
31 struct nvkm_oclass **poclass)
32{
33 struct nvkm_sclass *sclass;
34 struct nvkm_engine *engine;
35 struct nvkm_oclass *oclass;
36 u64 mask;
37
38 sclass = nv_parent(parent)->sclass;
39 while (sclass) {
40 if ((sclass->oclass->handle & 0xffff) == handle) {
41 *pengine = &parent->engine->subdev.object;
42 *poclass = sclass->oclass;
43 return 0;
44 }
45
46 sclass = sclass->sclass;
47 }
48
49 mask = nv_parent(parent)->engine;
50 while (mask) {
51 int i = __ffs64(mask);
52
53 if (nv_iclass(parent, NV_CLIENT_CLASS))
54 engine = nv_engine(nv_client(parent)->device);
55 else
56 engine = nvkm_engine(parent, i);
57
58 if (engine) {
59 oclass = engine->sclass;
60 while (oclass->ofuncs) {
61 if ((oclass->handle & 0xffff) == handle) {
62 *pengine = nv_object(engine);
63 *poclass = oclass;
64 return 0;
65 }
66 oclass++;
67 }
68 }
69
70 mask &= ~(1ULL << i);
71 }
72
73 return -EINVAL;
74}
75
76int
77nvkm_parent_lclass(struct nvkm_object *parent, u32 *lclass, int size)
78{
79 struct nvkm_sclass *sclass;
80 struct nvkm_engine *engine;
81 struct nvkm_oclass *oclass;
82 int nr = -1, i;
83 u64 mask;
84
85 sclass = nv_parent(parent)->sclass;
86 while (sclass) {
87 if (++nr < size)
88 lclass[nr] = sclass->oclass->handle & 0xffff;
89 sclass = sclass->sclass;
90 }
91
92 mask = nv_parent(parent)->engine;
93 while (i = __ffs64(mask), mask) {
94 engine = nvkm_engine(parent, i);
95 if (engine && (oclass = engine->sclass)) {
96 while (oclass->ofuncs) {
97 if (++nr < size)
98 lclass[nr] = oclass->handle & 0xffff;
99 oclass++;
100 }
101 }
102
103 mask &= ~(1ULL << i);
104 }
105
106 return nr + 1;
107}
108
109int
110nvkm_parent_create_(struct nvkm_object *parent, struct nvkm_object *engine,
111 struct nvkm_oclass *oclass, u32 pclass,
112 struct nvkm_oclass *sclass, u64 engcls,
113 int size, void **pobject)
114{
115 struct nvkm_parent *object;
116 struct nvkm_sclass *nclass;
117 int ret;
118
119 ret = nvkm_object_create_(parent, engine, oclass, pclass |
120 NV_PARENT_CLASS, size, pobject);
121 object = *pobject;
122 if (ret)
123 return ret;
124
125 while (sclass && sclass->ofuncs) {
126 nclass = kzalloc(sizeof(*nclass), GFP_KERNEL);
127 if (!nclass)
128 return -ENOMEM;
129
130 nclass->sclass = object->sclass;
131 object->sclass = nclass;
132 nclass->engine = engine ? nv_engine(engine) : NULL;
133 nclass->oclass = sclass;
134 sclass++;
135 }
136
137 object->engine = engcls;
138 return 0;
139}
140
141void
142nvkm_parent_destroy(struct nvkm_parent *parent)
143{
144 struct nvkm_sclass *sclass;
145
146 while ((sclass = parent->sclass)) {
147 parent->sclass = sclass->sclass;
148 kfree(sclass);
149 }
150
151 nvkm_object_destroy(&parent->object);
152}
153
154
155void
156_nvkm_parent_dtor(struct nvkm_object *object)
157{
158 nvkm_parent_destroy(nv_parent(object));
159}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/printk.c b/drivers/gpu/drm/nouveau/nvkm/core/printk.c
deleted file mode 100644
index 4a220eb91660..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/printk.c
+++ /dev/null
@@ -1,103 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include <core/printk.h>
25#include <core/client.h>
26#include <core/device.h>
27
28int nv_info_debug_level = NV_DBG_INFO_NORMAL;
29
30void
31nv_printk_(struct nvkm_object *object, int level, const char *fmt, ...)
32{
33 static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
34 const char *pfx;
35 char mfmt[256];
36 va_list args;
37
38 switch (level) {
39 case NV_DBG_FATAL:
40 pfx = KERN_CRIT;
41 break;
42 case NV_DBG_ERROR:
43 pfx = KERN_ERR;
44 break;
45 case NV_DBG_WARN:
46 pfx = KERN_WARNING;
47 break;
48 case NV_DBG_INFO_NORMAL:
49 pfx = KERN_INFO;
50 break;
51 case NV_DBG_DEBUG:
52 case NV_DBG_PARANOIA:
53 case NV_DBG_TRACE:
54 case NV_DBG_SPAM:
55 default:
56 pfx = KERN_DEBUG;
57 break;
58 }
59
60 if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
61 struct nvkm_object *device;
62 struct nvkm_object *subdev;
63 char obuf[64], *ofmt = "";
64
65 if (object->engine == NULL) {
66 subdev = object;
67 while (subdev && !nv_iclass(subdev, NV_SUBDEV_CLASS))
68 subdev = subdev->parent;
69 } else {
70 subdev = &object->engine->subdev.object;
71 }
72
73 device = subdev;
74 if (device->parent)
75 device = device->parent;
76
77 if (object != subdev) {
78 snprintf(obuf, sizeof(obuf), "[0x%08x]",
79 nv_hclass(object));
80 ofmt = obuf;
81 }
82
83 if (level > nv_subdev(subdev)->debug)
84 return;
85
86 snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s][%s]%s %s", pfx,
87 name[level], nv_subdev(subdev)->name,
88 nv_device(device)->name, ofmt, fmt);
89 } else
90 if (object && nv_iclass(object, NV_CLIENT_CLASS)) {
91 if (level > nv_client(object)->debug)
92 return;
93
94 snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s] %s", pfx,
95 name[level], nv_client(object)->name, fmt);
96 } else {
97 snprintf(mfmt, sizeof(mfmt), "%snouveau: %s", pfx, fmt);
98 }
99
100 va_start(args, fmt);
101 vprintk(mfmt, args);
102 va_end(args);
103}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
index ebd4d15479bd..3216e157a8a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
@@ -22,8 +22,6 @@
22#include <core/ramht.h> 22#include <core/ramht.h>
23#include <core/engine.h> 23#include <core/engine.h>
24 24
25#include <subdev/bar.h>
26
27static u32 25static u32
28nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle) 26nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle)
29{ 27{
@@ -35,72 +33,130 @@ nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle)
35 } 33 }
36 34
37 hash ^= chid << (ramht->bits - 4); 35 hash ^= chid << (ramht->bits - 4);
38 hash = hash << 3;
39 return hash; 36 return hash;
40} 37}
41 38
42int 39struct nvkm_gpuobj *
43nvkm_ramht_insert(struct nvkm_ramht *ramht, int chid, u32 handle, u32 context) 40nvkm_ramht_search(struct nvkm_ramht *ramht, int chid, u32 handle)
44{ 41{
45 struct nvkm_bar *bar = nvkm_bar(ramht);
46 u32 co, ho; 42 u32 co, ho;
47 43
48 co = ho = nvkm_ramht_hash(ramht, chid, handle); 44 co = ho = nvkm_ramht_hash(ramht, chid, handle);
49 do { 45 do {
50 if (!nv_ro32(ramht, co + 4)) { 46 if (ramht->data[co].chid == chid) {
51 nv_wo32(ramht, co + 0, handle); 47 if (ramht->data[co].handle == handle)
52 nv_wo32(ramht, co + 4, context); 48 return ramht->data[co].inst;
53 if (bar)
54 bar->flush(bar);
55 return co;
56 } 49 }
57 50
58 co += 8; 51 if (++co >= ramht->size)
59 if (co >= nv_gpuobj(ramht)->size)
60 co = 0; 52 co = 0;
61 } while (co != ho); 53 } while (co != ho);
62 54
63 return -ENOMEM; 55 return NULL;
56}
57
58static int
59nvkm_ramht_update(struct nvkm_ramht *ramht, int co, struct nvkm_object *object,
60 int chid, int addr, u32 handle, u32 context)
61{
62 struct nvkm_ramht_data *data = &ramht->data[co];
63 u64 inst = 0x00000040; /* just non-zero for <=g8x fifo ramht */
64 int ret;
65
66 nvkm_gpuobj_del(&data->inst);
67 data->chid = chid;
68 data->handle = handle;
69
70 if (object) {
71 ret = nvkm_object_bind(object, ramht->parent, 16, &data->inst);
72 if (ret) {
73 if (ret != -ENODEV) {
74 data->chid = -1;
75 return ret;
76 }
77 data->inst = NULL;
78 }
79
80 if (data->inst) {
81 if (ramht->device->card_type >= NV_50)
82 inst = data->inst->node->offset;
83 else
84 inst = data->inst->addr;
85 }
86
87 if (addr < 0) context |= inst << -addr;
88 else context |= inst >> addr;
89 }
90
91 nvkm_kmap(ramht->gpuobj);
92 nvkm_wo32(ramht->gpuobj, (co << 3) + 0, handle);
93 nvkm_wo32(ramht->gpuobj, (co << 3) + 4, context);
94 nvkm_done(ramht->gpuobj);
95 return co + 1;
64} 96}
65 97
66void 98void
67nvkm_ramht_remove(struct nvkm_ramht *ramht, int cookie) 99nvkm_ramht_remove(struct nvkm_ramht *ramht, int cookie)
68{ 100{
69 struct nvkm_bar *bar = nvkm_bar(ramht); 101 if (--cookie >= 0)
70 nv_wo32(ramht, cookie + 0, 0x00000000); 102 nvkm_ramht_update(ramht, cookie, NULL, -1, 0, 0, 0);
71 nv_wo32(ramht, cookie + 4, 0x00000000); 103}
72 if (bar) 104
73 bar->flush(bar); 105int
106nvkm_ramht_insert(struct nvkm_ramht *ramht, struct nvkm_object *object,
107 int chid, int addr, u32 handle, u32 context)
108{
109 u32 co, ho;
110
111 if (nvkm_ramht_search(ramht, chid, handle))
112 return -EEXIST;
113
114 co = ho = nvkm_ramht_hash(ramht, chid, handle);
115 do {
116 if (ramht->data[co].chid < 0) {
117 return nvkm_ramht_update(ramht, co, object, chid,
118 addr, handle, context);
119 }
120
121 if (++co >= ramht->size)
122 co = 0;
123 } while (co != ho);
124
125 return -ENOSPC;
74} 126}
75 127
76static struct nvkm_oclass 128void
77nvkm_ramht_oclass = { 129nvkm_ramht_del(struct nvkm_ramht **pramht)
78 .handle = 0x0000abcd, 130{
79 .ofuncs = &(struct nvkm_ofuncs) { 131 struct nvkm_ramht *ramht = *pramht;
80 .ctor = NULL, 132 if (ramht) {
81 .dtor = _nvkm_gpuobj_dtor, 133 nvkm_gpuobj_del(&ramht->gpuobj);
82 .init = _nvkm_gpuobj_init, 134 kfree(*pramht);
83 .fini = _nvkm_gpuobj_fini, 135 *pramht = NULL;
84 .rd32 = _nvkm_gpuobj_rd32, 136 }
85 .wr32 = _nvkm_gpuobj_wr32, 137}
86 },
87};
88 138
89int 139int
90nvkm_ramht_new(struct nvkm_object *parent, struct nvkm_object *pargpu, 140nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
91 u32 size, u32 align, struct nvkm_ramht **pramht) 141 struct nvkm_gpuobj *parent, struct nvkm_ramht **pramht)
92{ 142{
93 struct nvkm_ramht *ramht; 143 struct nvkm_ramht *ramht;
94 int ret; 144 int ret, i;
95 145
96 ret = nvkm_gpuobj_create(parent, parent->engine ? 146 if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
97 &parent->engine->subdev.object : parent, /* <nv50 ramht */ 147 sizeof(*ramht->data), GFP_KERNEL)))
98 &nvkm_ramht_oclass, 0, pargpu, size, 148 return -ENOMEM;
99 align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
100 *pramht = ramht;
101 if (ret)
102 return ret;
103 149
104 ramht->bits = order_base_2(nv_gpuobj(ramht)->size >> 3); 150 ramht->device = device;
105 return 0; 151 ramht->parent = parent;
152 ramht->size = size >> 3;
153 ramht->bits = order_base_2(ramht->size);
154 for (i = 0; i < ramht->size; i++)
155 ramht->data[i].chid = -1;
156
157 ret = nvkm_gpuobj_new(ramht->device, size, align, true,
158 ramht->parent, &ramht->gpuobj);
159 if (ret)
160 nvkm_ramht_del(pramht);
161 return ret;
106} 162}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index c5fb3a793174..7de98470a2a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -25,96 +25,178 @@
25#include <core/device.h> 25#include <core/device.h>
26#include <core/option.h> 26#include <core/option.h>
27 27
28struct nvkm_subdev * 28static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR];
29nvkm_subdev(void *obj, int idx) 29
30{ 30const char *
31 struct nvkm_object *object = nv_object(obj); 31nvkm_subdev_name[NVKM_SUBDEV_NR] = {
32 while (object && !nv_iclass(object, NV_SUBDEV_CLASS)) 32 [NVKM_SUBDEV_BAR ] = "bar",
33 object = object->parent; 33 [NVKM_SUBDEV_VBIOS ] = "bios",
34 if (object == NULL || nv_subidx(nv_subdev(object)) != idx) 34 [NVKM_SUBDEV_BUS ] = "bus",
35 object = nv_device(obj)->subdev[idx]; 35 [NVKM_SUBDEV_CLK ] = "clk",
36 return object ? nv_subdev(object) : NULL; 36 [NVKM_SUBDEV_DEVINIT] = "devinit",
37} 37 [NVKM_SUBDEV_FB ] = "fb",
38 [NVKM_SUBDEV_FUSE ] = "fuse",
39 [NVKM_SUBDEV_GPIO ] = "gpio",
40 [NVKM_SUBDEV_I2C ] = "i2c",
41 [NVKM_SUBDEV_IBUS ] = "priv",
42 [NVKM_SUBDEV_INSTMEM] = "imem",
43 [NVKM_SUBDEV_LTC ] = "ltc",
44 [NVKM_SUBDEV_MC ] = "mc",
45 [NVKM_SUBDEV_MMU ] = "mmu",
46 [NVKM_SUBDEV_MXM ] = "mxm",
47 [NVKM_SUBDEV_PCI ] = "pci",
48 [NVKM_SUBDEV_PMU ] = "pmu",
49 [NVKM_SUBDEV_THERM ] = "therm",
50 [NVKM_SUBDEV_TIMER ] = "tmr",
51 [NVKM_SUBDEV_VOLT ] = "volt",
52 [NVKM_ENGINE_BSP ] = "bsp",
53 [NVKM_ENGINE_CE0 ] = "ce0",
54 [NVKM_ENGINE_CE1 ] = "ce1",
55 [NVKM_ENGINE_CE2 ] = "ce2",
56 [NVKM_ENGINE_CIPHER ] = "cipher",
57 [NVKM_ENGINE_DISP ] = "disp",
58 [NVKM_ENGINE_DMAOBJ ] = "dma",
59 [NVKM_ENGINE_FIFO ] = "fifo",
60 [NVKM_ENGINE_GR ] = "gr",
61 [NVKM_ENGINE_IFB ] = "ifb",
62 [NVKM_ENGINE_ME ] = "me",
63 [NVKM_ENGINE_MPEG ] = "mpeg",
64 [NVKM_ENGINE_MSENC ] = "msenc",
65 [NVKM_ENGINE_MSPDEC ] = "mspdec",
66 [NVKM_ENGINE_MSPPP ] = "msppp",
67 [NVKM_ENGINE_MSVLD ] = "msvld",
68 [NVKM_ENGINE_PM ] = "pm",
69 [NVKM_ENGINE_SEC ] = "sec",
70 [NVKM_ENGINE_SW ] = "sw",
71 [NVKM_ENGINE_VIC ] = "vic",
72 [NVKM_ENGINE_VP ] = "vp",
73};
38 74
39void 75void
40nvkm_subdev_reset(struct nvkm_object *subdev) 76nvkm_subdev_intr(struct nvkm_subdev *subdev)
41{ 77{
42 nv_trace(subdev, "resetting...\n"); 78 if (subdev->func->intr)
43 nv_ofuncs(subdev)->fini(subdev, false); 79 subdev->func->intr(subdev);
44 nv_debug(subdev, "reset\n");
45} 80}
46 81
47int 82int
48nvkm_subdev_init(struct nvkm_subdev *subdev) 83nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
49{ 84{
50 int ret = nvkm_object_init(&subdev->object); 85 struct nvkm_device *device = subdev->device;
51 if (ret) 86 const char *action = suspend ? "suspend" : "fini";
52 return ret; 87 u32 pmc_enable = subdev->pmc_enable;
88 s64 time;
53 89
54 nvkm_subdev_reset(&subdev->object); 90 nvkm_trace(subdev, "%s running...\n", action);
55 return 0; 91 time = ktime_to_us(ktime_get());
56}
57 92
58int 93 if (subdev->func->fini) {
59_nvkm_subdev_init(struct nvkm_object *object) 94 int ret = subdev->func->fini(subdev, suspend);
60{ 95 if (ret) {
61 return nvkm_subdev_init(nv_subdev(object)); 96 nvkm_error(subdev, "%s failed, %d\n", action, ret);
62} 97 if (suspend)
98 return ret;
99 }
100 }
63 101
64int 102 if (pmc_enable) {
65nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend) 103 nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
66{ 104 nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
67 if (subdev->unit) { 105 nvkm_rd32(device, 0x000200);
68 nv_mask(subdev, 0x000200, subdev->unit, 0x00000000);
69 nv_mask(subdev, 0x000200, subdev->unit, subdev->unit);
70 } 106 }
71 107
72 return nvkm_object_fini(&subdev->object, suspend); 108 time = ktime_to_us(ktime_get()) - time;
109 nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
110 return 0;
73} 111}
74 112
75int 113int
76_nvkm_subdev_fini(struct nvkm_object *object, bool suspend) 114nvkm_subdev_preinit(struct nvkm_subdev *subdev)
77{ 115{
78 return nvkm_subdev_fini(nv_subdev(object), suspend); 116 s64 time;
79}
80 117
81void 118 nvkm_trace(subdev, "preinit running...\n");
82nvkm_subdev_destroy(struct nvkm_subdev *subdev) 119 time = ktime_to_us(ktime_get());
83{
84 int subidx = nv_hclass(subdev) & 0xff;
85 nv_device(subdev)->subdev[subidx] = NULL;
86 nvkm_object_destroy(&subdev->object);
87}
88 120
89void 121 if (subdev->func->preinit) {
90_nvkm_subdev_dtor(struct nvkm_object *object) 122 int ret = subdev->func->preinit(subdev);
91{ 123 if (ret) {
92 nvkm_subdev_destroy(nv_subdev(object)); 124 nvkm_error(subdev, "preinit failed, %d\n", ret);
125 return ret;
126 }
127 }
128
129 time = ktime_to_us(ktime_get()) - time;
130 nvkm_trace(subdev, "preinit completed in %lldus\n", time);
131 return 0;
93} 132}
94 133
95int 134int
96nvkm_subdev_create_(struct nvkm_object *parent, struct nvkm_object *engine, 135nvkm_subdev_init(struct nvkm_subdev *subdev)
97 struct nvkm_oclass *oclass, u32 pclass,
98 const char *subname, const char *sysname,
99 int size, void **pobject)
100{ 136{
101 struct nvkm_subdev *subdev; 137 s64 time;
102 int ret; 138 int ret;
103 139
104 ret = nvkm_object_create_(parent, engine, oclass, pclass | 140 nvkm_trace(subdev, "init running...\n");
105 NV_SUBDEV_CLASS, size, pobject); 141 time = ktime_to_us(ktime_get());
106 subdev = *pobject; 142
107 if (ret) 143 if (subdev->func->oneinit && !subdev->oneinit) {
108 return ret; 144 s64 time;
145 nvkm_trace(subdev, "one-time init running...\n");
146 time = ktime_to_us(ktime_get());
147 ret = subdev->func->oneinit(subdev);
148 if (ret) {
149 nvkm_error(subdev, "one-time init failed, %d\n", ret);
150 return ret;
151 }
109 152
110 __mutex_init(&subdev->mutex, subname, &oclass->lock_class_key); 153 subdev->oneinit = true;
111 subdev->name = subname; 154 time = ktime_to_us(ktime_get()) - time;
155 nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
156 }
112 157
113 if (parent) { 158 if (subdev->func->init) {
114 struct nvkm_device *device = nv_device(parent); 159 ret = subdev->func->init(subdev);
115 subdev->debug = nvkm_dbgopt(device->dbgopt, subname); 160 if (ret) {
116 subdev->mmio = nv_subdev(device)->mmio; 161 nvkm_error(subdev, "init failed, %d\n", ret);
162 return ret;
163 }
117 } 164 }
118 165
166 time = ktime_to_us(ktime_get()) - time;
167 nvkm_trace(subdev, "init completed in %lldus\n", time);
119 return 0; 168 return 0;
120} 169}
170
171void
172nvkm_subdev_del(struct nvkm_subdev **psubdev)
173{
174 struct nvkm_subdev *subdev = *psubdev;
175 s64 time;
176
177 if (subdev && !WARN_ON(!subdev->func)) {
178 nvkm_trace(subdev, "destroy running...\n");
179 time = ktime_to_us(ktime_get());
180 if (subdev->func->dtor)
181 *psubdev = subdev->func->dtor(subdev);
182 time = ktime_to_us(ktime_get()) - time;
183 nvkm_trace(subdev, "destroy completed in %lldus\n", time);
184 kfree(*psubdev);
185 *psubdev = NULL;
186 }
187}
188
189void
190nvkm_subdev_ctor(const struct nvkm_subdev_func *func,
191 struct nvkm_device *device, int index, u32 pmc_enable,
192 struct nvkm_subdev *subdev)
193{
194 const char *name = nvkm_subdev_name[index];
195 subdev->func = func;
196 subdev->device = device;
197 subdev->index = index;
198 subdev->pmc_enable = pmc_enable;
199
200 __mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]);
201 subdev->debug = nvkm_dbgopt(device->dbgopt, name);
202}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index 6bd3d756f32c..36f724763fde 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -6,7 +6,7 @@ include $(src)/nvkm/engine/ce/Kbuild
6include $(src)/nvkm/engine/cipher/Kbuild 6include $(src)/nvkm/engine/cipher/Kbuild
7include $(src)/nvkm/engine/device/Kbuild 7include $(src)/nvkm/engine/device/Kbuild
8include $(src)/nvkm/engine/disp/Kbuild 8include $(src)/nvkm/engine/disp/Kbuild
9include $(src)/nvkm/engine/dmaobj/Kbuild 9include $(src)/nvkm/engine/dma/Kbuild
10include $(src)/nvkm/engine/fifo/Kbuild 10include $(src)/nvkm/engine/fifo/Kbuild
11include $(src)/nvkm/engine/gr/Kbuild 11include $(src)/nvkm/engine/gr/Kbuild
12include $(src)/nvkm/engine/mpeg/Kbuild 12include $(src)/nvkm/engine/mpeg/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index a0b1fd80fa93..3ef01071f073 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -22,72 +22,23 @@
22 * Authors: Ben Skeggs, Ilia Mirkin 22 * Authors: Ben Skeggs, Ilia Mirkin
23 */ 23 */
24#include <engine/bsp.h> 24#include <engine/bsp.h>
25#include <engine/xtensa.h>
26 25
27#include <core/engctx.h> 26#include <nvif/class.h>
28 27
29/******************************************************************************* 28static const struct nvkm_xtensa_func
30 * BSP object classes 29g84_bsp = {
31 ******************************************************************************/ 30 .pmc_enable = 0x04008000,
32 31 .fifo_val = 0x1111,
33static struct nvkm_oclass 32 .unkd28 = 0x90044,
34g84_bsp_sclass[] = { 33 .sclass = {
35 { 0x74b0, &nvkm_object_ofuncs }, 34 { -1, -1, NV74_BSP },
36 {}, 35 {}
37}; 36 }
38
39/*******************************************************************************
40 * BSP context
41 ******************************************************************************/
42
43static struct nvkm_oclass
44g84_bsp_cclass = {
45 .handle = NV_ENGCTX(BSP, 0x84),
46 .ofuncs = &(struct nvkm_ofuncs) {
47 .ctor = _nvkm_xtensa_engctx_ctor,
48 .dtor = _nvkm_engctx_dtor,
49 .init = _nvkm_engctx_init,
50 .fini = _nvkm_engctx_fini,
51 .rd32 = _nvkm_engctx_rd32,
52 .wr32 = _nvkm_engctx_wr32,
53 },
54}; 37};
55 38
56/******************************************************************************* 39int
57 * BSP engine/subdev functions 40g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
58 ******************************************************************************/
59
60static int
61g84_bsp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
62 struct nvkm_oclass *oclass, void *data, u32 size,
63 struct nvkm_object **pobject)
64{ 41{
65 struct nvkm_xtensa *priv; 42 return nvkm_xtensa_new_(&g84_bsp, device, index,
66 int ret; 43 true, 0x103000, pengine);
67
68 ret = nvkm_xtensa_create(parent, engine, oclass, 0x103000, true,
69 "PBSP", "bsp", &priv);
70 *pobject = nv_object(priv);
71 if (ret)
72 return ret;
73
74 nv_subdev(priv)->unit = 0x04008000;
75 nv_engine(priv)->cclass = &g84_bsp_cclass;
76 nv_engine(priv)->sclass = g84_bsp_sclass;
77 priv->fifo_val = 0x1111;
78 priv->unkd28 = 0x90044;
79 return 0;
80} 44}
81
82struct nvkm_oclass
83g84_bsp_oclass = {
84 .handle = NV_ENGINE(BSP, 0x84),
85 .ofuncs = &(struct nvkm_ofuncs) {
86 .ctor = g84_bsp_ctor,
87 .dtor = _nvkm_xtensa_dtor,
88 .init = _nvkm_xtensa_init,
89 .fini = _nvkm_xtensa_fini,
90 .rd32 = _nvkm_xtensa_rd32,
91 .wr32 = _nvkm_xtensa_wr32,
92 },
93};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
index a558dfa4d76a..6226bcd98ca9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
@@ -24,9 +24,9 @@
24 */ 24 */
25 25
26#ifdef GT215 26#ifdef GT215
27.section #gt215_pce_data 27.section #gt215_ce_data
28#else 28#else
29.section #gf100_pce_data 29.section #gf100_ce_data
30#endif 30#endif
31 31
32ctx_object: .b32 0 32ctx_object: .b32 0
@@ -128,9 +128,9 @@ dispatch_dma:
128.b16 0x800 0 128.b16 0x800 0
129 129
130#ifdef GT215 130#ifdef GT215
131.section #gt215_pce_code 131.section #gt215_ce_code
132#else 132#else
133.section #gf100_pce_code 133.section #gf100_ce_code
134#endif 134#endif
135 135
136main: 136main:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
index d9af6e4e4585..05bb65608dfe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
1uint32_t gf100_pce_data[] = { 1uint32_t gf100_ce_data[] = {
2/* 0x0000: ctx_object */ 2/* 0x0000: ctx_object */
3 0x00000000, 3 0x00000000,
4/* 0x0004: ctx_query_address_high */ 4/* 0x0004: ctx_query_address_high */
@@ -171,7 +171,7 @@ uint32_t gf100_pce_data[] = {
171 0x00000800, 171 0x00000800,
172}; 172};
173 173
174uint32_t gf100_pce_code[] = { 174uint32_t gf100_ce_code[] = {
175/* 0x0000: main */ 175/* 0x0000: main */
176 0x04fe04bd, 176 0x04fe04bd,
177 0x3517f000, 177 0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
index f42c0d0d6cee..972281d10f38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
1uint32_t gt215_pce_data[] = { 1uint32_t gt215_ce_data[] = {
2/* 0x0000: ctx_object */ 2/* 0x0000: ctx_object */
3 0x00000000, 3 0x00000000,
4/* 0x0004: ctx_dma */ 4/* 0x0004: ctx_dma */
@@ -183,7 +183,7 @@ uint32_t gt215_pce_data[] = {
183 0x00000800, 183 0x00000800,
184}; 184};
185 185
186uint32_t gt215_pce_code[] = { 186uint32_t gt215_ce_code[] = {
187/* 0x0000: main */ 187/* 0x0000: main */
188 0x04fe04bd, 188 0x04fe04bd,
189 0x3517f000, 189 0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
index 2d2e549c2e34..92a9f35df1a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
@@ -21,146 +21,60 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <engine/ce.h> 24#include "priv.h"
25#include <engine/falcon.h>
26#include "fuc/gf100.fuc3.h" 25#include "fuc/gf100.fuc3.h"
27 26
28struct gf100_ce_priv { 27#include <nvif/class.h>
29 struct nvkm_falcon base;
30};
31
32/*******************************************************************************
33 * Copy object classes
34 ******************************************************************************/
35
36static struct nvkm_oclass
37gf100_ce0_sclass[] = {
38 { 0x90b5, &nvkm_object_ofuncs },
39 {},
40};
41
42static struct nvkm_oclass
43gf100_ce1_sclass[] = {
44 { 0x90b8, &nvkm_object_ofuncs },
45 {},
46};
47
48/*******************************************************************************
49 * PCE context
50 ******************************************************************************/
51
52static struct nvkm_ofuncs
53gf100_ce_context_ofuncs = {
54 .ctor = _nvkm_falcon_context_ctor,
55 .dtor = _nvkm_falcon_context_dtor,
56 .init = _nvkm_falcon_context_init,
57 .fini = _nvkm_falcon_context_fini,
58 .rd32 = _nvkm_falcon_context_rd32,
59 .wr32 = _nvkm_falcon_context_wr32,
60};
61
62static struct nvkm_oclass
63gf100_ce0_cclass = {
64 .handle = NV_ENGCTX(CE0, 0xc0),
65 .ofuncs = &gf100_ce_context_ofuncs,
66};
67
68static struct nvkm_oclass
69gf100_ce1_cclass = {
70 .handle = NV_ENGCTX(CE1, 0xc0),
71 .ofuncs = &gf100_ce_context_ofuncs,
72};
73
74/*******************************************************************************
75 * PCE engine/subdev functions
76 ******************************************************************************/
77 28
78static int 29static void
79gf100_ce_init(struct nvkm_object *object) 30gf100_ce_init(struct nvkm_falcon *ce)
80{ 31{
81 struct gf100_ce_priv *priv = (void *)object; 32 struct nvkm_device *device = ce->engine.subdev.device;
82 int ret; 33 const int index = ce->engine.subdev.index - NVKM_ENGINE_CE0;
83 34 nvkm_wr32(device, ce->addr + 0x084, index);
84 ret = nvkm_falcon_init(&priv->base);
85 if (ret)
86 return ret;
87
88 nv_wo32(priv, 0x084, nv_engidx(&priv->base.base) - NVDEV_ENGINE_CE0);
89 return 0;
90} 35}
91 36
92static int 37static const struct nvkm_falcon_func
93gf100_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 38gf100_ce0 = {
94 struct nvkm_oclass *oclass, void *data, u32 size, 39 .code.data = gf100_ce_code,
95 struct nvkm_object **pobject) 40 .code.size = sizeof(gf100_ce_code),
96{ 41 .data.data = gf100_ce_data,
97 struct gf100_ce_priv *priv; 42 .data.size = sizeof(gf100_ce_data),
98 int ret; 43 .pmc_enable = 0x00000040,
99 44 .init = gf100_ce_init,
100 ret = nvkm_falcon_create(parent, engine, oclass, 0x104000, true, 45 .intr = gt215_ce_intr,
101 "PCE0", "ce0", &priv); 46 .sclass = {
102 *pobject = nv_object(priv); 47 { -1, -1, FERMI_DMA },
103 if (ret) 48 {}
104 return ret; 49 }
50};
105 51
106 nv_subdev(priv)->unit = 0x00000040; 52static const struct nvkm_falcon_func
107 nv_subdev(priv)->intr = gt215_ce_intr; 53gf100_ce1 = {
108 nv_engine(priv)->cclass = &gf100_ce0_cclass; 54 .code.data = gf100_ce_code,
109 nv_engine(priv)->sclass = gf100_ce0_sclass; 55 .code.size = sizeof(gf100_ce_code),
110 nv_falcon(priv)->code.data = gf100_pce_code; 56 .data.data = gf100_ce_data,
111 nv_falcon(priv)->code.size = sizeof(gf100_pce_code); 57 .data.size = sizeof(gf100_ce_data),
112 nv_falcon(priv)->data.data = gf100_pce_data; 58 .pmc_enable = 0x00000080,
113 nv_falcon(priv)->data.size = sizeof(gf100_pce_data); 59 .init = gf100_ce_init,
114 return 0; 60 .intr = gt215_ce_intr,
115} 61 .sclass = {
62 { -1, -1, FERMI_DECOMPRESS },
63 {}
64 }
65};
116 66
117static int 67int
118gf100_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 68gf100_ce_new(struct nvkm_device *device, int index,
119 struct nvkm_oclass *oclass, void *data, u32 size, 69 struct nvkm_engine **pengine)
120 struct nvkm_object **pobject)
121{ 70{
122 struct gf100_ce_priv *priv; 71 if (index == NVKM_ENGINE_CE0) {
123 int ret; 72 return nvkm_falcon_new_(&gf100_ce0, device, index, true,
124 73 0x104000, pengine);
125 ret = nvkm_falcon_create(parent, engine, oclass, 0x105000, true, 74 } else
126 "PCE1", "ce1", &priv); 75 if (index == NVKM_ENGINE_CE1) {
127 *pobject = nv_object(priv); 76 return nvkm_falcon_new_(&gf100_ce1, device, index, true,
128 if (ret) 77 0x105000, pengine);
129 return ret; 78 }
130 79 return -ENODEV;
131 nv_subdev(priv)->unit = 0x00000080;
132 nv_subdev(priv)->intr = gt215_ce_intr;
133 nv_engine(priv)->cclass = &gf100_ce1_cclass;
134 nv_engine(priv)->sclass = gf100_ce1_sclass;
135 nv_falcon(priv)->code.data = gf100_pce_code;
136 nv_falcon(priv)->code.size = sizeof(gf100_pce_code);
137 nv_falcon(priv)->data.data = gf100_pce_data;
138 nv_falcon(priv)->data.size = sizeof(gf100_pce_data);
139 return 0;
140} 80}
141
142struct nvkm_oclass
143gf100_ce0_oclass = {
144 .handle = NV_ENGINE(CE0, 0xc0),
145 .ofuncs = &(struct nvkm_ofuncs) {
146 .ctor = gf100_ce0_ctor,
147 .dtor = _nvkm_falcon_dtor,
148 .init = gf100_ce_init,
149 .fini = _nvkm_falcon_fini,
150 .rd32 = _nvkm_falcon_rd32,
151 .wr32 = _nvkm_falcon_wr32,
152 },
153};
154
155struct nvkm_oclass
156gf100_ce1_oclass = {
157 .handle = NV_ENGINE(CE1, 0xc0),
158 .ofuncs = &(struct nvkm_ofuncs) {
159 .ctor = gf100_ce1_ctor,
160 .dtor = _nvkm_falcon_dtor,
161 .init = gf100_ce_init,
162 .fini = _nvkm_falcon_fini,
163 .rd32 = _nvkm_falcon_rd32,
164 .wr32 = _nvkm_falcon_wr32,
165 },
166};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
index a998932fae45..c541a1c012dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
@@ -21,153 +21,47 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <engine/ce.h> 24#include "priv.h"
25 25
26#include <core/engctx.h> 26#include <nvif/class.h>
27 27
28struct gk104_ce_priv { 28void
29 struct nvkm_engine base; 29gk104_ce_intr(struct nvkm_engine *ce)
30};
31
32/*******************************************************************************
33 * Copy object classes
34 ******************************************************************************/
35
36static struct nvkm_oclass
37gk104_ce_sclass[] = {
38 { 0xa0b5, &nvkm_object_ofuncs },
39 {},
40};
41
42/*******************************************************************************
43 * PCE context
44 ******************************************************************************/
45
46static struct nvkm_ofuncs
47gk104_ce_context_ofuncs = {
48 .ctor = _nvkm_engctx_ctor,
49 .dtor = _nvkm_engctx_dtor,
50 .init = _nvkm_engctx_init,
51 .fini = _nvkm_engctx_fini,
52 .rd32 = _nvkm_engctx_rd32,
53 .wr32 = _nvkm_engctx_wr32,
54};
55
56static struct nvkm_oclass
57gk104_ce_cclass = {
58 .handle = NV_ENGCTX(CE0, 0xc0),
59 .ofuncs = &gk104_ce_context_ofuncs,
60};
61
62/*******************************************************************************
63 * PCE engine/subdev functions
64 ******************************************************************************/
65
66static void
67gk104_ce_intr(struct nvkm_subdev *subdev)
68{ 30{
69 const int ce = nv_subidx(subdev) - NVDEV_ENGINE_CE0; 31 const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x1000;
70 struct gk104_ce_priv *priv = (void *)subdev; 32 struct nvkm_subdev *subdev = &ce->subdev;
71 u32 stat = nv_rd32(priv, 0x104908 + (ce * 0x1000)); 33 struct nvkm_device *device = subdev->device;
72 34 u32 stat = nvkm_rd32(device, 0x104908 + base);
73 if (stat) { 35 if (stat) {
74 nv_warn(priv, "unhandled intr 0x%08x\n", stat); 36 nvkm_warn(subdev, "intr %08x\n", stat);
75 nv_wr32(priv, 0x104908 + (ce * 0x1000), stat); 37 nvkm_wr32(device, 0x104908 + base, stat);
76 } 38 }
77} 39}
78 40
79static int 41static const struct nvkm_engine_func
80gk104_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 42gk104_ce = {
81 struct nvkm_oclass *oclass, void *data, u32 size, 43 .intr = gk104_ce_intr,
82 struct nvkm_object **pobject) 44 .sclass = {
83{ 45 { -1, -1, KEPLER_DMA_COPY_A },
84 struct gk104_ce_priv *priv; 46 {}
85 int ret; 47 }
86 48};
87 ret = nvkm_engine_create(parent, engine, oclass, true,
88 "PCE0", "ce0", &priv);
89 *pobject = nv_object(priv);
90 if (ret)
91 return ret;
92
93 nv_subdev(priv)->unit = 0x00000040;
94 nv_subdev(priv)->intr = gk104_ce_intr;
95 nv_engine(priv)->cclass = &gk104_ce_cclass;
96 nv_engine(priv)->sclass = gk104_ce_sclass;
97 return 0;
98}
99
100static int
101gk104_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
102 struct nvkm_oclass *oclass, void *data, u32 size,
103 struct nvkm_object **pobject)
104{
105 struct gk104_ce_priv *priv;
106 int ret;
107
108 ret = nvkm_engine_create(parent, engine, oclass, true,
109 "PCE1", "ce1", &priv);
110 *pobject = nv_object(priv);
111 if (ret)
112 return ret;
113
114 nv_subdev(priv)->unit = 0x00000080;
115 nv_subdev(priv)->intr = gk104_ce_intr;
116 nv_engine(priv)->cclass = &gk104_ce_cclass;
117 nv_engine(priv)->sclass = gk104_ce_sclass;
118 return 0;
119}
120 49
121static int 50int
122gk104_ce2_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 51gk104_ce_new(struct nvkm_device *device, int index,
123 struct nvkm_oclass *oclass, void *data, u32 size, 52 struct nvkm_engine **pengine)
124 struct nvkm_object **pobject)
125{ 53{
126 struct gk104_ce_priv *priv; 54 if (index == NVKM_ENGINE_CE0) {
127 int ret; 55 return nvkm_engine_new_(&gk104_ce, device, index,
128 56 0x00000040, true, pengine);
129 ret = nvkm_engine_create(parent, engine, oclass, true, 57 } else
130 "PCE2", "ce2", &priv); 58 if (index == NVKM_ENGINE_CE1) {
131 *pobject = nv_object(priv); 59 return nvkm_engine_new_(&gk104_ce, device, index,
132 if (ret) 60 0x00000080, true, pengine);
133 return ret; 61 } else
134 62 if (index == NVKM_ENGINE_CE2) {
135 nv_subdev(priv)->unit = 0x00200000; 63 return nvkm_engine_new_(&gk104_ce, device, index,
136 nv_subdev(priv)->intr = gk104_ce_intr; 64 0x00200000, true, pengine);
137 nv_engine(priv)->cclass = &gk104_ce_cclass; 65 }
138 nv_engine(priv)->sclass = gk104_ce_sclass; 66 return -ENODEV;
139 return 0;
140} 67}
141
142struct nvkm_oclass
143gk104_ce0_oclass = {
144 .handle = NV_ENGINE(CE0, 0xe0),
145 .ofuncs = &(struct nvkm_ofuncs) {
146 .ctor = gk104_ce0_ctor,
147 .dtor = _nvkm_engine_dtor,
148 .init = _nvkm_engine_init,
149 .fini = _nvkm_engine_fini,
150 },
151};
152
153struct nvkm_oclass
154gk104_ce1_oclass = {
155 .handle = NV_ENGINE(CE1, 0xe0),
156 .ofuncs = &(struct nvkm_ofuncs) {
157 .ctor = gk104_ce1_ctor,
158 .dtor = _nvkm_engine_dtor,
159 .init = _nvkm_engine_init,
160 .fini = _nvkm_engine_fini,
161 },
162};
163
164struct nvkm_oclass
165gk104_ce2_oclass = {
166 .handle = NV_ENGINE(CE2, 0xe0),
167 .ofuncs = &(struct nvkm_ofuncs) {
168 .ctor = gk104_ce2_ctor,
169 .dtor = _nvkm_engine_dtor,
170 .init = _nvkm_engine_init,
171 .fini = _nvkm_engine_fini,
172 },
173};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
index 577eb2eead05..8eaa72a59f40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
@@ -21,153 +21,34 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <engine/ce.h> 24#include "priv.h"
25 25
26#include <core/engctx.h> 26#include <nvif/class.h>
27 27
28struct gm204_ce_priv { 28static const struct nvkm_engine_func
29 struct nvkm_engine base; 29gm204_ce = {
30}; 30 .intr = gk104_ce_intr,
31 31 .sclass = {
32/******************************************************************************* 32 { -1, -1, MAXWELL_DMA_COPY_A },
33 * Copy object classes 33 {}
34 ******************************************************************************/ 34 }
35
36static struct nvkm_oclass
37gm204_ce_sclass[] = {
38 { 0xb0b5, &nvkm_object_ofuncs },
39 {},
40};
41
42/*******************************************************************************
43 * PCE context
44 ******************************************************************************/
45
46static struct nvkm_ofuncs
47gm204_ce_context_ofuncs = {
48 .ctor = _nvkm_engctx_ctor,
49 .dtor = _nvkm_engctx_dtor,
50 .init = _nvkm_engctx_init,
51 .fini = _nvkm_engctx_fini,
52 .rd32 = _nvkm_engctx_rd32,
53 .wr32 = _nvkm_engctx_wr32,
54};
55
56static struct nvkm_oclass
57gm204_ce_cclass = {
58 .handle = NV_ENGCTX(CE0, 0x24),
59 .ofuncs = &gm204_ce_context_ofuncs,
60}; 35};
61 36
62/******************************************************************************* 37int
63 * PCE engine/subdev functions 38gm204_ce_new(struct nvkm_device *device, int index,
64 ******************************************************************************/ 39 struct nvkm_engine **pengine)
65
66static void
67gm204_ce_intr(struct nvkm_subdev *subdev)
68{ 40{
69 const int ce = nv_subidx(subdev) - NVDEV_ENGINE_CE0; 41 if (index == NVKM_ENGINE_CE0) {
70 struct gm204_ce_priv *priv = (void *)subdev; 42 return nvkm_engine_new_(&gm204_ce, device, index,
71 u32 stat = nv_rd32(priv, 0x104908 + (ce * 0x1000)); 43 0x00000040, true, pengine);
72 44 } else
73 if (stat) { 45 if (index == NVKM_ENGINE_CE1) {
74 nv_warn(priv, "unhandled intr 0x%08x\n", stat); 46 return nvkm_engine_new_(&gm204_ce, device, index,
75 nv_wr32(priv, 0x104908 + (ce * 0x1000), stat); 47 0x00000080, true, pengine);
48 } else
49 if (index == NVKM_ENGINE_CE2) {
50 return nvkm_engine_new_(&gm204_ce, device, index,
51 0x00200000, true, pengine);
76 } 52 }
53 return -ENODEV;
77} 54}
78
79static int
80gm204_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
81 struct nvkm_oclass *oclass, void *data, u32 size,
82 struct nvkm_object **pobject)
83{
84 struct gm204_ce_priv *priv;
85 int ret;
86
87 ret = nvkm_engine_create(parent, engine, oclass, true,
88 "PCE0", "ce0", &priv);
89 *pobject = nv_object(priv);
90 if (ret)
91 return ret;
92
93 nv_subdev(priv)->unit = 0x00000040;
94 nv_subdev(priv)->intr = gm204_ce_intr;
95 nv_engine(priv)->cclass = &gm204_ce_cclass;
96 nv_engine(priv)->sclass = gm204_ce_sclass;
97 return 0;
98}
99
100static int
101gm204_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
102 struct nvkm_oclass *oclass, void *data, u32 size,
103 struct nvkm_object **pobject)
104{
105 struct gm204_ce_priv *priv;
106 int ret;
107
108 ret = nvkm_engine_create(parent, engine, oclass, true,
109 "PCE1", "ce1", &priv);
110 *pobject = nv_object(priv);
111 if (ret)
112 return ret;
113
114 nv_subdev(priv)->unit = 0x00000080;
115 nv_subdev(priv)->intr = gm204_ce_intr;
116 nv_engine(priv)->cclass = &gm204_ce_cclass;
117 nv_engine(priv)->sclass = gm204_ce_sclass;
118 return 0;
119}
120
121static int
122gm204_ce2_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
123 struct nvkm_oclass *oclass, void *data, u32 size,
124 struct nvkm_object **pobject)
125{
126 struct gm204_ce_priv *priv;
127 int ret;
128
129 ret = nvkm_engine_create(parent, engine, oclass, true,
130 "PCE2", "ce2", &priv);
131 *pobject = nv_object(priv);
132 if (ret)
133 return ret;
134
135 nv_subdev(priv)->unit = 0x00200000;
136 nv_subdev(priv)->intr = gm204_ce_intr;
137 nv_engine(priv)->cclass = &gm204_ce_cclass;
138 nv_engine(priv)->sclass = gm204_ce_sclass;
139 return 0;
140}
141
142struct nvkm_oclass
143gm204_ce0_oclass = {
144 .handle = NV_ENGINE(CE0, 0x24),
145 .ofuncs = &(struct nvkm_ofuncs) {
146 .ctor = gm204_ce0_ctor,
147 .dtor = _nvkm_engine_dtor,
148 .init = _nvkm_engine_init,
149 .fini = _nvkm_engine_fini,
150 },
151};
152
153struct nvkm_oclass
154gm204_ce1_oclass = {
155 .handle = NV_ENGINE(CE1, 0x24),
156 .ofuncs = &(struct nvkm_ofuncs) {
157 .ctor = gm204_ce1_ctor,
158 .dtor = _nvkm_engine_dtor,
159 .init = _nvkm_engine_init,
160 .fini = _nvkm_engine_fini,
161 },
162};
163
164struct nvkm_oclass
165gm204_ce2_oclass = {
166 .handle = NV_ENGINE(CE2, 0x24),
167 .ofuncs = &(struct nvkm_ofuncs) {
168 .ctor = gm204_ce2_ctor,
169 .dtor = _nvkm_engine_dtor,
170 .init = _nvkm_engine_init,
171 .fini = _nvkm_engine_fini,
172 },
173};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
index d8bb4293bc11..402dcbcc2192 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
@@ -21,50 +21,15 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <engine/ce.h> 24#include "priv.h"
25#include <engine/falcon.h>
26#include <engine/fifo.h>
27#include "fuc/gt215.fuc3.h" 25#include "fuc/gt215.fuc3.h"
28 26
29#include <core/client.h> 27#include <core/client.h>
30#include <core/device.h>
31#include <core/enum.h> 28#include <core/enum.h>
29#include <core/gpuobj.h>
30#include <engine/fifo.h>
32 31
33struct gt215_ce_priv { 32#include <nvif/class.h>
34 struct nvkm_falcon base;
35};
36
37/*******************************************************************************
38 * Copy object classes
39 ******************************************************************************/
40
41static struct nvkm_oclass
42gt215_ce_sclass[] = {
43 { 0x85b5, &nvkm_object_ofuncs },
44 {}
45};
46
47/*******************************************************************************
48 * PCE context
49 ******************************************************************************/
50
51static struct nvkm_oclass
52gt215_ce_cclass = {
53 .handle = NV_ENGCTX(CE0, 0xa3),
54 .ofuncs = &(struct nvkm_ofuncs) {
55 .ctor = _nvkm_falcon_context_ctor,
56 .dtor = _nvkm_falcon_context_dtor,
57 .init = _nvkm_falcon_context_init,
58 .fini = _nvkm_falcon_context_fini,
59 .rd32 = _nvkm_falcon_context_rd32,
60 .wr32 = _nvkm_falcon_context_wr32,
61
62 },
63};
64
65/*******************************************************************************
66 * PCE engine/subdev functions
67 ******************************************************************************/
68 33
69static const struct nvkm_enum 34static const struct nvkm_enum
70gt215_ce_isr_error_name[] = { 35gt215_ce_isr_error_name[] = {
@@ -75,78 +40,45 @@ gt215_ce_isr_error_name[] = {
75}; 40};
76 41
77void 42void
78gt215_ce_intr(struct nvkm_subdev *subdev) 43gt215_ce_intr(struct nvkm_falcon *ce, struct nvkm_fifo_chan *chan)
79{ 44{
80 struct nvkm_fifo *pfifo = nvkm_fifo(subdev); 45 struct nvkm_subdev *subdev = &ce->engine.subdev;
81 struct nvkm_engine *engine = nv_engine(subdev); 46 struct nvkm_device *device = subdev->device;
82 struct nvkm_falcon *falcon = (void *)subdev; 47 const u32 base = (subdev->index - NVKM_ENGINE_CE0) * 0x1000;
83 struct nvkm_object *engctx; 48 u32 ssta = nvkm_rd32(device, 0x104040 + base) & 0x0000ffff;
84 u32 dispatch = nv_ro32(falcon, 0x01c); 49 u32 addr = nvkm_rd32(device, 0x104040 + base) >> 16;
85 u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
86 u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
87 u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
88 u32 addr = nv_ro32(falcon, 0x040) >> 16;
89 u32 mthd = (addr & 0x07ff) << 2; 50 u32 mthd = (addr & 0x07ff) << 2;
90 u32 subc = (addr & 0x3800) >> 11; 51 u32 subc = (addr & 0x3800) >> 11;
91 u32 data = nv_ro32(falcon, 0x044); 52 u32 data = nvkm_rd32(device, 0x104044 + base);
92 int chid; 53 const struct nvkm_enum *en =
93 54 nvkm_enum_find(gt215_ce_isr_error_name, ssta);
94 engctx = nvkm_engctx_get(engine, inst); 55
95 chid = pfifo->chid(pfifo, engctx); 56 nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] "
96 57 "subc %d mthd %04x data %08x\n", ssta,
97 if (stat & 0x00000040) { 58 en ? en->name : "", chan ? chan->chid : -1,
98 nv_error(falcon, "DISPATCH_ERROR ["); 59 chan ? chan->inst->addr : 0,
99 nvkm_enum_print(gt215_ce_isr_error_name, ssta); 60 chan ? chan->object.client->name : "unknown",
100 pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n", 61 subc, mthd, data);
101 chid, inst << 12, nvkm_client_name(engctx), subc, 62}
102 mthd, data);
103 nv_wo32(falcon, 0x004, 0x00000040);
104 stat &= ~0x00000040;
105 }
106 63
107 if (stat) { 64static const struct nvkm_falcon_func
108 nv_error(falcon, "unhandled intr 0x%08x\n", stat); 65gt215_ce = {
109 nv_wo32(falcon, 0x004, stat); 66 .code.data = gt215_ce_code,
67 .code.size = sizeof(gt215_ce_code),
68 .data.data = gt215_ce_data,
69 .data.size = sizeof(gt215_ce_data),
70 .pmc_enable = 0x00802000,
71 .intr = gt215_ce_intr,
72 .sclass = {
73 { -1, -1, GT212_DMA },
74 {}
110 } 75 }
76};
111 77
112 nvkm_engctx_put(engctx); 78int
113} 79gt215_ce_new(struct nvkm_device *device, int index,
114 80 struct nvkm_engine **pengine)
115static int
116gt215_ce_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
117 struct nvkm_oclass *oclass, void *data, u32 size,
118 struct nvkm_object **pobject)
119{ 81{
120 bool enable = (nv_device(parent)->chipset != 0xaf); 82 return nvkm_falcon_new_(&gt215_ce, device, index,
121 struct gt215_ce_priv *priv; 83 (device->chipset != 0xaf), 0x104000, pengine);
122 int ret;
123
124 ret = nvkm_falcon_create(parent, engine, oclass, 0x104000, enable,
125 "PCE0", "ce0", &priv);
126 *pobject = nv_object(priv);
127 if (ret)
128 return ret;
129
130 nv_subdev(priv)->unit = 0x00802000;
131 nv_subdev(priv)->intr = gt215_ce_intr;
132 nv_engine(priv)->cclass = &gt215_ce_cclass;
133 nv_engine(priv)->sclass = gt215_ce_sclass;
134 nv_falcon(priv)->code.data = gt215_pce_code;
135 nv_falcon(priv)->code.size = sizeof(gt215_pce_code);
136 nv_falcon(priv)->data.data = gt215_pce_data;
137 nv_falcon(priv)->data.size = sizeof(gt215_pce_data);
138 return 0;
139} 84}
140
141struct nvkm_oclass
142gt215_ce_oclass = {
143 .handle = NV_ENGINE(CE0, 0xa3),
144 .ofuncs = &(struct nvkm_ofuncs) {
145 .ctor = gt215_ce_ctor,
146 .dtor = _nvkm_falcon_dtor,
147 .init = _nvkm_falcon_init,
148 .fini = _nvkm_falcon_fini,
149 .rd32 = _nvkm_falcon_rd32,
150 .wr32 = _nvkm_falcon_wr32,
151 },
152};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
new file mode 100644
index 000000000000..e2fa8b161943
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -0,0 +1,7 @@
1#ifndef __NVKM_CE_PRIV_H__
2#define __NVKM_CE_PRIV_H__
3#include <engine/ce.h>
4
5void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
6void gk104_ce_intr(struct nvkm_engine *);
7#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
index 13f30428a305..bfd01625ec7f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
@@ -25,76 +25,47 @@
25#include <engine/fifo.h> 25#include <engine/fifo.h>
26 26
27#include <core/client.h> 27#include <core/client.h>
28#include <core/engctx.h>
29#include <core/enum.h> 28#include <core/enum.h>
29#include <core/gpuobj.h>
30 30
31struct g84_cipher_priv { 31#include <nvif/class.h>
32 struct nvkm_engine base;
33};
34
35/*******************************************************************************
36 * Crypt object classes
37 ******************************************************************************/
38 32
39static int 33static int
40g84_cipher_object_ctor(struct nvkm_object *parent, 34g84_cipher_oclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
41 struct nvkm_object *engine, 35 int align, struct nvkm_gpuobj **pgpuobj)
42 struct nvkm_oclass *oclass, void *data, u32 size,
43 struct nvkm_object **pobject)
44{ 36{
45 struct nvkm_gpuobj *obj; 37 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16,
46 int ret; 38 align, false, parent, pgpuobj);
47 39 if (ret == 0) {
48 ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent, 40 nvkm_kmap(*pgpuobj);
49 16, 16, 0, &obj); 41 nvkm_wo32(*pgpuobj, 0x00, object->oclass);
50 *pobject = nv_object(obj); 42 nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
51 if (ret) 43 nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
52 return ret; 44 nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
53 45 nvkm_done(*pgpuobj);
54 nv_wo32(obj, 0x00, nv_mclass(obj)); 46 }
55 nv_wo32(obj, 0x04, 0x00000000); 47 return ret;
56 nv_wo32(obj, 0x08, 0x00000000);
57 nv_wo32(obj, 0x0c, 0x00000000);
58 return 0;
59} 48}
60 49
61static struct nvkm_ofuncs 50static const struct nvkm_object_func
62g84_cipher_ofuncs = { 51g84_cipher_oclass_func = {
63 .ctor = g84_cipher_object_ctor, 52 .bind = g84_cipher_oclass_bind,
64 .dtor = _nvkm_gpuobj_dtor,
65 .init = _nvkm_gpuobj_init,
66 .fini = _nvkm_gpuobj_fini,
67 .rd32 = _nvkm_gpuobj_rd32,
68 .wr32 = _nvkm_gpuobj_wr32,
69}; 53};
70 54
71static struct nvkm_oclass 55static int
72g84_cipher_sclass[] = { 56g84_cipher_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
73 { 0x74c1, &g84_cipher_ofuncs }, 57 int align, struct nvkm_gpuobj **pgpuobj)
74 {} 58{
75}; 59 return nvkm_gpuobj_new(object->engine->subdev.device, 256,
60 align, true, parent, pgpuobj);
76 61
77/******************************************************************************* 62}
78 * PCIPHER context
79 ******************************************************************************/
80 63
81static struct nvkm_oclass 64static const struct nvkm_object_func
82g84_cipher_cclass = { 65g84_cipher_cclass = {
83 .handle = NV_ENGCTX(CIPHER, 0x84), 66 .bind = g84_cipher_cclass_bind,
84 .ofuncs = &(struct nvkm_ofuncs) {
85 .ctor = _nvkm_engctx_ctor,
86 .dtor = _nvkm_engctx_dtor,
87 .init = _nvkm_engctx_init,
88 .fini = _nvkm_engctx_fini,
89 .rd32 = _nvkm_engctx_rd32,
90 .wr32 = _nvkm_engctx_wr32,
91 },
92}; 67};
93 68
94/*******************************************************************************
95 * PCIPHER engine/subdev functions
96 ******************************************************************************/
97
98static const struct nvkm_bitfield 69static const struct nvkm_bitfield
99g84_cipher_intr_mask[] = { 70g84_cipher_intr_mask[] = {
100 { 0x00000001, "INVALID_STATE" }, 71 { 0x00000001, "INVALID_STATE" },
@@ -106,79 +77,59 @@ g84_cipher_intr_mask[] = {
106}; 77};
107 78
108static void 79static void
109g84_cipher_intr(struct nvkm_subdev *subdev) 80g84_cipher_intr(struct nvkm_engine *cipher)
110{ 81{
111 struct nvkm_fifo *pfifo = nvkm_fifo(subdev); 82 struct nvkm_subdev *subdev = &cipher->subdev;
112 struct nvkm_engine *engine = nv_engine(subdev); 83 struct nvkm_device *device = subdev->device;
113 struct nvkm_object *engctx; 84 struct nvkm_fifo *fifo = device->fifo;
114 struct g84_cipher_priv *priv = (void *)subdev; 85 struct nvkm_fifo_chan *chan;
115 u32 stat = nv_rd32(priv, 0x102130); 86 u32 stat = nvkm_rd32(device, 0x102130);
116 u32 mthd = nv_rd32(priv, 0x102190); 87 u32 mthd = nvkm_rd32(device, 0x102190);
117 u32 data = nv_rd32(priv, 0x102194); 88 u32 data = nvkm_rd32(device, 0x102194);
118 u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff; 89 u32 inst = nvkm_rd32(device, 0x102188) & 0x7fffffff;
119 int chid; 90 unsigned long flags;
120 91 char msg[128];
121 engctx = nvkm_engctx_get(engine, inst); 92
122 chid = pfifo->chid(pfifo, engctx); 93 chan = nvkm_fifo_chan_inst(fifo, (u64)inst << 12, &flags);
123
124 if (stat) { 94 if (stat) {
125 nv_error(priv, "%s", ""); 95 nvkm_snprintbf(msg, sizeof(msg), g84_cipher_intr_mask, stat);
126 nvkm_bitfield_print(g84_cipher_intr_mask, stat); 96 nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] "
127 pr_cont(" ch %d [0x%010llx %s] mthd 0x%04x data 0x%08x\n", 97 "mthd %04x data %08x\n", stat, msg,
128 chid, (u64)inst << 12, nvkm_client_name(engctx), 98 chan ? chan->chid : -1, (u64)inst << 12,
129 mthd, data); 99 chan ? chan->object.client->name : "unknown",
100 mthd, data);
130 } 101 }
102 nvkm_fifo_chan_put(fifo, flags, &chan);
131 103
132 nv_wr32(priv, 0x102130, stat); 104 nvkm_wr32(device, 0x102130, stat);
133 nv_wr32(priv, 0x10200c, 0x10); 105 nvkm_wr32(device, 0x10200c, 0x10);
134
135 nvkm_engctx_put(engctx);
136} 106}
137 107
138static int 108static int
139g84_cipher_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 109g84_cipher_init(struct nvkm_engine *cipher)
140 struct nvkm_oclass *oclass, void *data, u32 size,
141 struct nvkm_object **pobject)
142{ 110{
143 struct g84_cipher_priv *priv; 111 struct nvkm_device *device = cipher->subdev.device;
144 int ret; 112 nvkm_wr32(device, 0x102130, 0xffffffff);
145 113 nvkm_wr32(device, 0x102140, 0xffffffbf);
146 ret = nvkm_engine_create(parent, engine, oclass, true, 114 nvkm_wr32(device, 0x10200c, 0x00000010);
147 "PCIPHER", "cipher", &priv);
148 *pobject = nv_object(priv);
149 if (ret)
150 return ret;
151
152 nv_subdev(priv)->unit = 0x00004000;
153 nv_subdev(priv)->intr = g84_cipher_intr;
154 nv_engine(priv)->cclass = &g84_cipher_cclass;
155 nv_engine(priv)->sclass = g84_cipher_sclass;
156 return 0; 115 return 0;
157} 116}
158 117
159static int 118static const struct nvkm_engine_func
160g84_cipher_init(struct nvkm_object *object) 119g84_cipher = {
161{ 120 .init = g84_cipher_init,
162 struct g84_cipher_priv *priv = (void *)object; 121 .intr = g84_cipher_intr,
163 int ret; 122 .cclass = &g84_cipher_cclass,
164 123 .sclass = {
165 ret = nvkm_engine_init(&priv->base); 124 { -1, -1, NV74_CIPHER, &g84_cipher_oclass_func },
166 if (ret) 125 {}
167 return ret; 126 }
127};
168 128
169 nv_wr32(priv, 0x102130, 0xffffffff); 129int
170 nv_wr32(priv, 0x102140, 0xffffffbf); 130g84_cipher_new(struct nvkm_device *device, int index,
171 nv_wr32(priv, 0x10200c, 0x00000010); 131 struct nvkm_engine **pengine)
172 return 0; 132{
133 return nvkm_engine_new_(&g84_cipher, device, index,
134 0x00004000, true, pengine);
173} 135}
174
175struct nvkm_oclass
176g84_cipher_oclass = {
177 .handle = NV_ENGINE(CIPHER, 0x84),
178 .ofuncs = &(struct nvkm_ofuncs) {
179 .ctor = g84_cipher_ctor,
180 .dtor = _nvkm_engine_dtor,
181 .init = g84_cipher_init,
182 .fini = _nvkm_engine_fini,
183 },
184};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
index de1bf092b2b2..09032ba36000 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
@@ -1,12 +1,6 @@
1nvkm-y += nvkm/engine/device/acpi.o 1nvkm-y += nvkm/engine/device/acpi.o
2nvkm-y += nvkm/engine/device/base.o 2nvkm-y += nvkm/engine/device/base.o
3nvkm-y += nvkm/engine/device/ctrl.o 3nvkm-y += nvkm/engine/device/ctrl.o
4nvkm-y += nvkm/engine/device/nv04.o 4nvkm-y += nvkm/engine/device/pci.o
5nvkm-y += nvkm/engine/device/nv10.o 5nvkm-y += nvkm/engine/device/tegra.o
6nvkm-y += nvkm/engine/device/nv20.o 6nvkm-y += nvkm/engine/device/user.o
7nvkm-y += nvkm/engine/device/nv30.o
8nvkm-y += nvkm/engine/device/nv40.o
9nvkm-y += nvkm/engine/device/nv50.o
10nvkm-y += nvkm/engine/device/gf100.o
11nvkm-y += nvkm/engine/device/gk104.o
12nvkm-y += nvkm/engine/device/gm100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
index f42706e1d5db..fdca90bc8f0e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
@@ -40,21 +40,19 @@ nvkm_acpi_ntfy(struct notifier_block *nb, unsigned long val, void *data)
40} 40}
41#endif 41#endif
42 42
43int 43void
44nvkm_acpi_fini(struct nvkm_device *device, bool suspend) 44nvkm_acpi_fini(struct nvkm_device *device)
45{ 45{
46#ifdef CONFIG_ACPI 46#ifdef CONFIG_ACPI
47 unregister_acpi_notifier(&device->acpi.nb); 47 unregister_acpi_notifier(&device->acpi.nb);
48#endif 48#endif
49 return 0;
50} 49}
51 50
52int 51void
53nvkm_acpi_init(struct nvkm_device *device) 52nvkm_acpi_init(struct nvkm_device *device)
54{ 53{
55#ifdef CONFIG_ACPI 54#ifdef CONFIG_ACPI
56 device->acpi.nb.notifier_call = nvkm_acpi_ntfy; 55 device->acpi.nb.notifier_call = nvkm_acpi_ntfy;
57 register_acpi_notifier(&device->acpi.nb); 56 register_acpi_notifier(&device->acpi.nb);
58#endif 57#endif
59 return 0;
60} 58}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
index 82dd359ddfa4..1bbe76e0740a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
@@ -3,6 +3,6 @@
3#include <core/os.h> 3#include <core/os.h>
4struct nvkm_device; 4struct nvkm_device;
5 5
6int nvkm_acpi_init(struct nvkm_device *); 6void nvkm_acpi_init(struct nvkm_device *);
7int nvkm_acpi_fini(struct nvkm_device *, bool); 7void nvkm_acpi_fini(struct nvkm_device *);
8#endif 8#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 63d8e52f4b22..94a906b8cb88 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -24,33 +24,33 @@
24#include "priv.h" 24#include "priv.h"
25#include "acpi.h" 25#include "acpi.h"
26 26
27#include <core/client.h>
28#include <core/option.h>
29#include <core/notify.h> 27#include <core/notify.h>
30#include <core/parent.h> 28#include <core/option.h>
31#include <subdev/bios.h>
32#include <subdev/fb.h>
33#include <subdev/instmem.h>
34 29
35#include <nvif/class.h> 30#include <subdev/bios.h>
36#include <nvif/unpack.h>
37 31
38static DEFINE_MUTEX(nv_devices_mutex); 32static DEFINE_MUTEX(nv_devices_mutex);
39static LIST_HEAD(nv_devices); 33static LIST_HEAD(nv_devices);
40 34
41struct nvkm_device * 35static struct nvkm_device *
42nvkm_device_find(u64 name) 36nvkm_device_find_locked(u64 handle)
43{ 37{
44 struct nvkm_device *device, *match = NULL; 38 struct nvkm_device *device;
45 mutex_lock(&nv_devices_mutex);
46 list_for_each_entry(device, &nv_devices, head) { 39 list_for_each_entry(device, &nv_devices, head) {
47 if (device->handle == name) { 40 if (device->handle == handle)
48 match = device; 41 return device;
49 break;
50 }
51 } 42 }
43 return NULL;
44}
45
46struct nvkm_device *
47nvkm_device_find(u64 handle)
48{
49 struct nvkm_device *device;
50 mutex_lock(&nv_devices_mutex);
51 device = nvkm_device_find_locked(handle);
52 mutex_unlock(&nv_devices_mutex); 52 mutex_unlock(&nv_devices_mutex);
53 return match; 53 return device;
54} 54}
55 55
56int 56int
@@ -67,280 +67,2272 @@ nvkm_device_list(u64 *name, int size)
67 return nr; 67 return nr;
68} 68}
69 69
70/****************************************************************************** 70static const struct nvkm_device_chip
71 * nvkm_devobj (0x0080): class implementation 71null_chipset = {
72 *****************************************************************************/ 72 .name = "NULL",
73 .bios = nvkm_bios_new,
74};
75
76static const struct nvkm_device_chip
77nv4_chipset = {
78 .name = "NV04",
79 .bios = nvkm_bios_new,
80 .bus = nv04_bus_new,
81 .clk = nv04_clk_new,
82 .devinit = nv04_devinit_new,
83 .fb = nv04_fb_new,
84 .i2c = nv04_i2c_new,
85 .imem = nv04_instmem_new,
86 .mc = nv04_mc_new,
87 .mmu = nv04_mmu_new,
88 .pci = nv04_pci_new,
89 .timer = nv04_timer_new,
90 .disp = nv04_disp_new,
91 .dma = nv04_dma_new,
92 .fifo = nv04_fifo_new,
93 .gr = nv04_gr_new,
94 .sw = nv04_sw_new,
95};
96
97static const struct nvkm_device_chip
98nv5_chipset = {
99 .name = "NV05",
100 .bios = nvkm_bios_new,
101 .bus = nv04_bus_new,
102 .clk = nv04_clk_new,
103 .devinit = nv05_devinit_new,
104 .fb = nv04_fb_new,
105 .i2c = nv04_i2c_new,
106 .imem = nv04_instmem_new,
107 .mc = nv04_mc_new,
108 .mmu = nv04_mmu_new,
109 .pci = nv04_pci_new,
110 .timer = nv04_timer_new,
111 .disp = nv04_disp_new,
112 .dma = nv04_dma_new,
113 .fifo = nv04_fifo_new,
114 .gr = nv04_gr_new,
115 .sw = nv04_sw_new,
116};
117
118static const struct nvkm_device_chip
119nv10_chipset = {
120 .name = "NV10",
121 .bios = nvkm_bios_new,
122 .bus = nv04_bus_new,
123 .clk = nv04_clk_new,
124 .devinit = nv10_devinit_new,
125 .fb = nv10_fb_new,
126 .gpio = nv10_gpio_new,
127 .i2c = nv04_i2c_new,
128 .imem = nv04_instmem_new,
129 .mc = nv04_mc_new,
130 .mmu = nv04_mmu_new,
131 .pci = nv04_pci_new,
132 .timer = nv04_timer_new,
133 .disp = nv04_disp_new,
134 .dma = nv04_dma_new,
135 .gr = nv10_gr_new,
136};
137
138static const struct nvkm_device_chip
139nv11_chipset = {
140 .name = "NV11",
141 .bios = nvkm_bios_new,
142 .bus = nv04_bus_new,
143 .clk = nv04_clk_new,
144 .devinit = nv10_devinit_new,
145 .fb = nv10_fb_new,
146 .gpio = nv10_gpio_new,
147 .i2c = nv04_i2c_new,
148 .imem = nv04_instmem_new,
149 .mc = nv04_mc_new,
150 .mmu = nv04_mmu_new,
151 .pci = nv04_pci_new,
152 .timer = nv04_timer_new,
153 .disp = nv04_disp_new,
154 .dma = nv04_dma_new,
155 .fifo = nv10_fifo_new,
156 .gr = nv15_gr_new,
157 .sw = nv10_sw_new,
158};
159
160static const struct nvkm_device_chip
161nv15_chipset = {
162 .name = "NV15",
163 .bios = nvkm_bios_new,
164 .bus = nv04_bus_new,
165 .clk = nv04_clk_new,
166 .devinit = nv10_devinit_new,
167 .fb = nv10_fb_new,
168 .gpio = nv10_gpio_new,
169 .i2c = nv04_i2c_new,
170 .imem = nv04_instmem_new,
171 .mc = nv04_mc_new,
172 .mmu = nv04_mmu_new,
173 .pci = nv04_pci_new,
174 .timer = nv04_timer_new,
175 .disp = nv04_disp_new,
176 .dma = nv04_dma_new,
177 .fifo = nv10_fifo_new,
178 .gr = nv15_gr_new,
179 .sw = nv10_sw_new,
180};
181
182static const struct nvkm_device_chip
183nv17_chipset = {
184 .name = "NV17",
185 .bios = nvkm_bios_new,
186 .bus = nv04_bus_new,
187 .clk = nv04_clk_new,
188 .devinit = nv10_devinit_new,
189 .fb = nv10_fb_new,
190 .gpio = nv10_gpio_new,
191 .i2c = nv04_i2c_new,
192 .imem = nv04_instmem_new,
193 .mc = nv04_mc_new,
194 .mmu = nv04_mmu_new,
195 .pci = nv04_pci_new,
196 .timer = nv04_timer_new,
197 .disp = nv04_disp_new,
198 .dma = nv04_dma_new,
199 .fifo = nv17_fifo_new,
200 .gr = nv17_gr_new,
201 .sw = nv10_sw_new,
202};
203
204static const struct nvkm_device_chip
205nv18_chipset = {
206 .name = "NV18",
207 .bios = nvkm_bios_new,
208 .bus = nv04_bus_new,
209 .clk = nv04_clk_new,
210 .devinit = nv10_devinit_new,
211 .fb = nv10_fb_new,
212 .gpio = nv10_gpio_new,
213 .i2c = nv04_i2c_new,
214 .imem = nv04_instmem_new,
215 .mc = nv04_mc_new,
216 .mmu = nv04_mmu_new,
217 .pci = nv04_pci_new,
218 .timer = nv04_timer_new,
219 .disp = nv04_disp_new,
220 .dma = nv04_dma_new,
221 .fifo = nv17_fifo_new,
222 .gr = nv17_gr_new,
223 .sw = nv10_sw_new,
224};
225
226static const struct nvkm_device_chip
227nv1a_chipset = {
228 .name = "nForce",
229 .bios = nvkm_bios_new,
230 .bus = nv04_bus_new,
231 .clk = nv04_clk_new,
232 .devinit = nv1a_devinit_new,
233 .fb = nv1a_fb_new,
234 .gpio = nv10_gpio_new,
235 .i2c = nv04_i2c_new,
236 .imem = nv04_instmem_new,
237 .mc = nv04_mc_new,
238 .mmu = nv04_mmu_new,
239 .pci = nv04_pci_new,
240 .timer = nv04_timer_new,
241 .disp = nv04_disp_new,
242 .dma = nv04_dma_new,
243 .fifo = nv10_fifo_new,
244 .gr = nv15_gr_new,
245 .sw = nv10_sw_new,
246};
247
248static const struct nvkm_device_chip
249nv1f_chipset = {
250 .name = "nForce2",
251 .bios = nvkm_bios_new,
252 .bus = nv04_bus_new,
253 .clk = nv04_clk_new,
254 .devinit = nv1a_devinit_new,
255 .fb = nv1a_fb_new,
256 .gpio = nv10_gpio_new,
257 .i2c = nv04_i2c_new,
258 .imem = nv04_instmem_new,
259 .mc = nv04_mc_new,
260 .mmu = nv04_mmu_new,
261 .pci = nv04_pci_new,
262 .timer = nv04_timer_new,
263 .disp = nv04_disp_new,
264 .dma = nv04_dma_new,
265 .fifo = nv17_fifo_new,
266 .gr = nv17_gr_new,
267 .sw = nv10_sw_new,
268};
269
270static const struct nvkm_device_chip
271nv20_chipset = {
272 .name = "NV20",
273 .bios = nvkm_bios_new,
274 .bus = nv04_bus_new,
275 .clk = nv04_clk_new,
276 .devinit = nv20_devinit_new,
277 .fb = nv20_fb_new,
278 .gpio = nv10_gpio_new,
279 .i2c = nv04_i2c_new,
280 .imem = nv04_instmem_new,
281 .mc = nv04_mc_new,
282 .mmu = nv04_mmu_new,
283 .pci = nv04_pci_new,
284 .timer = nv04_timer_new,
285 .disp = nv04_disp_new,
286 .dma = nv04_dma_new,
287 .fifo = nv17_fifo_new,
288 .gr = nv20_gr_new,
289 .sw = nv10_sw_new,
290};
291
292static const struct nvkm_device_chip
293nv25_chipset = {
294 .name = "NV25",
295 .bios = nvkm_bios_new,
296 .bus = nv04_bus_new,
297 .clk = nv04_clk_new,
298 .devinit = nv20_devinit_new,
299 .fb = nv25_fb_new,
300 .gpio = nv10_gpio_new,
301 .i2c = nv04_i2c_new,
302 .imem = nv04_instmem_new,
303 .mc = nv04_mc_new,
304 .mmu = nv04_mmu_new,
305 .pci = nv04_pci_new,
306 .timer = nv04_timer_new,
307 .disp = nv04_disp_new,
308 .dma = nv04_dma_new,
309 .fifo = nv17_fifo_new,
310 .gr = nv25_gr_new,
311 .sw = nv10_sw_new,
312};
313
314static const struct nvkm_device_chip
315nv28_chipset = {
316 .name = "NV28",
317 .bios = nvkm_bios_new,
318 .bus = nv04_bus_new,
319 .clk = nv04_clk_new,
320 .devinit = nv20_devinit_new,
321 .fb = nv25_fb_new,
322 .gpio = nv10_gpio_new,
323 .i2c = nv04_i2c_new,
324 .imem = nv04_instmem_new,
325 .mc = nv04_mc_new,
326 .mmu = nv04_mmu_new,
327 .pci = nv04_pci_new,
328 .timer = nv04_timer_new,
329 .disp = nv04_disp_new,
330 .dma = nv04_dma_new,
331 .fifo = nv17_fifo_new,
332 .gr = nv25_gr_new,
333 .sw = nv10_sw_new,
334};
335
336static const struct nvkm_device_chip
337nv2a_chipset = {
338 .name = "NV2A",
339 .bios = nvkm_bios_new,
340 .bus = nv04_bus_new,
341 .clk = nv04_clk_new,
342 .devinit = nv20_devinit_new,
343 .fb = nv25_fb_new,
344 .gpio = nv10_gpio_new,
345 .i2c = nv04_i2c_new,
346 .imem = nv04_instmem_new,
347 .mc = nv04_mc_new,
348 .mmu = nv04_mmu_new,
349 .pci = nv04_pci_new,
350 .timer = nv04_timer_new,
351 .disp = nv04_disp_new,
352 .dma = nv04_dma_new,
353 .fifo = nv17_fifo_new,
354 .gr = nv2a_gr_new,
355 .sw = nv10_sw_new,
356};
357
358static const struct nvkm_device_chip
359nv30_chipset = {
360 .name = "NV30",
361 .bios = nvkm_bios_new,
362 .bus = nv04_bus_new,
363 .clk = nv04_clk_new,
364 .devinit = nv20_devinit_new,
365 .fb = nv30_fb_new,
366 .gpio = nv10_gpio_new,
367 .i2c = nv04_i2c_new,
368 .imem = nv04_instmem_new,
369 .mc = nv04_mc_new,
370 .mmu = nv04_mmu_new,
371 .pci = nv04_pci_new,
372 .timer = nv04_timer_new,
373 .disp = nv04_disp_new,
374 .dma = nv04_dma_new,
375 .fifo = nv17_fifo_new,
376 .gr = nv30_gr_new,
377 .sw = nv10_sw_new,
378};
379
380static const struct nvkm_device_chip
381nv31_chipset = {
382 .name = "NV31",
383 .bios = nvkm_bios_new,
384 .bus = nv31_bus_new,
385 .clk = nv04_clk_new,
386 .devinit = nv20_devinit_new,
387 .fb = nv30_fb_new,
388 .gpio = nv10_gpio_new,
389 .i2c = nv04_i2c_new,
390 .imem = nv04_instmem_new,
391 .mc = nv04_mc_new,
392 .mmu = nv04_mmu_new,
393 .pci = nv04_pci_new,
394 .timer = nv04_timer_new,
395 .disp = nv04_disp_new,
396 .dma = nv04_dma_new,
397 .fifo = nv17_fifo_new,
398 .gr = nv30_gr_new,
399 .mpeg = nv31_mpeg_new,
400 .sw = nv10_sw_new,
401};
402
403static const struct nvkm_device_chip
404nv34_chipset = {
405 .name = "NV34",
406 .bios = nvkm_bios_new,
407 .bus = nv31_bus_new,
408 .clk = nv04_clk_new,
409 .devinit = nv10_devinit_new,
410 .fb = nv10_fb_new,
411 .gpio = nv10_gpio_new,
412 .i2c = nv04_i2c_new,
413 .imem = nv04_instmem_new,
414 .mc = nv04_mc_new,
415 .mmu = nv04_mmu_new,
416 .pci = nv04_pci_new,
417 .timer = nv04_timer_new,
418 .disp = nv04_disp_new,
419 .dma = nv04_dma_new,
420 .fifo = nv17_fifo_new,
421 .gr = nv34_gr_new,
422 .mpeg = nv31_mpeg_new,
423 .sw = nv10_sw_new,
424};
425
426static const struct nvkm_device_chip
427nv35_chipset = {
428 .name = "NV35",
429 .bios = nvkm_bios_new,
430 .bus = nv04_bus_new,
431 .clk = nv04_clk_new,
432 .devinit = nv20_devinit_new,
433 .fb = nv35_fb_new,
434 .gpio = nv10_gpio_new,
435 .i2c = nv04_i2c_new,
436 .imem = nv04_instmem_new,
437 .mc = nv04_mc_new,
438 .mmu = nv04_mmu_new,
439 .pci = nv04_pci_new,
440 .timer = nv04_timer_new,
441 .disp = nv04_disp_new,
442 .dma = nv04_dma_new,
443 .fifo = nv17_fifo_new,
444 .gr = nv35_gr_new,
445 .sw = nv10_sw_new,
446};
447
448static const struct nvkm_device_chip
449nv36_chipset = {
450 .name = "NV36",
451 .bios = nvkm_bios_new,
452 .bus = nv31_bus_new,
453 .clk = nv04_clk_new,
454 .devinit = nv20_devinit_new,
455 .fb = nv36_fb_new,
456 .gpio = nv10_gpio_new,
457 .i2c = nv04_i2c_new,
458 .imem = nv04_instmem_new,
459 .mc = nv04_mc_new,
460 .mmu = nv04_mmu_new,
461 .pci = nv04_pci_new,
462 .timer = nv04_timer_new,
463 .disp = nv04_disp_new,
464 .dma = nv04_dma_new,
465 .fifo = nv17_fifo_new,
466 .gr = nv35_gr_new,
467 .mpeg = nv31_mpeg_new,
468 .sw = nv10_sw_new,
469};
470
471static const struct nvkm_device_chip
472nv40_chipset = {
473 .name = "NV40",
474 .bios = nvkm_bios_new,
475 .bus = nv31_bus_new,
476 .clk = nv40_clk_new,
477 .devinit = nv1a_devinit_new,
478 .fb = nv40_fb_new,
479 .gpio = nv10_gpio_new,
480 .i2c = nv04_i2c_new,
481 .imem = nv40_instmem_new,
482 .mc = nv04_mc_new,
483 .mmu = nv04_mmu_new,
484 .pci = nv40_pci_new,
485 .therm = nv40_therm_new,
486 .timer = nv40_timer_new,
487 .volt = nv40_volt_new,
488 .disp = nv04_disp_new,
489 .dma = nv04_dma_new,
490 .fifo = nv40_fifo_new,
491 .gr = nv40_gr_new,
492 .mpeg = nv40_mpeg_new,
493 .pm = nv40_pm_new,
494 .sw = nv10_sw_new,
495};
496
497static const struct nvkm_device_chip
498nv41_chipset = {
499 .name = "NV41",
500 .bios = nvkm_bios_new,
501 .bus = nv31_bus_new,
502 .clk = nv40_clk_new,
503 .devinit = nv1a_devinit_new,
504 .fb = nv41_fb_new,
505 .gpio = nv10_gpio_new,
506 .i2c = nv04_i2c_new,
507 .imem = nv40_instmem_new,
508 .mc = nv04_mc_new,
509 .mmu = nv41_mmu_new,
510 .pci = nv40_pci_new,
511 .therm = nv40_therm_new,
512 .timer = nv41_timer_new,
513 .volt = nv40_volt_new,
514 .disp = nv04_disp_new,
515 .dma = nv04_dma_new,
516 .fifo = nv40_fifo_new,
517 .gr = nv40_gr_new,
518 .mpeg = nv40_mpeg_new,
519 .pm = nv40_pm_new,
520 .sw = nv10_sw_new,
521};
522
523static const struct nvkm_device_chip
524nv42_chipset = {
525 .name = "NV42",
526 .bios = nvkm_bios_new,
527 .bus = nv31_bus_new,
528 .clk = nv40_clk_new,
529 .devinit = nv1a_devinit_new,
530 .fb = nv41_fb_new,
531 .gpio = nv10_gpio_new,
532 .i2c = nv04_i2c_new,
533 .imem = nv40_instmem_new,
534 .mc = nv04_mc_new,
535 .mmu = nv41_mmu_new,
536 .pci = nv40_pci_new,
537 .therm = nv40_therm_new,
538 .timer = nv41_timer_new,
539 .volt = nv40_volt_new,
540 .disp = nv04_disp_new,
541 .dma = nv04_dma_new,
542 .fifo = nv40_fifo_new,
543 .gr = nv40_gr_new,
544 .mpeg = nv40_mpeg_new,
545 .pm = nv40_pm_new,
546 .sw = nv10_sw_new,
547};
548
549static const struct nvkm_device_chip
550nv43_chipset = {
551 .name = "NV43",
552 .bios = nvkm_bios_new,
553 .bus = nv31_bus_new,
554 .clk = nv40_clk_new,
555 .devinit = nv1a_devinit_new,
556 .fb = nv41_fb_new,
557 .gpio = nv10_gpio_new,
558 .i2c = nv04_i2c_new,
559 .imem = nv40_instmem_new,
560 .mc = nv04_mc_new,
561 .mmu = nv41_mmu_new,
562 .pci = nv40_pci_new,
563 .therm = nv40_therm_new,
564 .timer = nv41_timer_new,
565 .volt = nv40_volt_new,
566 .disp = nv04_disp_new,
567 .dma = nv04_dma_new,
568 .fifo = nv40_fifo_new,
569 .gr = nv40_gr_new,
570 .mpeg = nv40_mpeg_new,
571 .pm = nv40_pm_new,
572 .sw = nv10_sw_new,
573};
574
575static const struct nvkm_device_chip
576nv44_chipset = {
577 .name = "NV44",
578 .bios = nvkm_bios_new,
579 .bus = nv31_bus_new,
580 .clk = nv40_clk_new,
581 .devinit = nv1a_devinit_new,
582 .fb = nv44_fb_new,
583 .gpio = nv10_gpio_new,
584 .i2c = nv04_i2c_new,
585 .imem = nv40_instmem_new,
586 .mc = nv44_mc_new,
587 .mmu = nv44_mmu_new,
588 .pci = nv40_pci_new,
589 .therm = nv40_therm_new,
590 .timer = nv41_timer_new,
591 .volt = nv40_volt_new,
592 .disp = nv04_disp_new,
593 .dma = nv04_dma_new,
594 .fifo = nv40_fifo_new,
595 .gr = nv44_gr_new,
596 .mpeg = nv44_mpeg_new,
597 .pm = nv40_pm_new,
598 .sw = nv10_sw_new,
599};
600
601static const struct nvkm_device_chip
602nv45_chipset = {
603 .name = "NV45",
604 .bios = nvkm_bios_new,
605 .bus = nv31_bus_new,
606 .clk = nv40_clk_new,
607 .devinit = nv1a_devinit_new,
608 .fb = nv40_fb_new,
609 .gpio = nv10_gpio_new,
610 .i2c = nv04_i2c_new,
611 .imem = nv40_instmem_new,
612 .mc = nv04_mc_new,
613 .mmu = nv04_mmu_new,
614 .pci = nv40_pci_new,
615 .therm = nv40_therm_new,
616 .timer = nv41_timer_new,
617 .volt = nv40_volt_new,
618 .disp = nv04_disp_new,
619 .dma = nv04_dma_new,
620 .fifo = nv40_fifo_new,
621 .gr = nv40_gr_new,
622 .mpeg = nv44_mpeg_new,
623 .pm = nv40_pm_new,
624 .sw = nv10_sw_new,
625};
626
627static const struct nvkm_device_chip
628nv46_chipset = {
629 .name = "G72",
630 .bios = nvkm_bios_new,
631 .bus = nv31_bus_new,
632 .clk = nv40_clk_new,
633 .devinit = nv1a_devinit_new,
634 .fb = nv46_fb_new,
635 .gpio = nv10_gpio_new,
636 .i2c = nv04_i2c_new,
637 .imem = nv40_instmem_new,
638 .mc = nv44_mc_new,
639 .mmu = nv44_mmu_new,
640 .pci = nv4c_pci_new,
641 .therm = nv40_therm_new,
642 .timer = nv41_timer_new,
643 .volt = nv40_volt_new,
644 .disp = nv04_disp_new,
645 .dma = nv04_dma_new,
646 .fifo = nv40_fifo_new,
647 .gr = nv44_gr_new,
648 .mpeg = nv44_mpeg_new,
649 .pm = nv40_pm_new,
650 .sw = nv10_sw_new,
651};
652
653static const struct nvkm_device_chip
654nv47_chipset = {
655 .name = "G70",
656 .bios = nvkm_bios_new,
657 .bus = nv31_bus_new,
658 .clk = nv40_clk_new,
659 .devinit = nv1a_devinit_new,
660 .fb = nv47_fb_new,
661 .gpio = nv10_gpio_new,
662 .i2c = nv04_i2c_new,
663 .imem = nv40_instmem_new,
664 .mc = nv04_mc_new,
665 .mmu = nv41_mmu_new,
666 .pci = nv40_pci_new,
667 .therm = nv40_therm_new,
668 .timer = nv41_timer_new,
669 .volt = nv40_volt_new,
670 .disp = nv04_disp_new,
671 .dma = nv04_dma_new,
672 .fifo = nv40_fifo_new,
673 .gr = nv40_gr_new,
674 .mpeg = nv44_mpeg_new,
675 .pm = nv40_pm_new,
676 .sw = nv10_sw_new,
677};
678
679static const struct nvkm_device_chip
680nv49_chipset = {
681 .name = "G71",
682 .bios = nvkm_bios_new,
683 .bus = nv31_bus_new,
684 .clk = nv40_clk_new,
685 .devinit = nv1a_devinit_new,
686 .fb = nv49_fb_new,
687 .gpio = nv10_gpio_new,
688 .i2c = nv04_i2c_new,
689 .imem = nv40_instmem_new,
690 .mc = nv04_mc_new,
691 .mmu = nv41_mmu_new,
692 .pci = nv40_pci_new,
693 .therm = nv40_therm_new,
694 .timer = nv41_timer_new,
695 .volt = nv40_volt_new,
696 .disp = nv04_disp_new,
697 .dma = nv04_dma_new,
698 .fifo = nv40_fifo_new,
699 .gr = nv40_gr_new,
700 .mpeg = nv44_mpeg_new,
701 .pm = nv40_pm_new,
702 .sw = nv10_sw_new,
703};
704
705static const struct nvkm_device_chip
706nv4a_chipset = {
707 .name = "NV44A",
708 .bios = nvkm_bios_new,
709 .bus = nv31_bus_new,
710 .clk = nv40_clk_new,
711 .devinit = nv1a_devinit_new,
712 .fb = nv44_fb_new,
713 .gpio = nv10_gpio_new,
714 .i2c = nv04_i2c_new,
715 .imem = nv40_instmem_new,
716 .mc = nv44_mc_new,
717 .mmu = nv44_mmu_new,
718 .pci = nv40_pci_new,
719 .therm = nv40_therm_new,
720 .timer = nv41_timer_new,
721 .volt = nv40_volt_new,
722 .disp = nv04_disp_new,
723 .dma = nv04_dma_new,
724 .fifo = nv40_fifo_new,
725 .gr = nv44_gr_new,
726 .mpeg = nv44_mpeg_new,
727 .pm = nv40_pm_new,
728 .sw = nv10_sw_new,
729};
730
731static const struct nvkm_device_chip
732nv4b_chipset = {
733 .name = "G73",
734 .bios = nvkm_bios_new,
735 .bus = nv31_bus_new,
736 .clk = nv40_clk_new,
737 .devinit = nv1a_devinit_new,
738 .fb = nv49_fb_new,
739 .gpio = nv10_gpio_new,
740 .i2c = nv04_i2c_new,
741 .imem = nv40_instmem_new,
742 .mc = nv04_mc_new,
743 .mmu = nv41_mmu_new,
744 .pci = nv40_pci_new,
745 .therm = nv40_therm_new,
746 .timer = nv41_timer_new,
747 .volt = nv40_volt_new,
748 .disp = nv04_disp_new,
749 .dma = nv04_dma_new,
750 .fifo = nv40_fifo_new,
751 .gr = nv40_gr_new,
752 .mpeg = nv44_mpeg_new,
753 .pm = nv40_pm_new,
754 .sw = nv10_sw_new,
755};
756
757static const struct nvkm_device_chip
758nv4c_chipset = {
759 .name = "C61",
760 .bios = nvkm_bios_new,
761 .bus = nv31_bus_new,
762 .clk = nv40_clk_new,
763 .devinit = nv1a_devinit_new,
764 .fb = nv46_fb_new,
765 .gpio = nv10_gpio_new,
766 .i2c = nv04_i2c_new,
767 .imem = nv40_instmem_new,
768 .mc = nv44_mc_new,
769 .mmu = nv44_mmu_new,
770 .pci = nv4c_pci_new,
771 .therm = nv40_therm_new,
772 .timer = nv41_timer_new,
773 .volt = nv40_volt_new,
774 .disp = nv04_disp_new,
775 .dma = nv04_dma_new,
776 .fifo = nv40_fifo_new,
777 .gr = nv44_gr_new,
778 .mpeg = nv44_mpeg_new,
779 .pm = nv40_pm_new,
780 .sw = nv10_sw_new,
781};
782
783static const struct nvkm_device_chip
784nv4e_chipset = {
785 .name = "C51",
786 .bios = nvkm_bios_new,
787 .bus = nv31_bus_new,
788 .clk = nv40_clk_new,
789 .devinit = nv1a_devinit_new,
790 .fb = nv4e_fb_new,
791 .gpio = nv10_gpio_new,
792 .i2c = nv4e_i2c_new,
793 .imem = nv40_instmem_new,
794 .mc = nv44_mc_new,
795 .mmu = nv44_mmu_new,
796 .pci = nv4c_pci_new,
797 .therm = nv40_therm_new,
798 .timer = nv41_timer_new,
799 .volt = nv40_volt_new,
800 .disp = nv04_disp_new,
801 .dma = nv04_dma_new,
802 .fifo = nv40_fifo_new,
803 .gr = nv44_gr_new,
804 .mpeg = nv44_mpeg_new,
805 .pm = nv40_pm_new,
806 .sw = nv10_sw_new,
807};
808
809static const struct nvkm_device_chip
810nv50_chipset = {
811 .name = "G80",
812 .bar = nv50_bar_new,
813 .bios = nvkm_bios_new,
814 .bus = nv50_bus_new,
815 .clk = nv50_clk_new,
816 .devinit = nv50_devinit_new,
817 .fb = nv50_fb_new,
818 .fuse = nv50_fuse_new,
819 .gpio = nv50_gpio_new,
820 .i2c = nv50_i2c_new,
821 .imem = nv50_instmem_new,
822 .mc = nv50_mc_new,
823 .mmu = nv50_mmu_new,
824 .mxm = nv50_mxm_new,
825 .pci = nv50_pci_new,
826 .therm = nv50_therm_new,
827 .timer = nv41_timer_new,
828 .volt = nv40_volt_new,
829 .disp = nv50_disp_new,
830 .dma = nv50_dma_new,
831 .fifo = nv50_fifo_new,
832 .gr = nv50_gr_new,
833 .mpeg = nv50_mpeg_new,
834 .pm = nv50_pm_new,
835 .sw = nv50_sw_new,
836};
837
838static const struct nvkm_device_chip
839nv63_chipset = {
840 .name = "C73",
841 .bios = nvkm_bios_new,
842 .bus = nv31_bus_new,
843 .clk = nv40_clk_new,
844 .devinit = nv1a_devinit_new,
845 .fb = nv46_fb_new,
846 .gpio = nv10_gpio_new,
847 .i2c = nv04_i2c_new,
848 .imem = nv40_instmem_new,
849 .mc = nv44_mc_new,
850 .mmu = nv44_mmu_new,
851 .pci = nv4c_pci_new,
852 .therm = nv40_therm_new,
853 .timer = nv41_timer_new,
854 .volt = nv40_volt_new,
855 .disp = nv04_disp_new,
856 .dma = nv04_dma_new,
857 .fifo = nv40_fifo_new,
858 .gr = nv44_gr_new,
859 .mpeg = nv44_mpeg_new,
860 .pm = nv40_pm_new,
861 .sw = nv10_sw_new,
862};
863
864static const struct nvkm_device_chip
865nv67_chipset = {
866 .name = "C67",
867 .bios = nvkm_bios_new,
868 .bus = nv31_bus_new,
869 .clk = nv40_clk_new,
870 .devinit = nv1a_devinit_new,
871 .fb = nv46_fb_new,
872 .gpio = nv10_gpio_new,
873 .i2c = nv04_i2c_new,
874 .imem = nv40_instmem_new,
875 .mc = nv44_mc_new,
876 .mmu = nv44_mmu_new,
877 .pci = nv4c_pci_new,
878 .therm = nv40_therm_new,
879 .timer = nv41_timer_new,
880 .volt = nv40_volt_new,
881 .disp = nv04_disp_new,
882 .dma = nv04_dma_new,
883 .fifo = nv40_fifo_new,
884 .gr = nv44_gr_new,
885 .mpeg = nv44_mpeg_new,
886 .pm = nv40_pm_new,
887 .sw = nv10_sw_new,
888};
889
890static const struct nvkm_device_chip
891nv68_chipset = {
892 .name = "C68",
893 .bios = nvkm_bios_new,
894 .bus = nv31_bus_new,
895 .clk = nv40_clk_new,
896 .devinit = nv1a_devinit_new,
897 .fb = nv46_fb_new,
898 .gpio = nv10_gpio_new,
899 .i2c = nv04_i2c_new,
900 .imem = nv40_instmem_new,
901 .mc = nv44_mc_new,
902 .mmu = nv44_mmu_new,
903 .pci = nv4c_pci_new,
904 .therm = nv40_therm_new,
905 .timer = nv41_timer_new,
906 .volt = nv40_volt_new,
907 .disp = nv04_disp_new,
908 .dma = nv04_dma_new,
909 .fifo = nv40_fifo_new,
910 .gr = nv44_gr_new,
911 .mpeg = nv44_mpeg_new,
912 .pm = nv40_pm_new,
913 .sw = nv10_sw_new,
914};
915
916static const struct nvkm_device_chip
917nv84_chipset = {
918 .name = "G84",
919 .bar = g84_bar_new,
920 .bios = nvkm_bios_new,
921 .bus = nv50_bus_new,
922 .clk = g84_clk_new,
923 .devinit = g84_devinit_new,
924 .fb = g84_fb_new,
925 .fuse = nv50_fuse_new,
926 .gpio = nv50_gpio_new,
927 .i2c = nv50_i2c_new,
928 .imem = nv50_instmem_new,
929 .mc = nv50_mc_new,
930 .mmu = nv50_mmu_new,
931 .mxm = nv50_mxm_new,
932 .pci = nv50_pci_new,
933 .therm = g84_therm_new,
934 .timer = nv41_timer_new,
935 .volt = nv40_volt_new,
936 .bsp = g84_bsp_new,
937 .cipher = g84_cipher_new,
938 .disp = g84_disp_new,
939 .dma = nv50_dma_new,
940 .fifo = g84_fifo_new,
941 .gr = g84_gr_new,
942 .mpeg = g84_mpeg_new,
943 .pm = g84_pm_new,
944 .sw = nv50_sw_new,
945 .vp = g84_vp_new,
946};
947
948static const struct nvkm_device_chip
949nv86_chipset = {
950 .name = "G86",
951 .bar = g84_bar_new,
952 .bios = nvkm_bios_new,
953 .bus = nv50_bus_new,
954 .clk = g84_clk_new,
955 .devinit = g84_devinit_new,
956 .fb = g84_fb_new,
957 .fuse = nv50_fuse_new,
958 .gpio = nv50_gpio_new,
959 .i2c = nv50_i2c_new,
960 .imem = nv50_instmem_new,
961 .mc = nv50_mc_new,
962 .mmu = nv50_mmu_new,
963 .mxm = nv50_mxm_new,
964 .pci = nv50_pci_new,
965 .therm = g84_therm_new,
966 .timer = nv41_timer_new,
967 .volt = nv40_volt_new,
968 .bsp = g84_bsp_new,
969 .cipher = g84_cipher_new,
970 .disp = g84_disp_new,
971 .dma = nv50_dma_new,
972 .fifo = g84_fifo_new,
973 .gr = g84_gr_new,
974 .mpeg = g84_mpeg_new,
975 .pm = g84_pm_new,
976 .sw = nv50_sw_new,
977 .vp = g84_vp_new,
978};
979
980static const struct nvkm_device_chip
981nv92_chipset = {
982 .name = "G92",
983 .bar = g84_bar_new,
984 .bios = nvkm_bios_new,
985 .bus = nv50_bus_new,
986 .clk = g84_clk_new,
987 .devinit = g84_devinit_new,
988 .fb = g84_fb_new,
989 .fuse = nv50_fuse_new,
990 .gpio = nv50_gpio_new,
991 .i2c = nv50_i2c_new,
992 .imem = nv50_instmem_new,
993 .mc = nv50_mc_new,
994 .mmu = nv50_mmu_new,
995 .mxm = nv50_mxm_new,
996 .pci = nv50_pci_new,
997 .therm = g84_therm_new,
998 .timer = nv41_timer_new,
999 .volt = nv40_volt_new,
1000 .bsp = g84_bsp_new,
1001 .cipher = g84_cipher_new,
1002 .disp = g84_disp_new,
1003 .dma = nv50_dma_new,
1004 .fifo = g84_fifo_new,
1005 .gr = g84_gr_new,
1006 .mpeg = g84_mpeg_new,
1007 .pm = g84_pm_new,
1008 .sw = nv50_sw_new,
1009 .vp = g84_vp_new,
1010};
1011
1012static const struct nvkm_device_chip
1013nv94_chipset = {
1014 .name = "G94",
1015 .bar = g84_bar_new,
1016 .bios = nvkm_bios_new,
1017 .bus = g94_bus_new,
1018 .clk = g84_clk_new,
1019 .devinit = g84_devinit_new,
1020 .fb = g84_fb_new,
1021 .fuse = nv50_fuse_new,
1022 .gpio = g94_gpio_new,
1023 .i2c = g94_i2c_new,
1024 .imem = nv50_instmem_new,
1025 .mc = nv50_mc_new,
1026 .mmu = nv50_mmu_new,
1027 .mxm = nv50_mxm_new,
1028 .pci = nv40_pci_new,
1029 .therm = g84_therm_new,
1030 .timer = nv41_timer_new,
1031 .volt = nv40_volt_new,
1032 .bsp = g84_bsp_new,
1033 .cipher = g84_cipher_new,
1034 .disp = g94_disp_new,
1035 .dma = nv50_dma_new,
1036 .fifo = g84_fifo_new,
1037 .gr = g84_gr_new,
1038 .mpeg = g84_mpeg_new,
1039 .pm = g84_pm_new,
1040 .sw = nv50_sw_new,
1041 .vp = g84_vp_new,
1042};
1043
1044static const struct nvkm_device_chip
1045nv96_chipset = {
1046 .name = "G96",
1047 .bar = g84_bar_new,
1048 .bios = nvkm_bios_new,
1049 .bus = g94_bus_new,
1050 .clk = g84_clk_new,
1051 .devinit = g84_devinit_new,
1052 .fb = g84_fb_new,
1053 .fuse = nv50_fuse_new,
1054 .gpio = g94_gpio_new,
1055 .i2c = g94_i2c_new,
1056 .imem = nv50_instmem_new,
1057 .mc = nv50_mc_new,
1058 .mmu = nv50_mmu_new,
1059 .mxm = nv50_mxm_new,
1060 .pci = nv40_pci_new,
1061 .therm = g84_therm_new,
1062 .timer = nv41_timer_new,
1063 .volt = nv40_volt_new,
1064 .bsp = g84_bsp_new,
1065 .cipher = g84_cipher_new,
1066 .disp = g94_disp_new,
1067 .dma = nv50_dma_new,
1068 .fifo = g84_fifo_new,
1069 .gr = g84_gr_new,
1070 .mpeg = g84_mpeg_new,
1071 .pm = g84_pm_new,
1072 .sw = nv50_sw_new,
1073 .vp = g84_vp_new,
1074};
1075
1076static const struct nvkm_device_chip
1077nv98_chipset = {
1078 .name = "G98",
1079 .bar = g84_bar_new,
1080 .bios = nvkm_bios_new,
1081 .bus = g94_bus_new,
1082 .clk = g84_clk_new,
1083 .devinit = g98_devinit_new,
1084 .fb = g84_fb_new,
1085 .fuse = nv50_fuse_new,
1086 .gpio = g94_gpio_new,
1087 .i2c = g94_i2c_new,
1088 .imem = nv50_instmem_new,
1089 .mc = g98_mc_new,
1090 .mmu = nv50_mmu_new,
1091 .mxm = nv50_mxm_new,
1092 .pci = nv40_pci_new,
1093 .therm = g84_therm_new,
1094 .timer = nv41_timer_new,
1095 .volt = nv40_volt_new,
1096 .disp = g94_disp_new,
1097 .dma = nv50_dma_new,
1098 .fifo = g84_fifo_new,
1099 .gr = g84_gr_new,
1100 .mspdec = g98_mspdec_new,
1101 .msppp = g98_msppp_new,
1102 .msvld = g98_msvld_new,
1103 .pm = g84_pm_new,
1104 .sec = g98_sec_new,
1105 .sw = nv50_sw_new,
1106};
73 1107
74struct nvkm_devobj { 1108static const struct nvkm_device_chip
75 struct nvkm_parent base; 1109nva0_chipset = {
76 struct nvkm_object *subdev[NVDEV_SUBDEV_NR]; 1110 .name = "GT200",
1111 .bar = g84_bar_new,
1112 .bios = nvkm_bios_new,
1113 .bus = g94_bus_new,
1114 .clk = g84_clk_new,
1115 .devinit = g84_devinit_new,
1116 .fb = g84_fb_new,
1117 .fuse = nv50_fuse_new,
1118 .gpio = g94_gpio_new,
1119 .i2c = nv50_i2c_new,
1120 .imem = nv50_instmem_new,
1121 .mc = g98_mc_new,
1122 .mmu = nv50_mmu_new,
1123 .mxm = nv50_mxm_new,
1124 .pci = nv40_pci_new,
1125 .therm = g84_therm_new,
1126 .timer = nv41_timer_new,
1127 .volt = nv40_volt_new,
1128 .bsp = g84_bsp_new,
1129 .cipher = g84_cipher_new,
1130 .disp = gt200_disp_new,
1131 .dma = nv50_dma_new,
1132 .fifo = g84_fifo_new,
1133 .gr = gt200_gr_new,
1134 .mpeg = g84_mpeg_new,
1135 .pm = gt200_pm_new,
1136 .sw = nv50_sw_new,
1137 .vp = g84_vp_new,
1138};
1139
1140static const struct nvkm_device_chip
1141nva3_chipset = {
1142 .name = "GT215",
1143 .bar = g84_bar_new,
1144 .bios = nvkm_bios_new,
1145 .bus = g94_bus_new,
1146 .clk = gt215_clk_new,
1147 .devinit = gt215_devinit_new,
1148 .fb = gt215_fb_new,
1149 .fuse = nv50_fuse_new,
1150 .gpio = g94_gpio_new,
1151 .i2c = g94_i2c_new,
1152 .imem = nv50_instmem_new,
1153 .mc = g98_mc_new,
1154 .mmu = nv50_mmu_new,
1155 .mxm = nv50_mxm_new,
1156 .pci = nv40_pci_new,
1157 .pmu = gt215_pmu_new,
1158 .therm = gt215_therm_new,
1159 .timer = nv41_timer_new,
1160 .volt = nv40_volt_new,
1161 .ce[0] = gt215_ce_new,
1162 .disp = gt215_disp_new,
1163 .dma = nv50_dma_new,
1164 .fifo = g84_fifo_new,
1165 .gr = gt215_gr_new,
1166 .mpeg = g84_mpeg_new,
1167 .mspdec = gt215_mspdec_new,
1168 .msppp = gt215_msppp_new,
1169 .msvld = gt215_msvld_new,
1170 .pm = gt215_pm_new,
1171 .sw = nv50_sw_new,
1172};
1173
1174static const struct nvkm_device_chip
1175nva5_chipset = {
1176 .name = "GT216",
1177 .bar = g84_bar_new,
1178 .bios = nvkm_bios_new,
1179 .bus = g94_bus_new,
1180 .clk = gt215_clk_new,
1181 .devinit = gt215_devinit_new,
1182 .fb = gt215_fb_new,
1183 .fuse = nv50_fuse_new,
1184 .gpio = g94_gpio_new,
1185 .i2c = g94_i2c_new,
1186 .imem = nv50_instmem_new,
1187 .mc = g98_mc_new,
1188 .mmu = nv50_mmu_new,
1189 .mxm = nv50_mxm_new,
1190 .pci = nv40_pci_new,
1191 .pmu = gt215_pmu_new,
1192 .therm = gt215_therm_new,
1193 .timer = nv41_timer_new,
1194 .volt = nv40_volt_new,
1195 .ce[0] = gt215_ce_new,
1196 .disp = gt215_disp_new,
1197 .dma = nv50_dma_new,
1198 .fifo = g84_fifo_new,
1199 .gr = gt215_gr_new,
1200 .mspdec = gt215_mspdec_new,
1201 .msppp = gt215_msppp_new,
1202 .msvld = gt215_msvld_new,
1203 .pm = gt215_pm_new,
1204 .sw = nv50_sw_new,
1205};
1206
1207static const struct nvkm_device_chip
1208nva8_chipset = {
1209 .name = "GT218",
1210 .bar = g84_bar_new,
1211 .bios = nvkm_bios_new,
1212 .bus = g94_bus_new,
1213 .clk = gt215_clk_new,
1214 .devinit = gt215_devinit_new,
1215 .fb = gt215_fb_new,
1216 .fuse = nv50_fuse_new,
1217 .gpio = g94_gpio_new,
1218 .i2c = g94_i2c_new,
1219 .imem = nv50_instmem_new,
1220 .mc = g98_mc_new,
1221 .mmu = nv50_mmu_new,
1222 .mxm = nv50_mxm_new,
1223 .pci = nv40_pci_new,
1224 .pmu = gt215_pmu_new,
1225 .therm = gt215_therm_new,
1226 .timer = nv41_timer_new,
1227 .volt = nv40_volt_new,
1228 .ce[0] = gt215_ce_new,
1229 .disp = gt215_disp_new,
1230 .dma = nv50_dma_new,
1231 .fifo = g84_fifo_new,
1232 .gr = gt215_gr_new,
1233 .mspdec = gt215_mspdec_new,
1234 .msppp = gt215_msppp_new,
1235 .msvld = gt215_msvld_new,
1236 .pm = gt215_pm_new,
1237 .sw = nv50_sw_new,
1238};
1239
1240static const struct nvkm_device_chip
1241nvaa_chipset = {
1242 .name = "MCP77/MCP78",
1243 .bar = g84_bar_new,
1244 .bios = nvkm_bios_new,
1245 .bus = g94_bus_new,
1246 .clk = mcp77_clk_new,
1247 .devinit = g98_devinit_new,
1248 .fb = mcp77_fb_new,
1249 .fuse = nv50_fuse_new,
1250 .gpio = g94_gpio_new,
1251 .i2c = g94_i2c_new,
1252 .imem = nv50_instmem_new,
1253 .mc = g98_mc_new,
1254 .mmu = nv50_mmu_new,
1255 .mxm = nv50_mxm_new,
1256 .pci = nv40_pci_new,
1257 .therm = g84_therm_new,
1258 .timer = nv41_timer_new,
1259 .volt = nv40_volt_new,
1260 .disp = g94_disp_new,
1261 .dma = nv50_dma_new,
1262 .fifo = g84_fifo_new,
1263 .gr = gt200_gr_new,
1264 .mspdec = g98_mspdec_new,
1265 .msppp = g98_msppp_new,
1266 .msvld = g98_msvld_new,
1267 .pm = g84_pm_new,
1268 .sec = g98_sec_new,
1269 .sw = nv50_sw_new,
1270};
1271
1272static const struct nvkm_device_chip
1273nvac_chipset = {
1274 .name = "MCP79/MCP7A",
1275 .bar = g84_bar_new,
1276 .bios = nvkm_bios_new,
1277 .bus = g94_bus_new,
1278 .clk = mcp77_clk_new,
1279 .devinit = g98_devinit_new,
1280 .fb = mcp77_fb_new,
1281 .fuse = nv50_fuse_new,
1282 .gpio = g94_gpio_new,
1283 .i2c = g94_i2c_new,
1284 .imem = nv50_instmem_new,
1285 .mc = g98_mc_new,
1286 .mmu = nv50_mmu_new,
1287 .mxm = nv50_mxm_new,
1288 .pci = nv40_pci_new,
1289 .therm = g84_therm_new,
1290 .timer = nv41_timer_new,
1291 .volt = nv40_volt_new,
1292 .disp = g94_disp_new,
1293 .dma = nv50_dma_new,
1294 .fifo = g84_fifo_new,
1295 .gr = mcp79_gr_new,
1296 .mspdec = g98_mspdec_new,
1297 .msppp = g98_msppp_new,
1298 .msvld = g98_msvld_new,
1299 .pm = g84_pm_new,
1300 .sec = g98_sec_new,
1301 .sw = nv50_sw_new,
1302};
1303
1304static const struct nvkm_device_chip
1305nvaf_chipset = {
1306 .name = "MCP89",
1307 .bar = g84_bar_new,
1308 .bios = nvkm_bios_new,
1309 .bus = g94_bus_new,
1310 .clk = gt215_clk_new,
1311 .devinit = mcp89_devinit_new,
1312 .fb = mcp89_fb_new,
1313 .fuse = nv50_fuse_new,
1314 .gpio = g94_gpio_new,
1315 .i2c = g94_i2c_new,
1316 .imem = nv50_instmem_new,
1317 .mc = g98_mc_new,
1318 .mmu = nv50_mmu_new,
1319 .mxm = nv50_mxm_new,
1320 .pci = nv40_pci_new,
1321 .pmu = gt215_pmu_new,
1322 .therm = gt215_therm_new,
1323 .timer = nv41_timer_new,
1324 .volt = nv40_volt_new,
1325 .ce[0] = gt215_ce_new,
1326 .disp = gt215_disp_new,
1327 .dma = nv50_dma_new,
1328 .fifo = g84_fifo_new,
1329 .gr = mcp89_gr_new,
1330 .mspdec = gt215_mspdec_new,
1331 .msppp = gt215_msppp_new,
1332 .msvld = mcp89_msvld_new,
1333 .pm = gt215_pm_new,
1334 .sw = nv50_sw_new,
1335};
1336
1337static const struct nvkm_device_chip
1338nvc0_chipset = {
1339 .name = "GF100",
1340 .bar = gf100_bar_new,
1341 .bios = nvkm_bios_new,
1342 .bus = gf100_bus_new,
1343 .clk = gf100_clk_new,
1344 .devinit = gf100_devinit_new,
1345 .fb = gf100_fb_new,
1346 .fuse = gf100_fuse_new,
1347 .gpio = g94_gpio_new,
1348 .i2c = g94_i2c_new,
1349 .ibus = gf100_ibus_new,
1350 .imem = nv50_instmem_new,
1351 .ltc = gf100_ltc_new,
1352 .mc = gf100_mc_new,
1353 .mmu = gf100_mmu_new,
1354 .mxm = nv50_mxm_new,
1355 .pci = gf100_pci_new,
1356 .pmu = gf100_pmu_new,
1357 .therm = gt215_therm_new,
1358 .timer = nv41_timer_new,
1359 .volt = nv40_volt_new,
1360 .ce[0] = gf100_ce_new,
1361 .ce[1] = gf100_ce_new,
1362 .disp = gt215_disp_new,
1363 .dma = gf100_dma_new,
1364 .fifo = gf100_fifo_new,
1365 .gr = gf100_gr_new,
1366 .mspdec = gf100_mspdec_new,
1367 .msppp = gf100_msppp_new,
1368 .msvld = gf100_msvld_new,
1369 .pm = gf100_pm_new,
1370 .sw = gf100_sw_new,
1371};
1372
1373static const struct nvkm_device_chip
1374nvc1_chipset = {
1375 .name = "GF108",
1376 .bar = gf100_bar_new,
1377 .bios = nvkm_bios_new,
1378 .bus = gf100_bus_new,
1379 .clk = gf100_clk_new,
1380 .devinit = gf100_devinit_new,
1381 .fb = gf100_fb_new,
1382 .fuse = gf100_fuse_new,
1383 .gpio = g94_gpio_new,
1384 .i2c = g94_i2c_new,
1385 .ibus = gf100_ibus_new,
1386 .imem = nv50_instmem_new,
1387 .ltc = gf100_ltc_new,
1388 .mc = gf100_mc_new,
1389 .mmu = gf100_mmu_new,
1390 .mxm = nv50_mxm_new,
1391 .pci = nv40_pci_new,
1392 .pmu = gf100_pmu_new,
1393 .therm = gt215_therm_new,
1394 .timer = nv41_timer_new,
1395 .volt = nv40_volt_new,
1396 .ce[0] = gf100_ce_new,
1397 .disp = gt215_disp_new,
1398 .dma = gf100_dma_new,
1399 .fifo = gf100_fifo_new,
1400 .gr = gf108_gr_new,
1401 .mspdec = gf100_mspdec_new,
1402 .msppp = gf100_msppp_new,
1403 .msvld = gf100_msvld_new,
1404 .pm = gf108_pm_new,
1405 .sw = gf100_sw_new,
1406};
1407
1408static const struct nvkm_device_chip
1409nvc3_chipset = {
1410 .name = "GF106",
1411 .bar = gf100_bar_new,
1412 .bios = nvkm_bios_new,
1413 .bus = gf100_bus_new,
1414 .clk = gf100_clk_new,
1415 .devinit = gf100_devinit_new,
1416 .fb = gf100_fb_new,
1417 .fuse = gf100_fuse_new,
1418 .gpio = g94_gpio_new,
1419 .i2c = g94_i2c_new,
1420 .ibus = gf100_ibus_new,
1421 .imem = nv50_instmem_new,
1422 .ltc = gf100_ltc_new,
1423 .mc = gf100_mc_new,
1424 .mmu = gf100_mmu_new,
1425 .mxm = nv50_mxm_new,
1426 .pci = nv40_pci_new,
1427 .pmu = gf100_pmu_new,
1428 .therm = gt215_therm_new,
1429 .timer = nv41_timer_new,
1430 .volt = nv40_volt_new,
1431 .ce[0] = gf100_ce_new,
1432 .disp = gt215_disp_new,
1433 .dma = gf100_dma_new,
1434 .fifo = gf100_fifo_new,
1435 .gr = gf104_gr_new,
1436 .mspdec = gf100_mspdec_new,
1437 .msppp = gf100_msppp_new,
1438 .msvld = gf100_msvld_new,
1439 .pm = gf100_pm_new,
1440 .sw = gf100_sw_new,
1441};
1442
1443static const struct nvkm_device_chip
1444nvc4_chipset = {
1445 .name = "GF104",
1446 .bar = gf100_bar_new,
1447 .bios = nvkm_bios_new,
1448 .bus = gf100_bus_new,
1449 .clk = gf100_clk_new,
1450 .devinit = gf100_devinit_new,
1451 .fb = gf100_fb_new,
1452 .fuse = gf100_fuse_new,
1453 .gpio = g94_gpio_new,
1454 .i2c = g94_i2c_new,
1455 .ibus = gf100_ibus_new,
1456 .imem = nv50_instmem_new,
1457 .ltc = gf100_ltc_new,
1458 .mc = gf100_mc_new,
1459 .mmu = gf100_mmu_new,
1460 .mxm = nv50_mxm_new,
1461 .pci = gf100_pci_new,
1462 .pmu = gf100_pmu_new,
1463 .therm = gt215_therm_new,
1464 .timer = nv41_timer_new,
1465 .volt = nv40_volt_new,
1466 .ce[0] = gf100_ce_new,
1467 .ce[1] = gf100_ce_new,
1468 .disp = gt215_disp_new,
1469 .dma = gf100_dma_new,
1470 .fifo = gf100_fifo_new,
1471 .gr = gf104_gr_new,
1472 .mspdec = gf100_mspdec_new,
1473 .msppp = gf100_msppp_new,
1474 .msvld = gf100_msvld_new,
1475 .pm = gf100_pm_new,
1476 .sw = gf100_sw_new,
1477};
1478
1479static const struct nvkm_device_chip
1480nvc8_chipset = {
1481 .name = "GF110",
1482 .bar = gf100_bar_new,
1483 .bios = nvkm_bios_new,
1484 .bus = gf100_bus_new,
1485 .clk = gf100_clk_new,
1486 .devinit = gf100_devinit_new,
1487 .fb = gf100_fb_new,
1488 .fuse = gf100_fuse_new,
1489 .gpio = g94_gpio_new,
1490 .i2c = g94_i2c_new,
1491 .ibus = gf100_ibus_new,
1492 .imem = nv50_instmem_new,
1493 .ltc = gf100_ltc_new,
1494 .mc = gf100_mc_new,
1495 .mmu = gf100_mmu_new,
1496 .mxm = nv50_mxm_new,
1497 .pci = gf100_pci_new,
1498 .pmu = gf100_pmu_new,
1499 .therm = gt215_therm_new,
1500 .timer = nv41_timer_new,
1501 .volt = nv40_volt_new,
1502 .ce[0] = gf100_ce_new,
1503 .ce[1] = gf100_ce_new,
1504 .disp = gt215_disp_new,
1505 .dma = gf100_dma_new,
1506 .fifo = gf100_fifo_new,
1507 .gr = gf110_gr_new,
1508 .mspdec = gf100_mspdec_new,
1509 .msppp = gf100_msppp_new,
1510 .msvld = gf100_msvld_new,
1511 .pm = gf100_pm_new,
1512 .sw = gf100_sw_new,
1513};
1514
1515static const struct nvkm_device_chip
1516nvce_chipset = {
1517 .name = "GF114",
1518 .bar = gf100_bar_new,
1519 .bios = nvkm_bios_new,
1520 .bus = gf100_bus_new,
1521 .clk = gf100_clk_new,
1522 .devinit = gf100_devinit_new,
1523 .fb = gf100_fb_new,
1524 .fuse = gf100_fuse_new,
1525 .gpio = g94_gpio_new,
1526 .i2c = g94_i2c_new,
1527 .ibus = gf100_ibus_new,
1528 .imem = nv50_instmem_new,
1529 .ltc = gf100_ltc_new,
1530 .mc = gf100_mc_new,
1531 .mmu = gf100_mmu_new,
1532 .mxm = nv50_mxm_new,
1533 .pci = gf100_pci_new,
1534 .pmu = gf100_pmu_new,
1535 .therm = gt215_therm_new,
1536 .timer = nv41_timer_new,
1537 .volt = nv40_volt_new,
1538 .ce[0] = gf100_ce_new,
1539 .ce[1] = gf100_ce_new,
1540 .disp = gt215_disp_new,
1541 .dma = gf100_dma_new,
1542 .fifo = gf100_fifo_new,
1543 .gr = gf104_gr_new,
1544 .mspdec = gf100_mspdec_new,
1545 .msppp = gf100_msppp_new,
1546 .msvld = gf100_msvld_new,
1547 .pm = gf100_pm_new,
1548 .sw = gf100_sw_new,
1549};
1550
1551static const struct nvkm_device_chip
1552nvcf_chipset = {
1553 .name = "GF116",
1554 .bar = gf100_bar_new,
1555 .bios = nvkm_bios_new,
1556 .bus = gf100_bus_new,
1557 .clk = gf100_clk_new,
1558 .devinit = gf100_devinit_new,
1559 .fb = gf100_fb_new,
1560 .fuse = gf100_fuse_new,
1561 .gpio = g94_gpio_new,
1562 .i2c = g94_i2c_new,
1563 .ibus = gf100_ibus_new,
1564 .imem = nv50_instmem_new,
1565 .ltc = gf100_ltc_new,
1566 .mc = gf100_mc_new,
1567 .mmu = gf100_mmu_new,
1568 .mxm = nv50_mxm_new,
1569 .pci = nv40_pci_new,
1570 .pmu = gf100_pmu_new,
1571 .therm = gt215_therm_new,
1572 .timer = nv41_timer_new,
1573 .volt = nv40_volt_new,
1574 .ce[0] = gf100_ce_new,
1575 .disp = gt215_disp_new,
1576 .dma = gf100_dma_new,
1577 .fifo = gf100_fifo_new,
1578 .gr = gf104_gr_new,
1579 .mspdec = gf100_mspdec_new,
1580 .msppp = gf100_msppp_new,
1581 .msvld = gf100_msvld_new,
1582 .pm = gf100_pm_new,
1583 .sw = gf100_sw_new,
1584};
1585
1586static const struct nvkm_device_chip
1587nvd7_chipset = {
1588 .name = "GF117",
1589 .bar = gf100_bar_new,
1590 .bios = nvkm_bios_new,
1591 .bus = gf100_bus_new,
1592 .clk = gf100_clk_new,
1593 .devinit = gf100_devinit_new,
1594 .fb = gf100_fb_new,
1595 .fuse = gf100_fuse_new,
1596 .gpio = gf119_gpio_new,
1597 .i2c = gf117_i2c_new,
1598 .ibus = gf100_ibus_new,
1599 .imem = nv50_instmem_new,
1600 .ltc = gf100_ltc_new,
1601 .mc = gf100_mc_new,
1602 .mmu = gf100_mmu_new,
1603 .mxm = nv50_mxm_new,
1604 .pci = nv40_pci_new,
1605 .therm = gf119_therm_new,
1606 .timer = nv41_timer_new,
1607 .ce[0] = gf100_ce_new,
1608 .disp = gf119_disp_new,
1609 .dma = gf119_dma_new,
1610 .fifo = gf100_fifo_new,
1611 .gr = gf117_gr_new,
1612 .mspdec = gf100_mspdec_new,
1613 .msppp = gf100_msppp_new,
1614 .msvld = gf100_msvld_new,
1615 .pm = gf117_pm_new,
1616 .sw = gf100_sw_new,
1617};
1618
1619static const struct nvkm_device_chip
1620nvd9_chipset = {
1621 .name = "GF119",
1622 .bar = gf100_bar_new,
1623 .bios = nvkm_bios_new,
1624 .bus = gf100_bus_new,
1625 .clk = gf100_clk_new,
1626 .devinit = gf100_devinit_new,
1627 .fb = gf100_fb_new,
1628 .fuse = gf100_fuse_new,
1629 .gpio = gf119_gpio_new,
1630 .i2c = gf119_i2c_new,
1631 .ibus = gf100_ibus_new,
1632 .imem = nv50_instmem_new,
1633 .ltc = gf100_ltc_new,
1634 .mc = gf100_mc_new,
1635 .mmu = gf100_mmu_new,
1636 .mxm = nv50_mxm_new,
1637 .pci = nv40_pci_new,
1638 .pmu = gf119_pmu_new,
1639 .therm = gf119_therm_new,
1640 .timer = nv41_timer_new,
1641 .volt = nv40_volt_new,
1642 .ce[0] = gf100_ce_new,
1643 .disp = gf119_disp_new,
1644 .dma = gf119_dma_new,
1645 .fifo = gf100_fifo_new,
1646 .gr = gf119_gr_new,
1647 .mspdec = gf100_mspdec_new,
1648 .msppp = gf100_msppp_new,
1649 .msvld = gf100_msvld_new,
1650 .pm = gf117_pm_new,
1651 .sw = gf100_sw_new,
1652};
1653
1654static const struct nvkm_device_chip
1655nve4_chipset = {
1656 .name = "GK104",
1657 .bar = gf100_bar_new,
1658 .bios = nvkm_bios_new,
1659 .bus = gf100_bus_new,
1660 .clk = gk104_clk_new,
1661 .devinit = gf100_devinit_new,
1662 .fb = gk104_fb_new,
1663 .fuse = gf100_fuse_new,
1664 .gpio = gk104_gpio_new,
1665 .i2c = gk104_i2c_new,
1666 .ibus = gk104_ibus_new,
1667 .imem = nv50_instmem_new,
1668 .ltc = gk104_ltc_new,
1669 .mc = gf100_mc_new,
1670 .mmu = gf100_mmu_new,
1671 .mxm = nv50_mxm_new,
1672 .pci = nv40_pci_new,
1673 .pmu = gk104_pmu_new,
1674 .therm = gf119_therm_new,
1675 .timer = nv41_timer_new,
1676 .volt = nv40_volt_new,
1677 .ce[0] = gk104_ce_new,
1678 .ce[1] = gk104_ce_new,
1679 .ce[2] = gk104_ce_new,
1680 .disp = gk104_disp_new,
1681 .dma = gf119_dma_new,
1682 .fifo = gk104_fifo_new,
1683 .gr = gk104_gr_new,
1684 .mspdec = gk104_mspdec_new,
1685 .msppp = gf100_msppp_new,
1686 .msvld = gk104_msvld_new,
1687 .pm = gk104_pm_new,
1688 .sw = gf100_sw_new,
1689};
1690
1691static const struct nvkm_device_chip
1692nve6_chipset = {
1693 .name = "GK106",
1694 .bar = gf100_bar_new,
1695 .bios = nvkm_bios_new,
1696 .bus = gf100_bus_new,
1697 .clk = gk104_clk_new,
1698 .devinit = gf100_devinit_new,
1699 .fb = gk104_fb_new,
1700 .fuse = gf100_fuse_new,
1701 .gpio = gk104_gpio_new,
1702 .i2c = gk104_i2c_new,
1703 .ibus = gk104_ibus_new,
1704 .imem = nv50_instmem_new,
1705 .ltc = gk104_ltc_new,
1706 .mc = gf100_mc_new,
1707 .mmu = gf100_mmu_new,
1708 .mxm = nv50_mxm_new,
1709 .pci = nv40_pci_new,
1710 .pmu = gk104_pmu_new,
1711 .therm = gf119_therm_new,
1712 .timer = nv41_timer_new,
1713 .volt = nv40_volt_new,
1714 .ce[0] = gk104_ce_new,
1715 .ce[1] = gk104_ce_new,
1716 .ce[2] = gk104_ce_new,
1717 .disp = gk104_disp_new,
1718 .dma = gf119_dma_new,
1719 .fifo = gk104_fifo_new,
1720 .gr = gk104_gr_new,
1721 .mspdec = gk104_mspdec_new,
1722 .msppp = gf100_msppp_new,
1723 .msvld = gk104_msvld_new,
1724 .pm = gk104_pm_new,
1725 .sw = gf100_sw_new,
1726};
1727
1728static const struct nvkm_device_chip
1729nve7_chipset = {
1730 .name = "GK107",
1731 .bar = gf100_bar_new,
1732 .bios = nvkm_bios_new,
1733 .bus = gf100_bus_new,
1734 .clk = gk104_clk_new,
1735 .devinit = gf100_devinit_new,
1736 .fb = gk104_fb_new,
1737 .fuse = gf100_fuse_new,
1738 .gpio = gk104_gpio_new,
1739 .i2c = gk104_i2c_new,
1740 .ibus = gk104_ibus_new,
1741 .imem = nv50_instmem_new,
1742 .ltc = gk104_ltc_new,
1743 .mc = gf100_mc_new,
1744 .mmu = gf100_mmu_new,
1745 .mxm = nv50_mxm_new,
1746 .pci = nv40_pci_new,
1747 .pmu = gf119_pmu_new,
1748 .therm = gf119_therm_new,
1749 .timer = nv41_timer_new,
1750 .volt = nv40_volt_new,
1751 .ce[0] = gk104_ce_new,
1752 .ce[1] = gk104_ce_new,
1753 .ce[2] = gk104_ce_new,
1754 .disp = gk104_disp_new,
1755 .dma = gf119_dma_new,
1756 .fifo = gk104_fifo_new,
1757 .gr = gk104_gr_new,
1758 .mspdec = gk104_mspdec_new,
1759 .msppp = gf100_msppp_new,
1760 .msvld = gk104_msvld_new,
1761 .pm = gk104_pm_new,
1762 .sw = gf100_sw_new,
1763};
1764
1765static const struct nvkm_device_chip
1766nvea_chipset = {
1767 .name = "GK20A",
1768 .bar = gk20a_bar_new,
1769 .bus = gf100_bus_new,
1770 .clk = gk20a_clk_new,
1771 .fb = gk20a_fb_new,
1772 .fuse = gf100_fuse_new,
1773 .ibus = gk20a_ibus_new,
1774 .imem = gk20a_instmem_new,
1775 .ltc = gk104_ltc_new,
1776 .mc = gk20a_mc_new,
1777 .mmu = gf100_mmu_new,
1778 .pmu = gk20a_pmu_new,
1779 .timer = gk20a_timer_new,
1780 .volt = gk20a_volt_new,
1781 .ce[2] = gk104_ce_new,
1782 .dma = gf119_dma_new,
1783 .fifo = gk20a_fifo_new,
1784 .gr = gk20a_gr_new,
1785 .pm = gk104_pm_new,
1786 .sw = gf100_sw_new,
1787};
1788
1789static const struct nvkm_device_chip
1790nvf0_chipset = {
1791 .name = "GK110",
1792 .bar = gf100_bar_new,
1793 .bios = nvkm_bios_new,
1794 .bus = gf100_bus_new,
1795 .clk = gk104_clk_new,
1796 .devinit = gf100_devinit_new,
1797 .fb = gk104_fb_new,
1798 .fuse = gf100_fuse_new,
1799 .gpio = gk104_gpio_new,
1800 .i2c = gk104_i2c_new,
1801 .ibus = gk104_ibus_new,
1802 .imem = nv50_instmem_new,
1803 .ltc = gk104_ltc_new,
1804 .mc = gf100_mc_new,
1805 .mmu = gf100_mmu_new,
1806 .mxm = nv50_mxm_new,
1807 .pci = nv40_pci_new,
1808 .pmu = gk110_pmu_new,
1809 .therm = gf119_therm_new,
1810 .timer = nv41_timer_new,
1811 .volt = nv40_volt_new,
1812 .ce[0] = gk104_ce_new,
1813 .ce[1] = gk104_ce_new,
1814 .ce[2] = gk104_ce_new,
1815 .disp = gk110_disp_new,
1816 .dma = gf119_dma_new,
1817 .fifo = gk104_fifo_new,
1818 .gr = gk110_gr_new,
1819 .mspdec = gk104_mspdec_new,
1820 .msppp = gf100_msppp_new,
1821 .msvld = gk104_msvld_new,
1822 .sw = gf100_sw_new,
1823};
1824
1825static const struct nvkm_device_chip
1826nvf1_chipset = {
1827 .name = "GK110B",
1828 .bar = gf100_bar_new,
1829 .bios = nvkm_bios_new,
1830 .bus = gf100_bus_new,
1831 .clk = gk104_clk_new,
1832 .devinit = gf100_devinit_new,
1833 .fb = gk104_fb_new,
1834 .fuse = gf100_fuse_new,
1835 .gpio = gk104_gpio_new,
1836 .i2c = gf119_i2c_new,
1837 .ibus = gk104_ibus_new,
1838 .imem = nv50_instmem_new,
1839 .ltc = gk104_ltc_new,
1840 .mc = gf100_mc_new,
1841 .mmu = gf100_mmu_new,
1842 .mxm = nv50_mxm_new,
1843 .pci = nv40_pci_new,
1844 .pmu = gk110_pmu_new,
1845 .therm = gf119_therm_new,
1846 .timer = nv41_timer_new,
1847 .volt = nv40_volt_new,
1848 .ce[0] = gk104_ce_new,
1849 .ce[1] = gk104_ce_new,
1850 .ce[2] = gk104_ce_new,
1851 .disp = gk110_disp_new,
1852 .dma = gf119_dma_new,
1853 .fifo = gk104_fifo_new,
1854 .gr = gk110b_gr_new,
1855 .mspdec = gk104_mspdec_new,
1856 .msppp = gf100_msppp_new,
1857 .msvld = gk104_msvld_new,
1858 .sw = gf100_sw_new,
1859};
1860
1861static const struct nvkm_device_chip
1862nv106_chipset = {
1863 .name = "GK208B",
1864 .bar = gf100_bar_new,
1865 .bios = nvkm_bios_new,
1866 .bus = gf100_bus_new,
1867 .clk = gk104_clk_new,
1868 .devinit = gf100_devinit_new,
1869 .fb = gk104_fb_new,
1870 .fuse = gf100_fuse_new,
1871 .gpio = gk104_gpio_new,
1872 .i2c = gk104_i2c_new,
1873 .ibus = gk104_ibus_new,
1874 .imem = nv50_instmem_new,
1875 .ltc = gk104_ltc_new,
1876 .mc = gk20a_mc_new,
1877 .mmu = gf100_mmu_new,
1878 .mxm = nv50_mxm_new,
1879 .pci = nv40_pci_new,
1880 .pmu = gk208_pmu_new,
1881 .therm = gf119_therm_new,
1882 .timer = nv41_timer_new,
1883 .volt = nv40_volt_new,
1884 .ce[0] = gk104_ce_new,
1885 .ce[1] = gk104_ce_new,
1886 .ce[2] = gk104_ce_new,
1887 .disp = gk110_disp_new,
1888 .dma = gf119_dma_new,
1889 .fifo = gk208_fifo_new,
1890 .gr = gk208_gr_new,
1891 .mspdec = gk104_mspdec_new,
1892 .msppp = gf100_msppp_new,
1893 .msvld = gk104_msvld_new,
1894 .sw = gf100_sw_new,
1895};
1896
1897static const struct nvkm_device_chip
1898nv108_chipset = {
1899 .name = "GK208",
1900 .bar = gf100_bar_new,
1901 .bios = nvkm_bios_new,
1902 .bus = gf100_bus_new,
1903 .clk = gk104_clk_new,
1904 .devinit = gf100_devinit_new,
1905 .fb = gk104_fb_new,
1906 .fuse = gf100_fuse_new,
1907 .gpio = gk104_gpio_new,
1908 .i2c = gk104_i2c_new,
1909 .ibus = gk104_ibus_new,
1910 .imem = nv50_instmem_new,
1911 .ltc = gk104_ltc_new,
1912 .mc = gk20a_mc_new,
1913 .mmu = gf100_mmu_new,
1914 .mxm = nv50_mxm_new,
1915 .pci = nv40_pci_new,
1916 .pmu = gk208_pmu_new,
1917 .therm = gf119_therm_new,
1918 .timer = nv41_timer_new,
1919 .volt = nv40_volt_new,
1920 .ce[0] = gk104_ce_new,
1921 .ce[1] = gk104_ce_new,
1922 .ce[2] = gk104_ce_new,
1923 .disp = gk110_disp_new,
1924 .dma = gf119_dma_new,
1925 .fifo = gk208_fifo_new,
1926 .gr = gk208_gr_new,
1927 .mspdec = gk104_mspdec_new,
1928 .msppp = gf100_msppp_new,
1929 .msvld = gk104_msvld_new,
1930 .sw = gf100_sw_new,
1931};
1932
1933static const struct nvkm_device_chip
1934nv117_chipset = {
1935 .name = "GM107",
1936 .bar = gf100_bar_new,
1937 .bios = nvkm_bios_new,
1938 .bus = gf100_bus_new,
1939 .clk = gk104_clk_new,
1940 .devinit = gm107_devinit_new,
1941 .fb = gm107_fb_new,
1942 .fuse = gm107_fuse_new,
1943 .gpio = gk104_gpio_new,
1944 .i2c = gf119_i2c_new,
1945 .ibus = gk104_ibus_new,
1946 .imem = nv50_instmem_new,
1947 .ltc = gm107_ltc_new,
1948 .mc = gk20a_mc_new,
1949 .mmu = gf100_mmu_new,
1950 .mxm = nv50_mxm_new,
1951 .pci = nv40_pci_new,
1952 .pmu = gm107_pmu_new,
1953 .therm = gm107_therm_new,
1954 .timer = gk20a_timer_new,
1955 .ce[0] = gk104_ce_new,
1956 .ce[2] = gk104_ce_new,
1957 .disp = gm107_disp_new,
1958 .dma = gf119_dma_new,
1959 .fifo = gk208_fifo_new,
1960 .gr = gm107_gr_new,
1961 .sw = gf100_sw_new,
1962};
1963
1964static const struct nvkm_device_chip
1965nv124_chipset = {
1966 .name = "GM204",
1967 .bar = gf100_bar_new,
1968 .bios = nvkm_bios_new,
1969 .bus = gf100_bus_new,
1970 .devinit = gm204_devinit_new,
1971 .fb = gm107_fb_new,
1972 .fuse = gm107_fuse_new,
1973 .gpio = gk104_gpio_new,
1974 .i2c = gm204_i2c_new,
1975 .ibus = gk104_ibus_new,
1976 .imem = nv50_instmem_new,
1977 .ltc = gm107_ltc_new,
1978 .mc = gk20a_mc_new,
1979 .mmu = gf100_mmu_new,
1980 .mxm = nv50_mxm_new,
1981 .pci = nv40_pci_new,
1982 .pmu = gm107_pmu_new,
1983 .timer = gk20a_timer_new,
1984 .ce[0] = gm204_ce_new,
1985 .ce[1] = gm204_ce_new,
1986 .ce[2] = gm204_ce_new,
1987 .disp = gm204_disp_new,
1988 .dma = gf119_dma_new,
1989 .fifo = gm204_fifo_new,
1990 .gr = gm204_gr_new,
1991 .sw = gf100_sw_new,
1992};
1993
1994static const struct nvkm_device_chip
1995nv126_chipset = {
1996 .name = "GM206",
1997 .bar = gf100_bar_new,
1998 .bios = nvkm_bios_new,
1999 .bus = gf100_bus_new,
2000 .devinit = gm204_devinit_new,
2001 .fb = gm107_fb_new,
2002 .fuse = gm107_fuse_new,
2003 .gpio = gk104_gpio_new,
2004 .i2c = gm204_i2c_new,
2005 .ibus = gk104_ibus_new,
2006 .imem = nv50_instmem_new,
2007 .ltc = gm107_ltc_new,
2008 .mc = gk20a_mc_new,
2009 .mmu = gf100_mmu_new,
2010 .mxm = nv50_mxm_new,
2011 .pci = nv40_pci_new,
2012 .pmu = gm107_pmu_new,
2013 .timer = gk20a_timer_new,
2014 .ce[0] = gm204_ce_new,
2015 .ce[1] = gm204_ce_new,
2016 .ce[2] = gm204_ce_new,
2017 .disp = gm204_disp_new,
2018 .dma = gf119_dma_new,
2019 .fifo = gm204_fifo_new,
2020 .gr = gm206_gr_new,
2021 .sw = gf100_sw_new,
2022};
2023
2024static const struct nvkm_device_chip
2025nv12b_chipset = {
2026 .name = "GM20B",
2027 .bar = gk20a_bar_new,
2028 .bus = gf100_bus_new,
2029 .fb = gk20a_fb_new,
2030 .fuse = gm107_fuse_new,
2031 .ibus = gk20a_ibus_new,
2032 .imem = gk20a_instmem_new,
2033 .ltc = gm107_ltc_new,
2034 .mc = gk20a_mc_new,
2035 .mmu = gf100_mmu_new,
2036 .timer = gk20a_timer_new,
2037 .ce[2] = gm204_ce_new,
2038 .dma = gf119_dma_new,
2039 .fifo = gm20b_fifo_new,
2040 .gr = gm20b_gr_new,
2041 .sw = gf100_sw_new,
77}; 2042};
78 2043
79static int 2044static int
80nvkm_devobj_info(struct nvkm_object *object, void *data, u32 size) 2045nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
2046 struct nvkm_notify *notify)
81{ 2047{
82 struct nvkm_device *device = nv_device(object); 2048 if (!WARN_ON(size != 0)) {
83 struct nvkm_fb *pfb = nvkm_fb(device); 2049 notify->size = 0;
84 struct nvkm_instmem *imem = nvkm_instmem(device); 2050 notify->types = 1;
85 union { 2051 notify->index = 0;
86 struct nv_device_info_v0 v0; 2052 return 0;
87 } *args = data; 2053 }
88 int ret; 2054 return -EINVAL;
89 2055}
90 nv_ioctl(object, "device info size %d\n", size);
91 if (nvif_unpack(args->v0, 0, 0, false)) {
92 nv_ioctl(object, "device info vers %d\n", args->v0.version);
93 } else
94 return ret;
95 2056
96 switch (device->chipset) { 2057static const struct nvkm_event_func
97 case 0x01a: 2058nvkm_device_event_func = {
98 case 0x01f: 2059 .ctor = nvkm_device_event_ctor,
99 case 0x04c: 2060};
100 case 0x04e: 2061
101 case 0x063: 2062struct nvkm_subdev *
102 case 0x067: 2063nvkm_device_subdev(struct nvkm_device *device, int index)
103 case 0x068: 2064{
104 case 0x0aa: 2065 struct nvkm_engine *engine;
105 case 0x0ac: 2066
106 case 0x0af: 2067 if (device->disable_mask & (1ULL << index))
107 args->v0.platform = NV_DEVICE_INFO_V0_IGP; 2068 return NULL;
108 break; 2069
2070 switch (index) {
2071#define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break
2072 _(BAR , device->bar , &device->bar->subdev);
2073 _(VBIOS , device->bios , &device->bios->subdev);
2074 _(BUS , device->bus , &device->bus->subdev);
2075 _(CLK , device->clk , &device->clk->subdev);
2076 _(DEVINIT, device->devinit, &device->devinit->subdev);
2077 _(FB , device->fb , &device->fb->subdev);
2078 _(FUSE , device->fuse , &device->fuse->subdev);
2079 _(GPIO , device->gpio , &device->gpio->subdev);
2080 _(I2C , device->i2c , &device->i2c->subdev);
2081 _(IBUS , device->ibus , device->ibus);
2082 _(INSTMEM, device->imem , &device->imem->subdev);
2083 _(LTC , device->ltc , &device->ltc->subdev);
2084 _(MC , device->mc , &device->mc->subdev);
2085 _(MMU , device->mmu , &device->mmu->subdev);
2086 _(MXM , device->mxm , device->mxm);
2087 _(PCI , device->pci , &device->pci->subdev);
2088 _(PMU , device->pmu , &device->pmu->subdev);
2089 _(THERM , device->therm , &device->therm->subdev);
2090 _(TIMER , device->timer , &device->timer->subdev);
2091 _(VOLT , device->volt , &device->volt->subdev);
2092#undef _
109 default: 2093 default:
110 if (device->pdev) { 2094 engine = nvkm_device_engine(device, index);
111 if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP)) 2095 if (engine)
112 args->v0.platform = NV_DEVICE_INFO_V0_AGP; 2096 return &engine->subdev;
113 else
114 if (pci_is_pcie(device->pdev))
115 args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
116 else
117 args->v0.platform = NV_DEVICE_INFO_V0_PCI;
118 } else {
119 args->v0.platform = NV_DEVICE_INFO_V0_SOC;
120 }
121 break; 2097 break;
122 } 2098 }
2099 return NULL;
2100}
123 2101
124 switch (device->card_type) { 2102struct nvkm_engine *
125 case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break; 2103nvkm_device_engine(struct nvkm_device *device, int index)
126 case NV_10: 2104{
127 case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break; 2105 if (device->disable_mask & (1ULL << index))
128 case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break; 2106 return NULL;
129 case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break; 2107
130 case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break; 2108 switch (index) {
131 case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break; 2109#define _(n,p,m) case NVKM_ENGINE_##n: if (p) return (m); break
132 case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break; 2110 _(BSP , device->bsp , device->bsp);
133 case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break; 2111 _(CE0 , device->ce[0] , device->ce[0]);
134 case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break; 2112 _(CE1 , device->ce[1] , device->ce[1]);
2113 _(CE2 , device->ce[2] , device->ce[2]);
2114 _(CIPHER , device->cipher , device->cipher);
2115 _(DISP , device->disp , &device->disp->engine);
2116 _(DMAOBJ , device->dma , &device->dma->engine);
2117 _(FIFO , device->fifo , &device->fifo->engine);
2118 _(GR , device->gr , &device->gr->engine);
2119 _(IFB , device->ifb , device->ifb);
2120 _(ME , device->me , device->me);
2121 _(MPEG , device->mpeg , device->mpeg);
2122 _(MSENC , device->msenc , device->msenc);
2123 _(MSPDEC , device->mspdec , device->mspdec);
2124 _(MSPPP , device->msppp , device->msppp);
2125 _(MSVLD , device->msvld , device->msvld);
2126 _(PM , device->pm , &device->pm->engine);
2127 _(SEC , device->sec , device->sec);
2128 _(SW , device->sw , &device->sw->engine);
2129 _(VIC , device->vic , device->vic);
2130 _(VP , device->vp , device->vp);
2131#undef _
135 default: 2132 default:
136 args->v0.family = 0; 2133 WARN_ON(1);
137 break; 2134 break;
138 } 2135 }
2136 return NULL;
2137}
2138
2139int
2140nvkm_device_fini(struct nvkm_device *device, bool suspend)
2141{
2142 const char *action = suspend ? "suspend" : "fini";
2143 struct nvkm_subdev *subdev;
2144 int ret, i;
2145 s64 time;
2146
2147 nvdev_trace(device, "%s running...\n", action);
2148 time = ktime_to_us(ktime_get());
2149
2150 nvkm_acpi_fini(device);
2151
2152 for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) {
2153 if ((subdev = nvkm_device_subdev(device, i))) {
2154 ret = nvkm_subdev_fini(subdev, suspend);
2155 if (ret && suspend)
2156 goto fail;
2157 }
2158 }
139 2159
140 args->v0.chipset = device->chipset;
141 args->v0.revision = device->chiprev;
142 if (pfb && pfb->ram)
143 args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
144 else
145 args->v0.ram_size = args->v0.ram_user = 0;
146 if (imem && args->v0.ram_size > 0)
147 args->v0.ram_user = args->v0.ram_user - imem->reserved;
148 2160
2161 if (device->func->fini)
2162 device->func->fini(device, suspend);
2163
2164 time = ktime_to_us(ktime_get()) - time;
2165 nvdev_trace(device, "%s completed in %lldus...\n", action, time);
149 return 0; 2166 return 0;
2167
2168fail:
2169 do {
2170 if ((subdev = nvkm_device_subdev(device, i))) {
2171 int rret = nvkm_subdev_init(subdev);
2172 if (rret)
2173 nvkm_fatal(subdev, "failed restart, %d\n", ret);
2174 }
2175 } while (++i < NVKM_SUBDEV_NR);
2176
2177 nvdev_trace(device, "%s failed with %d\n", action, ret);
2178 return ret;
150} 2179}
151 2180
152static int 2181static int
153nvkm_devobj_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) 2182nvkm_device_preinit(struct nvkm_device *device)
154{ 2183{
155 switch (mthd) { 2184 struct nvkm_subdev *subdev;
156 case NV_DEVICE_V0_INFO: 2185 int ret, i;
157 return nvkm_devobj_info(object, data, size); 2186 s64 time;
158 default: 2187
159 break; 2188 nvdev_trace(device, "preinit running...\n");
2189 time = ktime_to_us(ktime_get());
2190
2191 if (device->func->preinit) {
2192 ret = device->func->preinit(device);
2193 if (ret)
2194 goto fail;
160 } 2195 }
161 return -EINVAL;
162}
163 2196
164static u8 2197 for (i = 0; i < NVKM_SUBDEV_NR; i++) {
165nvkm_devobj_rd08(struct nvkm_object *object, u64 addr) 2198 if ((subdev = nvkm_device_subdev(device, i))) {
166{ 2199 ret = nvkm_subdev_preinit(subdev);
167 return nv_rd08(object->engine, addr); 2200 if (ret)
168} 2201 goto fail;
2202 }
2203 }
169 2204
170static u16 2205 ret = nvkm_devinit_post(device->devinit, &device->disable_mask);
171nvkm_devobj_rd16(struct nvkm_object *object, u64 addr) 2206 if (ret)
172{ 2207 goto fail;
173 return nv_rd16(object->engine, addr);
174}
175 2208
176static u32 2209 time = ktime_to_us(ktime_get()) - time;
177nvkm_devobj_rd32(struct nvkm_object *object, u64 addr) 2210 nvdev_trace(device, "preinit completed in %lldus\n", time);
178{ 2211 return 0;
179 return nv_rd32(object->engine, addr);
180}
181 2212
182static void 2213fail:
183nvkm_devobj_wr08(struct nvkm_object *object, u64 addr, u8 data) 2214 nvdev_error(device, "preinit failed with %d\n", ret);
184{ 2215 return ret;
185 nv_wr08(object->engine, addr, data);
186} 2216}
187 2217
188static void 2218int
189nvkm_devobj_wr16(struct nvkm_object *object, u64 addr, u16 data) 2219nvkm_device_init(struct nvkm_device *device)
190{ 2220{
191 nv_wr16(object->engine, addr, data); 2221 struct nvkm_subdev *subdev;
192} 2222 int ret, i;
2223 s64 time;
193 2224
194static void 2225 ret = nvkm_device_preinit(device);
195nvkm_devobj_wr32(struct nvkm_object *object, u64 addr, u32 data) 2226 if (ret)
196{ 2227 return ret;
197 nv_wr32(object->engine, addr, data);
198}
199 2228
200static int 2229 nvkm_device_fini(device, false);
201nvkm_devobj_map(struct nvkm_object *object, u64 *addr, u32 *size) 2230
202{ 2231 nvdev_trace(device, "init running...\n");
203 struct nvkm_device *device = nv_device(object); 2232 time = ktime_to_us(ktime_get());
204 *addr = nv_device_resource_start(device, 0); 2233
205 *size = nv_device_resource_len(device, 0); 2234 if (device->func->init) {
2235 ret = device->func->init(device);
2236 if (ret)
2237 goto fail;
2238 }
2239
2240 for (i = 0; i < NVKM_SUBDEV_NR; i++) {
2241 if ((subdev = nvkm_device_subdev(device, i))) {
2242 ret = nvkm_subdev_init(subdev);
2243 if (ret)
2244 goto fail_subdev;
2245 }
2246 }
2247
2248 nvkm_acpi_init(device);
2249
2250 time = ktime_to_us(ktime_get()) - time;
2251 nvdev_trace(device, "init completed in %lldus\n", time);
206 return 0; 2252 return 0;
2253
2254fail_subdev:
2255 do {
2256 if ((subdev = nvkm_device_subdev(device, i)))
2257 nvkm_subdev_fini(subdev, false);
2258 } while (--i >= 0);
2259
2260fail:
2261 nvdev_error(device, "init failed with %d\n", ret);
2262 return ret;
207} 2263}
208 2264
209static const u64 disable_map[] = { 2265void
210 [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_V0_DISABLE_VBIOS, 2266nvkm_device_del(struct nvkm_device **pdevice)
211 [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_V0_DISABLE_CORE,
212 [NVDEV_SUBDEV_GPIO] = NV_DEVICE_V0_DISABLE_CORE,
213 [NVDEV_SUBDEV_I2C] = NV_DEVICE_V0_DISABLE_CORE,
214 [NVDEV_SUBDEV_CLK ] = NV_DEVICE_V0_DISABLE_CORE,
215 [NVDEV_SUBDEV_MXM] = NV_DEVICE_V0_DISABLE_CORE,
216 [NVDEV_SUBDEV_MC] = NV_DEVICE_V0_DISABLE_CORE,
217 [NVDEV_SUBDEV_BUS] = NV_DEVICE_V0_DISABLE_CORE,
218 [NVDEV_SUBDEV_TIMER] = NV_DEVICE_V0_DISABLE_CORE,
219 [NVDEV_SUBDEV_FB] = NV_DEVICE_V0_DISABLE_CORE,
220 [NVDEV_SUBDEV_LTC] = NV_DEVICE_V0_DISABLE_CORE,
221 [NVDEV_SUBDEV_IBUS] = NV_DEVICE_V0_DISABLE_CORE,
222 [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_V0_DISABLE_CORE,
223 [NVDEV_SUBDEV_MMU] = NV_DEVICE_V0_DISABLE_CORE,
224 [NVDEV_SUBDEV_BAR] = NV_DEVICE_V0_DISABLE_CORE,
225 [NVDEV_SUBDEV_VOLT] = NV_DEVICE_V0_DISABLE_CORE,
226 [NVDEV_SUBDEV_THERM] = NV_DEVICE_V0_DISABLE_CORE,
227 [NVDEV_SUBDEV_PMU] = NV_DEVICE_V0_DISABLE_CORE,
228 [NVDEV_SUBDEV_FUSE] = NV_DEVICE_V0_DISABLE_CORE,
229 [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_V0_DISABLE_CORE,
230 [NVDEV_ENGINE_PM ] = NV_DEVICE_V0_DISABLE_CORE,
231 [NVDEV_ENGINE_FIFO] = NV_DEVICE_V0_DISABLE_FIFO,
232 [NVDEV_ENGINE_SW] = NV_DEVICE_V0_DISABLE_FIFO,
233 [NVDEV_ENGINE_GR] = NV_DEVICE_V0_DISABLE_GR,
234 [NVDEV_ENGINE_MPEG] = NV_DEVICE_V0_DISABLE_MPEG,
235 [NVDEV_ENGINE_ME] = NV_DEVICE_V0_DISABLE_ME,
236 [NVDEV_ENGINE_VP] = NV_DEVICE_V0_DISABLE_VP,
237 [NVDEV_ENGINE_CIPHER] = NV_DEVICE_V0_DISABLE_CIPHER,
238 [NVDEV_ENGINE_BSP] = NV_DEVICE_V0_DISABLE_BSP,
239 [NVDEV_ENGINE_MSPPP] = NV_DEVICE_V0_DISABLE_MSPPP,
240 [NVDEV_ENGINE_CE0] = NV_DEVICE_V0_DISABLE_CE0,
241 [NVDEV_ENGINE_CE1] = NV_DEVICE_V0_DISABLE_CE1,
242 [NVDEV_ENGINE_CE2] = NV_DEVICE_V0_DISABLE_CE2,
243 [NVDEV_ENGINE_VIC] = NV_DEVICE_V0_DISABLE_VIC,
244 [NVDEV_ENGINE_MSENC] = NV_DEVICE_V0_DISABLE_MSENC,
245 [NVDEV_ENGINE_DISP] = NV_DEVICE_V0_DISABLE_DISP,
246 [NVDEV_ENGINE_MSVLD] = NV_DEVICE_V0_DISABLE_MSVLD,
247 [NVDEV_ENGINE_SEC] = NV_DEVICE_V0_DISABLE_SEC,
248 [NVDEV_SUBDEV_NR] = 0,
249};
250
251static void
252nvkm_devobj_dtor(struct nvkm_object *object)
253{ 2267{
254 struct nvkm_devobj *devobj = (void *)object; 2268 struct nvkm_device *device = *pdevice;
255 int i; 2269 int i;
2270 if (device) {
2271 mutex_lock(&nv_devices_mutex);
2272 device->disable_mask = 0;
2273 for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) {
2274 struct nvkm_subdev *subdev =
2275 nvkm_device_subdev(device, i);
2276 nvkm_subdev_del(&subdev);
2277 }
256 2278
257 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) 2279 nvkm_event_fini(&device->event);
258 nvkm_object_ref(NULL, &devobj->subdev[i]);
259 2280
260 nvkm_parent_destroy(&devobj->base); 2281 if (device->pri)
261} 2282 iounmap(device->pri);
2283 list_del(&device->head);
262 2284
263static struct nvkm_oclass 2285 if (device->func->dtor)
264nvkm_devobj_oclass_super = { 2286 *pdevice = device->func->dtor(device);
265 .handle = NV_DEVICE, 2287 mutex_unlock(&nv_devices_mutex);
266 .ofuncs = &(struct nvkm_ofuncs) { 2288
267 .dtor = nvkm_devobj_dtor, 2289 kfree(*pdevice);
268 .init = _nvkm_parent_init, 2290 *pdevice = NULL;
269 .fini = _nvkm_parent_fini,
270 .mthd = nvkm_devobj_mthd,
271 .map = nvkm_devobj_map,
272 .rd08 = nvkm_devobj_rd08,
273 .rd16 = nvkm_devobj_rd16,
274 .rd32 = nvkm_devobj_rd32,
275 .wr08 = nvkm_devobj_wr08,
276 .wr16 = nvkm_devobj_wr16,
277 .wr32 = nvkm_devobj_wr32,
278 } 2291 }
279}; 2292}
280 2293
281static int 2294int
282nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 2295nvkm_device_ctor(const struct nvkm_device_func *func,
283 struct nvkm_oclass *oclass, void *data, u32 size, 2296 const struct nvkm_device_quirk *quirk,
284 struct nvkm_object **pobject) 2297 struct device *dev, enum nvkm_device_type type, u64 handle,
2298 const char *name, const char *cfg, const char *dbg,
2299 bool detect, bool mmio, u64 subdev_mask,
2300 struct nvkm_device *device)
285{ 2301{
286 union { 2302 struct nvkm_subdev *subdev;
287 struct nv_device_v0 v0; 2303 u64 mmio_base, mmio_size;
288 } *args = data;
289 struct nvkm_client *client = nv_client(parent);
290 struct nvkm_device *device;
291 struct nvkm_devobj *devobj;
292 u32 boot0, strap; 2304 u32 boot0, strap;
293 u64 disable, mmio_base, mmio_size;
294 void __iomem *map; 2305 void __iomem *map;
295 int ret, i, c; 2306 int ret = -EEXIST;
296 2307 int i;
297 nv_ioctl(parent, "create device size %d\n", size);
298 if (nvif_unpack(args->v0, 0, 0, false)) {
299 nv_ioctl(parent, "create device v%d device %016llx "
300 "disable %016llx debug0 %016llx\n",
301 args->v0.version, args->v0.device,
302 args->v0.disable, args->v0.debug0);
303 } else
304 return ret;
305 2308
306 /* give priviledged clients register access */ 2309 mutex_lock(&nv_devices_mutex);
307 if (client->super) 2310 if (nvkm_device_find_locked(handle))
308 oclass = &nvkm_devobj_oclass_super; 2311 goto done;
309 2312
310 /* find the device subdev that matches what the client requested */ 2313 device->func = func;
311 device = nv_device(client->device); 2314 device->quirk = quirk;
312 if (args->v0.device != ~0) { 2315 device->dev = dev;
313 device = nvkm_device_find(args->v0.device); 2316 device->type = type;
314 if (!device) 2317 device->handle = handle;
315 return -ENODEV; 2318 device->cfgopt = cfg;
316 } 2319 device->dbgopt = dbg;
2320 device->name = name;
2321 list_add_tail(&device->head, &nv_devices);
2322 device->debug = nvkm_dbgopt(device->dbgopt, "device");
317 2323
318 ret = nvkm_parent_create(parent, nv_object(device), oclass, 0, 2324 ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
319 nvkm_control_oclass,
320 (1ULL << NVDEV_ENGINE_DMAOBJ) |
321 (1ULL << NVDEV_ENGINE_FIFO) |
322 (1ULL << NVDEV_ENGINE_DISP) |
323 (1ULL << NVDEV_ENGINE_PM), &devobj);
324 *pobject = nv_object(devobj);
325 if (ret) 2325 if (ret)
326 return ret; 2326 goto done;
327
328 mmio_base = nv_device_resource_start(device, 0);
329 mmio_size = nv_device_resource_len(device, 0);
330 2327
331 /* translate api disable mask into internal mapping */ 2328 mmio_base = device->func->resource_addr(device, 0);
332 disable = args->v0.debug0; 2329 mmio_size = device->func->resource_size(device, 0);
333 for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
334 if (args->v0.disable & disable_map[i])
335 disable |= (1ULL << i);
336 }
337 2330
338 /* identify the chipset, and determine classes of subdev/engines */ 2331 /* identify the chipset, and determine classes of subdev/engines */
339 if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY) && 2332 if (detect) {
340 !device->card_type) {
341 map = ioremap(mmio_base, 0x102000); 2333 map = ioremap(mmio_base, 0x102000);
342 if (map == NULL) 2334 if (ret = -ENOMEM, map == NULL)
343 return -ENOMEM; 2335 goto done;
344 2336
345 /* switch mmio to cpu's native endianness */ 2337 /* switch mmio to cpu's native endianness */
346#ifndef __BIG_ENDIAN 2338#ifndef __BIG_ENDIAN
@@ -397,31 +2389,83 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
397 device->card_type = NV_04; 2389 device->card_type = NV_04;
398 } 2390 }
399 2391
400 switch (device->card_type) { 2392 switch (device->chipset) {
401 case NV_04: ret = nv04_identify(device); break; 2393 case 0x004: device->chip = &nv4_chipset; break;
402 case NV_10: 2394 case 0x005: device->chip = &nv5_chipset; break;
403 case NV_11: ret = nv10_identify(device); break; 2395 case 0x010: device->chip = &nv10_chipset; break;
404 case NV_20: ret = nv20_identify(device); break; 2396 case 0x011: device->chip = &nv11_chipset; break;
405 case NV_30: ret = nv30_identify(device); break; 2397 case 0x015: device->chip = &nv15_chipset; break;
406 case NV_40: ret = nv40_identify(device); break; 2398 case 0x017: device->chip = &nv17_chipset; break;
407 case NV_50: ret = nv50_identify(device); break; 2399 case 0x018: device->chip = &nv18_chipset; break;
408 case NV_C0: ret = gf100_identify(device); break; 2400 case 0x01a: device->chip = &nv1a_chipset; break;
409 case NV_E0: ret = gk104_identify(device); break; 2401 case 0x01f: device->chip = &nv1f_chipset; break;
410 case GM100: ret = gm100_identify(device); break; 2402 case 0x020: device->chip = &nv20_chipset; break;
2403 case 0x025: device->chip = &nv25_chipset; break;
2404 case 0x028: device->chip = &nv28_chipset; break;
2405 case 0x02a: device->chip = &nv2a_chipset; break;
2406 case 0x030: device->chip = &nv30_chipset; break;
2407 case 0x031: device->chip = &nv31_chipset; break;
2408 case 0x034: device->chip = &nv34_chipset; break;
2409 case 0x035: device->chip = &nv35_chipset; break;
2410 case 0x036: device->chip = &nv36_chipset; break;
2411 case 0x040: device->chip = &nv40_chipset; break;
2412 case 0x041: device->chip = &nv41_chipset; break;
2413 case 0x042: device->chip = &nv42_chipset; break;
2414 case 0x043: device->chip = &nv43_chipset; break;
2415 case 0x044: device->chip = &nv44_chipset; break;
2416 case 0x045: device->chip = &nv45_chipset; break;
2417 case 0x046: device->chip = &nv46_chipset; break;
2418 case 0x047: device->chip = &nv47_chipset; break;
2419 case 0x049: device->chip = &nv49_chipset; break;
2420 case 0x04a: device->chip = &nv4a_chipset; break;
2421 case 0x04b: device->chip = &nv4b_chipset; break;
2422 case 0x04c: device->chip = &nv4c_chipset; break;
2423 case 0x04e: device->chip = &nv4e_chipset; break;
2424 case 0x050: device->chip = &nv50_chipset; break;
2425 case 0x063: device->chip = &nv63_chipset; break;
2426 case 0x067: device->chip = &nv67_chipset; break;
2427 case 0x068: device->chip = &nv68_chipset; break;
2428 case 0x084: device->chip = &nv84_chipset; break;
2429 case 0x086: device->chip = &nv86_chipset; break;
2430 case 0x092: device->chip = &nv92_chipset; break;
2431 case 0x094: device->chip = &nv94_chipset; break;
2432 case 0x096: device->chip = &nv96_chipset; break;
2433 case 0x098: device->chip = &nv98_chipset; break;
2434 case 0x0a0: device->chip = &nva0_chipset; break;
2435 case 0x0a3: device->chip = &nva3_chipset; break;
2436 case 0x0a5: device->chip = &nva5_chipset; break;
2437 case 0x0a8: device->chip = &nva8_chipset; break;
2438 case 0x0aa: device->chip = &nvaa_chipset; break;
2439 case 0x0ac: device->chip = &nvac_chipset; break;
2440 case 0x0af: device->chip = &nvaf_chipset; break;
2441 case 0x0c0: device->chip = &nvc0_chipset; break;
2442 case 0x0c1: device->chip = &nvc1_chipset; break;
2443 case 0x0c3: device->chip = &nvc3_chipset; break;
2444 case 0x0c4: device->chip = &nvc4_chipset; break;
2445 case 0x0c8: device->chip = &nvc8_chipset; break;
2446 case 0x0ce: device->chip = &nvce_chipset; break;
2447 case 0x0cf: device->chip = &nvcf_chipset; break;
2448 case 0x0d7: device->chip = &nvd7_chipset; break;
2449 case 0x0d9: device->chip = &nvd9_chipset; break;
2450 case 0x0e4: device->chip = &nve4_chipset; break;
2451 case 0x0e6: device->chip = &nve6_chipset; break;
2452 case 0x0e7: device->chip = &nve7_chipset; break;
2453 case 0x0ea: device->chip = &nvea_chipset; break;
2454 case 0x0f0: device->chip = &nvf0_chipset; break;
2455 case 0x0f1: device->chip = &nvf1_chipset; break;
2456 case 0x106: device->chip = &nv106_chipset; break;
2457 case 0x108: device->chip = &nv108_chipset; break;
2458 case 0x117: device->chip = &nv117_chipset; break;
2459 case 0x124: device->chip = &nv124_chipset; break;
2460 case 0x126: device->chip = &nv126_chipset; break;
2461 case 0x12b: device->chip = &nv12b_chipset; break;
411 default: 2462 default:
412 ret = -EINVAL; 2463 nvdev_error(device, "unknown chipset (%08x)\n", boot0);
413 break; 2464 goto done;
414 }
415
416 if (ret) {
417 nv_error(device, "unknown chipset, 0x%08x\n", boot0);
418 return ret;
419 } 2465 }
420 2466
421 nv_info(device, "BOOT0 : 0x%08x\n", boot0); 2467 nvdev_info(device, "NVIDIA %s (%08x)\n",
422 nv_info(device, "Chipset: %s (NV%02X)\n", 2468 device->chip->name, boot0);
423 device->cname, device->chipset);
424 nv_info(device, "Family : NV%02X\n", device->card_type);
425 2469
426 /* determine frequency of timing crystal */ 2470 /* determine frequency of timing crystal */
427 if ( device->card_type <= NV_10 || device->chipset < 0x17 || 2471 if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
@@ -436,300 +2480,89 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
436 case 0x00400000: device->crystal = 27000; break; 2480 case 0x00400000: device->crystal = 27000; break;
437 case 0x00400040: device->crystal = 25000; break; 2481 case 0x00400040: device->crystal = 25000; break;
438 } 2482 }
439
440 nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
441 } else
442 if ( (args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY)) {
443 device->cname = "NULL";
444 device->oclass[NVDEV_SUBDEV_VBIOS] = &nvkm_bios_oclass;
445 }
446
447 if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
448 !nv_subdev(device)->mmio) {
449 nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size);
450 if (!nv_subdev(device)->mmio) {
451 nv_error(device, "unable to map device registers\n");
452 return -ENOMEM;
453 }
454 }
455
456 /* ensure requested subsystems are available for use */
457 for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
458 if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
459 continue;
460
461 if (device->subdev[i]) {
462 nvkm_object_ref(device->subdev[i], &devobj->subdev[i]);
463 continue;
464 }
465
466 ret = nvkm_object_ctor(nv_object(device), NULL, oclass,
467 NULL, i, &devobj->subdev[i]);
468 if (ret == -ENODEV)
469 continue;
470 if (ret)
471 return ret;
472
473 device->subdev[i] = devobj->subdev[i];
474
475 /* note: can't init *any* subdevs until devinit has been run
476 * due to not knowing exactly what the vbios init tables will
477 * mess with. devinit also can't be run until all of its
478 * dependencies have been created.
479 *
480 * this code delays init of any subdev until all of devinit's
481 * dependencies have been created, and then initialises each
482 * subdev in turn as they're created.
483 */
484 while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
485 struct nvkm_object *subdev = devobj->subdev[c++];
486 if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
487 ret = nvkm_object_inc(subdev);
488 if (ret)
489 return ret;
490 atomic_dec(&nv_object(device)->usecount);
491 } else
492 if (subdev) {
493 nvkm_subdev_reset(subdev);
494 }
495 }
496 }
497
498 return 0;
499}
500
501static struct nvkm_ofuncs
502nvkm_devobj_ofuncs = {
503 .ctor = nvkm_devobj_ctor,
504 .dtor = nvkm_devobj_dtor,
505 .init = _nvkm_parent_init,
506 .fini = _nvkm_parent_fini,
507 .mthd = nvkm_devobj_mthd,
508};
509
510/******************************************************************************
511 * nvkm_device: engine functions
512 *****************************************************************************/
513
514struct nvkm_device *
515nv_device(void *obj)
516{
517 struct nvkm_object *device = nv_object(obj);
518 if (device->engine == NULL) {
519 while (device && device->parent)
520 device = device->parent;
521 } else { 2483 } else {
522 device = &nv_object(obj)->engine->subdev.object; 2484 device->chip = &null_chipset;
523 if (device && device->parent)
524 device = device->parent;
525 } 2485 }
526#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
527 if (unlikely(!device))
528 nv_assert("BAD CAST -> NvDevice, 0x%08x\n", nv_hclass(obj));
529#endif
530 return (void *)device;
531}
532 2486
533static struct nvkm_oclass 2487 if (!device->name)
534nvkm_device_sclass[] = { 2488 device->name = device->chip->name;
535 { 0x0080, &nvkm_devobj_ofuncs },
536 {}
537};
538 2489
539static int 2490 if (mmio) {
540nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size, 2491 device->pri = ioremap(mmio_base, mmio_size);
541 struct nvkm_notify *notify) 2492 if (!device->pri) {
542{ 2493 nvdev_error(device, "unable to map PRI\n");
543 if (!WARN_ON(size != 0)) { 2494 return -ENOMEM;
544 notify->size = 0;
545 notify->types = 1;
546 notify->index = 0;
547 return 0;
548 }
549 return -EINVAL;
550}
551
552static const struct nvkm_event_func
553nvkm_device_event_func = {
554 .ctor = nvkm_device_event_ctor,
555};
556
557static int
558nvkm_device_fini(struct nvkm_object *object, bool suspend)
559{
560 struct nvkm_device *device = (void *)object;
561 struct nvkm_object *subdev;
562 int ret, i;
563
564 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
565 if ((subdev = device->subdev[i])) {
566 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
567 ret = nvkm_object_dec(subdev, suspend);
568 if (ret && suspend)
569 goto fail;
570 }
571 }
572 }
573
574 ret = nvkm_acpi_fini(device, suspend);
575fail:
576 for (; ret && i < NVDEV_SUBDEV_NR; i++) {
577 if ((subdev = device->subdev[i])) {
578 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
579 ret = nvkm_object_inc(subdev);
580 if (ret) {
581 /* XXX */
582 }
583 }
584 } 2495 }
585 } 2496 }
586 2497
587 return ret; 2498 mutex_init(&device->mutex);
588} 2499
589 2500 for (i = 0; i < NVKM_SUBDEV_NR; i++) {
590static int 2501#define _(s,m) case s: \
591nvkm_device_init(struct nvkm_object *object) 2502 if (device->chip->m && (subdev_mask & (1ULL << (s)))) { \
592{ 2503 ret = device->chip->m(device, (s), &device->m); \
593 struct nvkm_device *device = (void *)object; 2504 if (ret) { \
594 struct nvkm_object *subdev; 2505 subdev = nvkm_device_subdev(device, (s)); \
595 int ret, i = 0; 2506 nvkm_subdev_del(&subdev); \
596 2507 device->m = NULL; \
597 ret = nvkm_acpi_init(device); 2508 if (ret != -ENODEV) { \
598 if (ret) 2509 nvdev_error(device, "%s ctor failed, %d\n", \
599 goto fail; 2510 nvkm_subdev_name[s], ret); \
600 2511 goto done; \
601 for (i = 0; i < NVDEV_SUBDEV_NR; i++) { 2512 } \
602 if ((subdev = device->subdev[i])) { 2513 } \
603 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) { 2514 } \
604 ret = nvkm_object_inc(subdev); 2515 break
605 if (ret) 2516 switch (i) {
606 goto fail; 2517 _(NVKM_SUBDEV_BAR , bar);
607 } else { 2518 _(NVKM_SUBDEV_VBIOS , bios);
608 nvkm_subdev_reset(subdev); 2519 _(NVKM_SUBDEV_BUS , bus);
609 } 2520 _(NVKM_SUBDEV_CLK , clk);
2521 _(NVKM_SUBDEV_DEVINIT, devinit);
2522 _(NVKM_SUBDEV_FB , fb);
2523 _(NVKM_SUBDEV_FUSE , fuse);
2524 _(NVKM_SUBDEV_GPIO , gpio);
2525 _(NVKM_SUBDEV_I2C , i2c);
2526 _(NVKM_SUBDEV_IBUS , ibus);
2527 _(NVKM_SUBDEV_INSTMEM, imem);
2528 _(NVKM_SUBDEV_LTC , ltc);
2529 _(NVKM_SUBDEV_MC , mc);
2530 _(NVKM_SUBDEV_MMU , mmu);
2531 _(NVKM_SUBDEV_MXM , mxm);
2532 _(NVKM_SUBDEV_PCI , pci);
2533 _(NVKM_SUBDEV_PMU , pmu);
2534 _(NVKM_SUBDEV_THERM , therm);
2535 _(NVKM_SUBDEV_TIMER , timer);
2536 _(NVKM_SUBDEV_VOLT , volt);
2537 _(NVKM_ENGINE_BSP , bsp);
2538 _(NVKM_ENGINE_CE0 , ce[0]);
2539 _(NVKM_ENGINE_CE1 , ce[1]);
2540 _(NVKM_ENGINE_CE2 , ce[2]);
2541 _(NVKM_ENGINE_CIPHER , cipher);
2542 _(NVKM_ENGINE_DISP , disp);
2543 _(NVKM_ENGINE_DMAOBJ , dma);
2544 _(NVKM_ENGINE_FIFO , fifo);
2545 _(NVKM_ENGINE_GR , gr);
2546 _(NVKM_ENGINE_IFB , ifb);
2547 _(NVKM_ENGINE_ME , me);
2548 _(NVKM_ENGINE_MPEG , mpeg);
2549 _(NVKM_ENGINE_MSENC , msenc);
2550 _(NVKM_ENGINE_MSPDEC , mspdec);
2551 _(NVKM_ENGINE_MSPPP , msppp);
2552 _(NVKM_ENGINE_MSVLD , msvld);
2553 _(NVKM_ENGINE_PM , pm);
2554 _(NVKM_ENGINE_SEC , sec);
2555 _(NVKM_ENGINE_SW , sw);
2556 _(NVKM_ENGINE_VIC , vic);
2557 _(NVKM_ENGINE_VP , vp);
2558 default:
2559 WARN_ON(1);
2560 continue;
610 } 2561 }
2562#undef _
611 } 2563 }
612 2564
613 ret = 0; 2565 ret = 0;
614fail:
615 for (--i; ret && i >= 0; i--) {
616 if ((subdev = device->subdev[i])) {
617 if (!nv_iclass(subdev, NV_ENGINE_CLASS))
618 nvkm_object_dec(subdev, false);
619 }
620 }
621
622 if (ret)
623 nvkm_acpi_fini(device, false);
624 return ret;
625}
626
627static void
628nvkm_device_dtor(struct nvkm_object *object)
629{
630 struct nvkm_device *device = (void *)object;
631
632 nvkm_event_fini(&device->event);
633
634 mutex_lock(&nv_devices_mutex);
635 list_del(&device->head);
636 mutex_unlock(&nv_devices_mutex);
637
638 if (nv_subdev(device)->mmio)
639 iounmap(nv_subdev(device)->mmio);
640
641 nvkm_engine_destroy(&device->engine);
642}
643
644resource_size_t
645nv_device_resource_start(struct nvkm_device *device, unsigned int bar)
646{
647 if (nv_device_is_pci(device)) {
648 return pci_resource_start(device->pdev, bar);
649 } else {
650 struct resource *res;
651 res = platform_get_resource(device->platformdev,
652 IORESOURCE_MEM, bar);
653 if (!res)
654 return 0;
655 return res->start;
656 }
657}
658
659resource_size_t
660nv_device_resource_len(struct nvkm_device *device, unsigned int bar)
661{
662 if (nv_device_is_pci(device)) {
663 return pci_resource_len(device->pdev, bar);
664 } else {
665 struct resource *res;
666 res = platform_get_resource(device->platformdev,
667 IORESOURCE_MEM, bar);
668 if (!res)
669 return 0;
670 return resource_size(res);
671 }
672}
673
674int
675nv_device_get_irq(struct nvkm_device *device, bool stall)
676{
677 if (nv_device_is_pci(device)) {
678 return device->pdev->irq;
679 } else {
680 return platform_get_irq_byname(device->platformdev,
681 stall ? "stall" : "nonstall");
682 }
683}
684
685static struct nvkm_oclass
686nvkm_device_oclass = {
687 .handle = NV_ENGINE(DEVICE, 0x00),
688 .ofuncs = &(struct nvkm_ofuncs) {
689 .dtor = nvkm_device_dtor,
690 .init = nvkm_device_init,
691 .fini = nvkm_device_fini,
692 },
693};
694
695int
696nvkm_device_create_(void *dev, enum nv_bus_type type, u64 name,
697 const char *sname, const char *cfg, const char *dbg,
698 int length, void **pobject)
699{
700 struct nvkm_device *device;
701 int ret = -EEXIST;
702
703 mutex_lock(&nv_devices_mutex);
704 list_for_each_entry(device, &nv_devices, head) {
705 if (device->handle == name)
706 goto done;
707 }
708
709 ret = nvkm_engine_create_(NULL, NULL, &nvkm_device_oclass, true,
710 "DEVICE", "device", length, pobject);
711 device = *pobject;
712 if (ret)
713 goto done;
714
715 switch (type) {
716 case NVKM_BUS_PCI:
717 device->pdev = dev;
718 break;
719 case NVKM_BUS_PLATFORM:
720 device->platformdev = dev;
721 break;
722 }
723 device->handle = name;
724 device->cfgopt = cfg;
725 device->dbgopt = dbg;
726 device->name = sname;
727
728 nv_subdev(device)->debug = nvkm_dbgopt(device->dbgopt, "DEVICE");
729 nv_engine(device)->sclass = nvkm_device_sclass;
730 list_add(&device->head, &nv_devices);
731
732 ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
733done: 2566done:
734 mutex_unlock(&nv_devices_mutex); 2567 mutex_unlock(&nv_devices_mutex);
735 return ret; 2568 return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
index 0b794b13cec3..cf8bc068e9b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
@@ -21,7 +21,7 @@
21 * 21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24#include "priv.h" 24#include "ctrl.h"
25 25
26#include <core/client.h> 26#include <core/client.h>
27#include <subdev/clk.h> 27#include <subdev/clk.h>
@@ -31,18 +31,18 @@
31#include <nvif/unpack.h> 31#include <nvif/unpack.h>
32 32
33static int 33static int
34nvkm_control_mthd_pstate_info(struct nvkm_object *object, void *data, u32 size) 34nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
35{ 35{
36 union { 36 union {
37 struct nvif_control_pstate_info_v0 v0; 37 struct nvif_control_pstate_info_v0 v0;
38 } *args = data; 38 } *args = data;
39 struct nvkm_clk *clk = nvkm_clk(object); 39 struct nvkm_clk *clk = ctrl->device->clk;
40 int ret; 40 int ret;
41 41
42 nv_ioctl(object, "control pstate info size %d\n", size); 42 nvif_ioctl(&ctrl->object, "control pstate info size %d\n", size);
43 if (nvif_unpack(args->v0, 0, 0, false)) { 43 if (nvif_unpack(args->v0, 0, 0, false)) {
44 nv_ioctl(object, "control pstate info vers %d\n", 44 nvif_ioctl(&ctrl->object, "control pstate info vers %d\n",
45 args->v0.version); 45 args->v0.version);
46 } else 46 } else
47 return ret; 47 return ret;
48 48
@@ -64,24 +64,24 @@ nvkm_control_mthd_pstate_info(struct nvkm_object *object, void *data, u32 size)
64} 64}
65 65
66static int 66static int
67nvkm_control_mthd_pstate_attr(struct nvkm_object *object, void *data, u32 size) 67nvkm_control_mthd_pstate_attr(struct nvkm_control *ctrl, void *data, u32 size)
68{ 68{
69 union { 69 union {
70 struct nvif_control_pstate_attr_v0 v0; 70 struct nvif_control_pstate_attr_v0 v0;
71 } *args = data; 71 } *args = data;
72 struct nvkm_clk *clk = nvkm_clk(object); 72 struct nvkm_clk *clk = ctrl->device->clk;
73 struct nvkm_domain *domain; 73 const struct nvkm_domain *domain;
74 struct nvkm_pstate *pstate; 74 struct nvkm_pstate *pstate;
75 struct nvkm_cstate *cstate; 75 struct nvkm_cstate *cstate;
76 int i = 0, j = -1; 76 int i = 0, j = -1;
77 u32 lo, hi; 77 u32 lo, hi;
78 int ret; 78 int ret;
79 79
80 nv_ioctl(object, "control pstate attr size %d\n", size); 80 nvif_ioctl(&ctrl->object, "control pstate attr size %d\n", size);
81 if (nvif_unpack(args->v0, 0, 0, false)) { 81 if (nvif_unpack(args->v0, 0, 0, false)) {
82 nv_ioctl(object, "control pstate attr vers %d state %d " 82 nvif_ioctl(&ctrl->object,
83 "index %d\n", 83 "control pstate attr vers %d state %d index %d\n",
84 args->v0.version, args->v0.state, args->v0.index); 84 args->v0.version, args->v0.state, args->v0.index);
85 if (!clk) 85 if (!clk)
86 return -ENODEV; 86 return -ENODEV;
87 if (args->v0.state < NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT) 87 if (args->v0.state < NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT)
@@ -116,7 +116,7 @@ nvkm_control_mthd_pstate_attr(struct nvkm_object *object, void *data, u32 size)
116 116
117 args->v0.state = pstate->pstate; 117 args->v0.state = pstate->pstate;
118 } else { 118 } else {
119 lo = max(clk->read(clk, domain->name), 0); 119 lo = max(nvkm_clk_read(clk, domain->name), 0);
120 hi = lo; 120 hi = lo;
121 } 121 }
122 122
@@ -137,19 +137,19 @@ nvkm_control_mthd_pstate_attr(struct nvkm_object *object, void *data, u32 size)
137} 137}
138 138
139static int 139static int
140nvkm_control_mthd_pstate_user(struct nvkm_object *object, void *data, u32 size) 140nvkm_control_mthd_pstate_user(struct nvkm_control *ctrl, void *data, u32 size)
141{ 141{
142 union { 142 union {
143 struct nvif_control_pstate_user_v0 v0; 143 struct nvif_control_pstate_user_v0 v0;
144 } *args = data; 144 } *args = data;
145 struct nvkm_clk *clk = nvkm_clk(object); 145 struct nvkm_clk *clk = ctrl->device->clk;
146 int ret; 146 int ret;
147 147
148 nv_ioctl(object, "control pstate user size %d\n", size); 148 nvif_ioctl(&ctrl->object, "control pstate user size %d\n", size);
149 if (nvif_unpack(args->v0, 0, 0, false)) { 149 if (nvif_unpack(args->v0, 0, 0, false)) {
150 nv_ioctl(object, "control pstate user vers %d ustate %d " 150 nvif_ioctl(&ctrl->object,
151 "pwrsrc %d\n", args->v0.version, 151 "control pstate user vers %d ustate %d pwrsrc %d\n",
152 args->v0.ustate, args->v0.pwrsrc); 152 args->v0.version, args->v0.ustate, args->v0.pwrsrc);
153 if (!clk) 153 if (!clk)
154 return -ENODEV; 154 return -ENODEV;
155 } else 155 } else
@@ -168,32 +168,44 @@ nvkm_control_mthd_pstate_user(struct nvkm_object *object, void *data, u32 size)
168static int 168static int
169nvkm_control_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) 169nvkm_control_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
170{ 170{
171 struct nvkm_control *ctrl = nvkm_control(object);
171 switch (mthd) { 172 switch (mthd) {
172 case NVIF_CONTROL_PSTATE_INFO: 173 case NVIF_CONTROL_PSTATE_INFO:
173 return nvkm_control_mthd_pstate_info(object, data, size); 174 return nvkm_control_mthd_pstate_info(ctrl, data, size);
174 case NVIF_CONTROL_PSTATE_ATTR: 175 case NVIF_CONTROL_PSTATE_ATTR:
175 return nvkm_control_mthd_pstate_attr(object, data, size); 176 return nvkm_control_mthd_pstate_attr(ctrl, data, size);
176 case NVIF_CONTROL_PSTATE_USER: 177 case NVIF_CONTROL_PSTATE_USER:
177 return nvkm_control_mthd_pstate_user(object, data, size); 178 return nvkm_control_mthd_pstate_user(ctrl, data, size);
178 default: 179 default:
179 break; 180 break;
180 } 181 }
181 return -EINVAL; 182 return -EINVAL;
182} 183}
183 184
184static struct nvkm_ofuncs 185static const struct nvkm_object_func
185nvkm_control_ofuncs = { 186nvkm_control = {
186 .ctor = _nvkm_object_ctor,
187 .dtor = nvkm_object_destroy,
188 .init = nvkm_object_init,
189 .fini = nvkm_object_fini,
190 .mthd = nvkm_control_mthd, 187 .mthd = nvkm_control_mthd,
191}; 188};
192 189
193struct nvkm_oclass 190static int
194nvkm_control_oclass[] = { 191nvkm_control_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
195 { .handle = NVIF_IOCTL_NEW_V0_CONTROL, 192 void *data, u32 size, struct nvkm_object **pobject)
196 .ofuncs = &nvkm_control_ofuncs 193{
197 }, 194 struct nvkm_control *ctrl;
198 {} 195
196 if (!(ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL)))
197 return -ENOMEM;
198 *pobject = &ctrl->object;
199 ctrl->device = device;
200
201 nvkm_object_ctor(&nvkm_control, oclass, &ctrl->object);
202 return 0;
203}
204
205const struct nvkm_device_oclass
206nvkm_control_oclass = {
207 .base.oclass = NVIF_IOCTL_NEW_V0_CONTROL,
208 .base.minver = -1,
209 .base.maxver = -1,
210 .ctor = nvkm_control_new,
199}; 211};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
new file mode 100644
index 000000000000..20249d8e444d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
@@ -0,0 +1,12 @@
1#ifndef __NVKM_DEVICE_CTRL_H__
2#define __NVKM_DEVICE_CTRL_H__
3#define nvkm_control(p) container_of((p), struct nvkm_control, object)
4#include <core/device.h>
5
6struct nvkm_control {
7 struct nvkm_object object;
8 struct nvkm_device *device;
9};
10
11extern const struct nvkm_device_oclass nvkm_control_oclass;
12#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c
deleted file mode 100644
index 82b38d7e9730..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c
+++ /dev/null
@@ -1,358 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26#include <subdev/bios.h>
27#include <subdev/bus.h>
28#include <subdev/gpio.h>
29#include <subdev/i2c.h>
30#include <subdev/fuse.h>
31#include <subdev/clk.h>
32#include <subdev/therm.h>
33#include <subdev/mxm.h>
34#include <subdev/devinit.h>
35#include <subdev/mc.h>
36#include <subdev/timer.h>
37#include <subdev/fb.h>
38#include <subdev/ltc.h>
39#include <subdev/ibus.h>
40#include <subdev/instmem.h>
41#include <subdev/mmu.h>
42#include <subdev/bar.h>
43#include <subdev/pmu.h>
44#include <subdev/volt.h>
45
46#include <engine/dmaobj.h>
47#include <engine/fifo.h>
48#include <engine/sw.h>
49#include <engine/gr.h>
50#include <engine/mspdec.h>
51#include <engine/bsp.h>
52#include <engine/msvld.h>
53#include <engine/msppp.h>
54#include <engine/ce.h>
55#include <engine/disp.h>
56#include <engine/pm.h>
57
58int
59gf100_identify(struct nvkm_device *device)
60{
61 switch (device->chipset) {
62 case 0xc0:
63 device->cname = "GF100";
64 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
65 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
66 device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
67 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
68 device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
69 device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
70 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
71 device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
72 device->oclass[NVDEV_SUBDEV_MC ] = gf100_mc_oclass;
73 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
75 device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
76 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
77 device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
78 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
79 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
80 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
81 device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
82 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
83 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
84 device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
85 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
86 device->oclass[NVDEV_ENGINE_GR ] = gf100_gr_oclass;
87 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
88 device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
89 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
90 device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
91 device->oclass[NVDEV_ENGINE_CE1 ] = &gf100_ce1_oclass;
92 device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
93 device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
94 break;
95 case 0xc4:
96 device->cname = "GF104";
97 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
98 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
99 device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
100 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
101 device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
102 device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
103 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
104 device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
105 device->oclass[NVDEV_SUBDEV_MC ] = gf100_mc_oclass;
106 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
107 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
108 device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
109 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
110 device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
111 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
112 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
113 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
114 device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
115 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
116 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
117 device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
118 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
119 device->oclass[NVDEV_ENGINE_GR ] = gf104_gr_oclass;
120 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
121 device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
122 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
123 device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
124 device->oclass[NVDEV_ENGINE_CE1 ] = &gf100_ce1_oclass;
125 device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
126 device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
127 break;
128 case 0xc3:
129 device->cname = "GF106";
130 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
131 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
132 device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
133 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
134 device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
135 device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
136 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
137 device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
138 device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
139 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
140 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
141 device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
142 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
143 device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
144 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
145 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
146 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
147 device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
148 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
149 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
150 device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
151 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
152 device->oclass[NVDEV_ENGINE_GR ] = gf104_gr_oclass;
153 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
154 device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
155 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
156 device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
157 device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
158 device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
159 break;
160 case 0xce:
161 device->cname = "GF114";
162 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
163 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
164 device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
165 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
166 device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
167 device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
168 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
169 device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
170 device->oclass[NVDEV_SUBDEV_MC ] = gf100_mc_oclass;
171 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
172 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
173 device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
174 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
175 device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
176 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
177 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
178 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
179 device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
180 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
181 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
182 device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
183 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
184 device->oclass[NVDEV_ENGINE_GR ] = gf104_gr_oclass;
185 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
186 device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
187 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
188 device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
189 device->oclass[NVDEV_ENGINE_CE1 ] = &gf100_ce1_oclass;
190 device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
191 device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
192 break;
193 case 0xcf:
194 device->cname = "GF116";
195 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
196 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
197 device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
198 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
199 device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
200 device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
201 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
202 device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
203 device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
204 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
205 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
206 device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
207 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
208 device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
209 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
210 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
211 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
212 device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
213 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
214 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
215 device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
216 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
217 device->oclass[NVDEV_ENGINE_GR ] = gf104_gr_oclass;
218 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
219 device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
220 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
221 device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
222 device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
223 device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
224 break;
225 case 0xc1:
226 device->cname = "GF108";
227 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
228 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
229 device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
230 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
231 device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
232 device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
233 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
234 device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
235 device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
236 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
237 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
238 device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
239 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
240 device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
241 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
242 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
243 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
244 device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
245 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
246 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
247 device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
248 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
249 device->oclass[NVDEV_ENGINE_GR ] = gf108_gr_oclass;
250 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
251 device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
252 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
253 device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
254 device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
255 device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
256 break;
257 case 0xc8:
258 device->cname = "GF110";
259 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
260 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
261 device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
262 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
263 device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
264 device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
265 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
266 device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
267 device->oclass[NVDEV_SUBDEV_MC ] = gf100_mc_oclass;
268 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
269 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
270 device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
271 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
272 device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
273 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
274 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
275 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
276 device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
277 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
278 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
279 device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
280 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
281 device->oclass[NVDEV_ENGINE_GR ] = gf110_gr_oclass;
282 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
283 device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
284 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
285 device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
286 device->oclass[NVDEV_ENGINE_CE1 ] = &gf100_ce1_oclass;
287 device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
288 device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
289 break;
290 case 0xd9:
291 device->cname = "GF119";
292 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
293 device->oclass[NVDEV_SUBDEV_GPIO ] = gf110_gpio_oclass;
294 device->oclass[NVDEV_SUBDEV_I2C ] = gf110_i2c_oclass;
295 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
296 device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
297 device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
298 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
299 device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
300 device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
301 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
302 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
303 device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
304 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
305 device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
306 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
307 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
308 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
309 device->oclass[NVDEV_SUBDEV_PMU ] = gf110_pmu_oclass;
310 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
311 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
312 device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
313 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
314 device->oclass[NVDEV_ENGINE_GR ] = gf119_gr_oclass;
315 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
316 device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
317 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
318 device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
319 device->oclass[NVDEV_ENGINE_DISP ] = gf110_disp_oclass;
320 device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
321 break;
322 case 0xd7:
323 device->cname = "GF117";
324 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
325 device->oclass[NVDEV_SUBDEV_GPIO ] = gf110_gpio_oclass;
326 device->oclass[NVDEV_SUBDEV_I2C ] = gf117_i2c_oclass;
327 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
328 device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
329 device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
330 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
331 device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
332 device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
333 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
334 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
335 device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
336 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
337 device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
338 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
339 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
340 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
341 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
342 device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
343 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
344 device->oclass[NVDEV_ENGINE_GR ] = gf117_gr_oclass;
345 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
346 device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
347 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
348 device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
349 device->oclass[NVDEV_ENGINE_DISP ] = gf110_disp_oclass;
350 device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
351 break;
352 default:
353 nv_fatal(device, "unknown Fermi chipset\n");
354 return -EINVAL;
355 }
356
357 return 0;
358}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
deleted file mode 100644
index 6a9483f65d83..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
+++ /dev/null
@@ -1,326 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26#include <subdev/bios.h>
27#include <subdev/bus.h>
28#include <subdev/gpio.h>
29#include <subdev/i2c.h>
30#include <subdev/fuse.h>
31#include <subdev/clk.h>
32#include <subdev/therm.h>
33#include <subdev/mxm.h>
34#include <subdev/devinit.h>
35#include <subdev/mc.h>
36#include <subdev/timer.h>
37#include <subdev/fb.h>
38#include <subdev/ltc.h>
39#include <subdev/ibus.h>
40#include <subdev/instmem.h>
41#include <subdev/mmu.h>
42#include <subdev/bar.h>
43#include <subdev/pmu.h>
44#include <subdev/volt.h>
45
46#include <engine/dmaobj.h>
47#include <engine/fifo.h>
48#include <engine/sw.h>
49#include <engine/gr.h>
50#include <engine/disp.h>
51#include <engine/ce.h>
52#include <engine/bsp.h>
53#include <engine/msvld.h>
54#include <engine/mspdec.h>
55#include <engine/msppp.h>
56#include <engine/pm.h>
57
58int
59gk104_identify(struct nvkm_device *device)
60{
61 switch (device->chipset) {
62 case 0xe4:
63 device->cname = "GK104";
64 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
65 device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
66 device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
67 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
68 device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
69 device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
70 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
71 device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
72 device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
73 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
75 device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
76 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
77 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
78 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
79 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
80 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
81 device->oclass[NVDEV_SUBDEV_PMU ] = gk104_pmu_oclass;
82 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
83 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
84 device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
85 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
86 device->oclass[NVDEV_ENGINE_GR ] = gk104_gr_oclass;
87 device->oclass[NVDEV_ENGINE_DISP ] = gk104_disp_oclass;
88 device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
89 device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
90 device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
91 device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
92 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
93 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
94 device->oclass[NVDEV_ENGINE_PM ] = &gk104_pm_oclass;
95 break;
96 case 0xe7:
97 device->cname = "GK107";
98 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
99 device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
100 device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
101 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
102 device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
103 device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
104 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
105 device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
106 device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
107 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
108 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
109 device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
110 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
111 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
112 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
113 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
114 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
115 device->oclass[NVDEV_SUBDEV_PMU ] = gf110_pmu_oclass;
116 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
117 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
118 device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
119 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
120 device->oclass[NVDEV_ENGINE_GR ] = gk104_gr_oclass;
121 device->oclass[NVDEV_ENGINE_DISP ] = gk104_disp_oclass;
122 device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
123 device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
124 device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
125 device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
126 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
127 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
128 device->oclass[NVDEV_ENGINE_PM ] = &gk104_pm_oclass;
129 break;
130 case 0xe6:
131 device->cname = "GK106";
132 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
133 device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
134 device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
135 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
136 device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
137 device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
138 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
139 device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
140 device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
141 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
142 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
143 device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
144 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
145 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
146 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
147 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
148 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
149 device->oclass[NVDEV_SUBDEV_PMU ] = gk104_pmu_oclass;
150 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
151 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
152 device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
153 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
154 device->oclass[NVDEV_ENGINE_GR ] = gk104_gr_oclass;
155 device->oclass[NVDEV_ENGINE_DISP ] = gk104_disp_oclass;
156 device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
157 device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
158 device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
159 device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
160 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
161 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
162 device->oclass[NVDEV_ENGINE_PM ] = &gk104_pm_oclass;
163 break;
164 case 0xea:
165 device->cname = "GK20A";
166 device->oclass[NVDEV_SUBDEV_CLK ] = &gk20a_clk_oclass;
167 device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
168 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
169 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
170 device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
171 device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass;
172 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
173 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk20a_ibus_oclass;
174 device->oclass[NVDEV_SUBDEV_INSTMEM] = gk20a_instmem_oclass;
175 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
176 device->oclass[NVDEV_SUBDEV_BAR ] = &gk20a_bar_oclass;
177 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
178 device->oclass[NVDEV_ENGINE_FIFO ] = gk20a_fifo_oclass;
179 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
180 device->oclass[NVDEV_ENGINE_GR ] = gk20a_gr_oclass;
181 device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
182 device->oclass[NVDEV_ENGINE_PM ] = &gk104_pm_oclass;
183 device->oclass[NVDEV_SUBDEV_VOLT ] = &gk20a_volt_oclass;
184 device->oclass[NVDEV_SUBDEV_PMU ] = gk20a_pmu_oclass;
185 break;
186 case 0xf0:
187 device->cname = "GK110";
188 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
189 device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
190 device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
191 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
192 device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
193 device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
194 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
195 device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
196 device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
197 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
198 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
199 device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
200 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
201 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
202 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
203 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
204 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
205 device->oclass[NVDEV_SUBDEV_PMU ] = gk110_pmu_oclass;
206 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
207 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
208 device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
209 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
210 device->oclass[NVDEV_ENGINE_GR ] = gk110_gr_oclass;
211 device->oclass[NVDEV_ENGINE_DISP ] = gk110_disp_oclass;
212 device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
213 device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
214 device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
215 device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
216 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
217 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
218 device->oclass[NVDEV_ENGINE_PM ] = &gk110_pm_oclass;
219 break;
220 case 0xf1:
221 device->cname = "GK110B";
222 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
223 device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
224 device->oclass[NVDEV_SUBDEV_I2C ] = gf110_i2c_oclass;
225 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
226 device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
227 device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
228 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
229 device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
230 device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
231 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
232 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
233 device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
234 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
235 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
236 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
237 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
238 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
239 device->oclass[NVDEV_SUBDEV_PMU ] = gk110_pmu_oclass;
240 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
241 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
242 device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
243 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
244 device->oclass[NVDEV_ENGINE_GR ] = gk110b_gr_oclass;
245 device->oclass[NVDEV_ENGINE_DISP ] = gk110_disp_oclass;
246 device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
247 device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
248 device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
249 device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
250 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
251 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
252 device->oclass[NVDEV_ENGINE_PM ] = &gk110_pm_oclass;
253 break;
254 case 0x106:
255 device->cname = "GK208B";
256 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
257 device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
258 device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
259 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
260 device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
261 device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
262 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
263 device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
264 device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
265 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
266 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
267 device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
268 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
269 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
270 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
271 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
272 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
273 device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
274 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
275 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
276 device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
277 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
278 device->oclass[NVDEV_ENGINE_GR ] = gk208_gr_oclass;
279 device->oclass[NVDEV_ENGINE_DISP ] = gk110_disp_oclass;
280 device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
281 device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
282 device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
283 device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
284 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
285 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
286 break;
287 case 0x108:
288 device->cname = "GK208";
289 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
290 device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
291 device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
292 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
293 device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
294 device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
295 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
296 device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
297 device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
298 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
299 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
300 device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
301 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
302 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
303 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
304 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
305 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
306 device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
307 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
308 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
309 device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
310 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
311 device->oclass[NVDEV_ENGINE_GR ] = gk208_gr_oclass;
312 device->oclass[NVDEV_ENGINE_DISP ] = gk110_disp_oclass;
313 device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
314 device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
315 device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
316 device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
317 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
318 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
319 break;
320 default:
321 nv_fatal(device, "unknown Kepler chipset\n");
322 return -EINVAL;
323 }
324
325 return 0;
326}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
deleted file mode 100644
index 70abf1ec7c98..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
+++ /dev/null
@@ -1,190 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26#include <subdev/bios.h>
27#include <subdev/bus.h>
28#include <subdev/gpio.h>
29#include <subdev/i2c.h>
30#include <subdev/fuse.h>
31#include <subdev/clk.h>
32#include <subdev/therm.h>
33#include <subdev/mxm.h>
34#include <subdev/devinit.h>
35#include <subdev/mc.h>
36#include <subdev/timer.h>
37#include <subdev/fb.h>
38#include <subdev/ltc.h>
39#include <subdev/ibus.h>
40#include <subdev/instmem.h>
41#include <subdev/mmu.h>
42#include <subdev/bar.h>
43#include <subdev/pmu.h>
44#include <subdev/volt.h>
45
46#include <engine/dmaobj.h>
47#include <engine/fifo.h>
48#include <engine/sw.h>
49#include <engine/gr.h>
50#include <engine/disp.h>
51#include <engine/ce.h>
52#include <engine/bsp.h>
53#include <engine/msvld.h>
54#include <engine/mspdec.h>
55#include <engine/msppp.h>
56#include <engine/pm.h>
57
58int
59gm100_identify(struct nvkm_device *device)
60{
61 switch (device->chipset) {
62 case 0x117:
63 device->cname = "GM107";
64 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
65 device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
66 device->oclass[NVDEV_SUBDEV_I2C ] = gf110_i2c_oclass;
67 device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
68 device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
69 device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
70 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
71 device->oclass[NVDEV_SUBDEV_DEVINIT] = gm107_devinit_oclass;
72 device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
73 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
74 device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
75 device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
76 device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
77 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
78 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
79 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
80 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
81 device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
82
83#if 0
84 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
85#endif
86 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
87 device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
88 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
89 device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass;
90 device->oclass[NVDEV_ENGINE_DISP ] = gm107_disp_oclass;
91 device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
92#if 0
93 device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
94#endif
95 device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
96#if 0
97 device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
98 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
99 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
100#endif
101 break;
102 case 0x124:
103 device->cname = "GM204";
104 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
105 device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
106 device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass;
107 device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
108#if 0
109 /* looks to be some non-trivial changes */
110 device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
111 /* priv ring says no to 0x10eb14 writes */
112 device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
113#endif
114 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
115 device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass;
116 device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
117 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
118 device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
119 device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
120 device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
121 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
122 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
123 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
124 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
125 device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
126#if 0
127 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
128#endif
129 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
130 device->oclass[NVDEV_ENGINE_FIFO ] = gm204_fifo_oclass;
131 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
132 device->oclass[NVDEV_ENGINE_GR ] = gm204_gr_oclass;
133 device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
134 device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass;
135 device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass;
136 device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass;
137#if 0
138 device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
139 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
140 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
141#endif
142 break;
143 case 0x126:
144 device->cname = "GM206";
145 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
146 device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
147 device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass;
148 device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
149#if 0
150 /* looks to be some non-trivial changes */
151 device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
152 /* priv ring says no to 0x10eb14 writes */
153 device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
154#endif
155 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
156 device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass;
157 device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
158 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
159 device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
160 device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
161 device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
162 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
163 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
164 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
165 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
166 device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
167#if 0
168 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
169#endif
170 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
171 device->oclass[NVDEV_ENGINE_FIFO ] = gm204_fifo_oclass;
172 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
173 device->oclass[NVDEV_ENGINE_GR ] = gm206_gr_oclass;
174 device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
175 device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass;
176 device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass;
177 device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass;
178#if 0
179 device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
180 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
181 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
182#endif
183 break;
184 default:
185 nv_fatal(device, "unknown Maxwell chipset\n");
186 return -EINVAL;
187 }
188
189 return 0;
190}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c
deleted file mode 100644
index 5a2ae043b478..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c
+++ /dev/null
@@ -1,89 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26#include <subdev/bios.h>
27#include <subdev/bus.h>
28#include <subdev/i2c.h>
29#include <subdev/clk.h>
30#include <subdev/devinit.h>
31#include <subdev/mc.h>
32#include <subdev/timer.h>
33#include <subdev/fb.h>
34#include <subdev/instmem.h>
35#include <subdev/mmu.h>
36
37#include <engine/dmaobj.h>
38#include <engine/fifo.h>
39#include <engine/sw.h>
40#include <engine/gr.h>
41#include <engine/disp.h>
42
43int
44nv04_identify(struct nvkm_device *device)
45{
46 switch (device->chipset) {
47 case 0x04:
48 device->cname = "NV04";
49 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
50 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
51 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
52 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv04_devinit_oclass;
53 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
54 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
55 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
56 device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
57 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
58 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
59 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
60 device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
61 device->oclass[NVDEV_ENGINE_SW ] = nv04_sw_oclass;
62 device->oclass[NVDEV_ENGINE_GR ] = &nv04_gr_oclass;
63 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
64 break;
65 case 0x05:
66 device->cname = "NV05";
67 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
68 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
69 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
70 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv05_devinit_oclass;
71 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
72 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
73 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
74 device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
75 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
76 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
77 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
78 device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
79 device->oclass[NVDEV_ENGINE_SW ] = nv04_sw_oclass;
80 device->oclass[NVDEV_ENGINE_GR ] = &nv04_gr_oclass;
81 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
82 break;
83 default:
84 nv_fatal(device, "unknown RIVA chipset\n");
85 return -EINVAL;
86 }
87
88 return 0;
89}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c
deleted file mode 100644
index 94a1ca45e94a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c
+++ /dev/null
@@ -1,204 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26#include <subdev/bios.h>
27#include <subdev/bus.h>
28#include <subdev/gpio.h>
29#include <subdev/i2c.h>
30#include <subdev/clk.h>
31#include <subdev/devinit.h>
32#include <subdev/mc.h>
33#include <subdev/timer.h>
34#include <subdev/fb.h>
35#include <subdev/instmem.h>
36#include <subdev/mmu.h>
37
38#include <engine/dmaobj.h>
39#include <engine/fifo.h>
40#include <engine/sw.h>
41#include <engine/gr.h>
42#include <engine/disp.h>
43
44int
45nv10_identify(struct nvkm_device *device)
46{
47 switch (device->chipset) {
48 case 0x10:
49 device->cname = "NV10";
50 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
51 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
52 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
53 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
54 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
55 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
56 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
57 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
58 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
59 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
60 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
61 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
62 device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
63 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
64 break;
65 case 0x15:
66 device->cname = "NV15";
67 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
68 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
69 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
70 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
71 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
72 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
73 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
75 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
76 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
77 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
79 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
80 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
81 device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
82 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
83 break;
84 case 0x16:
85 device->cname = "NV16";
86 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
87 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
88 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
89 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
90 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
91 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
92 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
93 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
94 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
95 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
96 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
97 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
98 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
99 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
100 device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
101 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
102 break;
103 case 0x1a:
104 device->cname = "nForce";
105 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
106 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
107 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
108 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
109 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
110 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
111 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
112 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
113 device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
114 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
115 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
116 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
117 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
118 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
119 device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
120 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
121 break;
122 case 0x11:
123 device->cname = "NV11";
124 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
125 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
126 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
127 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
128 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
129 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
130 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
131 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
132 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
133 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
134 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
135 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
136 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
137 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
138 device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
139 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
140 break;
141 case 0x17:
142 device->cname = "NV17";
143 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
144 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
145 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
146 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
147 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
148 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
149 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
150 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
151 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
152 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
153 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
154 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
155 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
156 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
157 device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
158 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
159 break;
160 case 0x1f:
161 device->cname = "nForce2";
162 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
163 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
164 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
165 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
166 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
167 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
168 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
169 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
170 device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
171 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
172 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
173 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
174 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
175 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
176 device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
177 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
178 break;
179 case 0x18:
180 device->cname = "NV18";
181 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
182 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
183 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
184 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
185 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
186 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
187 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
188 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
189 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
190 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
191 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
192 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
193 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
194 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
195 device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
196 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
197 break;
198 default:
199 nv_fatal(device, "unknown Celsius chipset\n");
200 return -EINVAL;
201 }
202
203 return 0;
204}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c
deleted file mode 100644
index d5ec8937df68..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c
+++ /dev/null
@@ -1,131 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26#include <subdev/bios.h>
27#include <subdev/bus.h>
28#include <subdev/gpio.h>
29#include <subdev/i2c.h>
30#include <subdev/clk.h>
31#include <subdev/therm.h>
32#include <subdev/devinit.h>
33#include <subdev/mc.h>
34#include <subdev/timer.h>
35#include <subdev/fb.h>
36#include <subdev/instmem.h>
37#include <subdev/mmu.h>
38
39#include <engine/dmaobj.h>
40#include <engine/fifo.h>
41#include <engine/sw.h>
42#include <engine/gr.h>
43#include <engine/disp.h>
44
45int
46nv20_identify(struct nvkm_device *device)
47{
48 switch (device->chipset) {
49 case 0x20:
50 device->cname = "NV20";
51 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
52 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
53 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
54 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
55 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
56 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
57 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
58 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
59 device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass;
60 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
61 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
62 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
63 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
64 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
65 device->oclass[NVDEV_ENGINE_GR ] = &nv20_gr_oclass;
66 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
67 break;
68 case 0x25:
69 device->cname = "NV25";
70 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
71 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
72 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
73 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
74 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
75 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
76 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
77 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
78 device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
79 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
80 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
81 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
82 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
83 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
84 device->oclass[NVDEV_ENGINE_GR ] = &nv25_gr_oclass;
85 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
86 break;
87 case 0x28:
88 device->cname = "NV28";
89 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
90 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
91 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
92 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
93 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
94 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
95 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
96 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
97 device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
98 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
99 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
100 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
101 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
102 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
103 device->oclass[NVDEV_ENGINE_GR ] = &nv25_gr_oclass;
104 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
105 break;
106 case 0x2a:
107 device->cname = "NV2A";
108 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
109 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
110 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
111 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
112 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
113 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
114 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
115 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
116 device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
117 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
118 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
119 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
120 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
121 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
122 device->oclass[NVDEV_ENGINE_GR ] = &nv2a_gr_oclass;
123 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
124 break;
125 default:
126 nv_fatal(device, "unknown Kelvin chipset\n");
127 return -EINVAL;
128 }
129
130 return 0;
131}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c
deleted file mode 100644
index dda09621e898..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c
+++ /dev/null
@@ -1,153 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26#include <subdev/bios.h>
27#include <subdev/bus.h>
28#include <subdev/gpio.h>
29#include <subdev/i2c.h>
30#include <subdev/clk.h>
31#include <subdev/devinit.h>
32#include <subdev/mc.h>
33#include <subdev/timer.h>
34#include <subdev/fb.h>
35#include <subdev/instmem.h>
36#include <subdev/mmu.h>
37
38#include <engine/dmaobj.h>
39#include <engine/fifo.h>
40#include <engine/sw.h>
41#include <engine/gr.h>
42#include <engine/mpeg.h>
43#include <engine/disp.h>
44
45int
46nv30_identify(struct nvkm_device *device)
47{
48 switch (device->chipset) {
49 case 0x30:
50 device->cname = "NV30";
51 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
52 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
53 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
54 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
55 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
56 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
57 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
58 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
59 device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
60 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
61 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
62 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
63 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
64 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
65 device->oclass[NVDEV_ENGINE_GR ] = &nv30_gr_oclass;
66 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
67 break;
68 case 0x35:
69 device->cname = "NV35";
70 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
71 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
72 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
73 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
74 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
75 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
76 device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
77 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
78 device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass;
79 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
80 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
81 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
82 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
83 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
84 device->oclass[NVDEV_ENGINE_GR ] = &nv35_gr_oclass;
85 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
86 break;
87 case 0x31:
88 device->cname = "NV31";
89 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
90 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
91 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
92 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
93 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
94 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
95 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
96 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
97 device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
98 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
99 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
100 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
101 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
102 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
103 device->oclass[NVDEV_ENGINE_GR ] = &nv30_gr_oclass;
104 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
105 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
106 break;
107 case 0x36:
108 device->cname = "NV36";
109 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
110 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
111 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
112 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
113 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
114 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
115 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
116 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
117 device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass;
118 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
119 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
120 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
121 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
122 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
123 device->oclass[NVDEV_ENGINE_GR ] = &nv35_gr_oclass;
124 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
125 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
126 break;
127 case 0x34:
128 device->cname = "NV34";
129 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
130 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
131 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
132 device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
133 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
134 device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
135 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
136 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
137 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
138 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
139 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
140 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
141 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
142 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
143 device->oclass[NVDEV_ENGINE_GR ] = &nv34_gr_oclass;
144 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
145 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
146 break;
147 default:
148 nv_fatal(device, "unknown Rankine chipset\n");
149 return -EINVAL;
150 }
151
152 return 0;
153}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c
deleted file mode 100644
index c6301361d14f..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c
+++ /dev/null
@@ -1,427 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26#include <subdev/bios.h>
27#include <subdev/bus.h>
28#include <subdev/mmu.h>
29#include <subdev/gpio.h>
30#include <subdev/i2c.h>
31#include <subdev/clk.h>
32#include <subdev/therm.h>
33#include <subdev/devinit.h>
34#include <subdev/mc.h>
35#include <subdev/timer.h>
36#include <subdev/fb.h>
37#include <subdev/instmem.h>
38#include <subdev/mmu.h>
39#include <subdev/volt.h>
40
41#include <engine/dmaobj.h>
42#include <engine/fifo.h>
43#include <engine/sw.h>
44#include <engine/gr.h>
45#include <engine/mpeg.h>
46#include <engine/disp.h>
47#include <engine/pm.h>
48
49int
50nv40_identify(struct nvkm_device *device)
51{
52 switch (device->chipset) {
53 case 0x40:
54 device->cname = "NV40";
55 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
56 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
57 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
58 device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
59 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
60 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
61 device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
62 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
63 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
64 device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass;
65 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
66 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
67 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
68 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
69 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
70 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
71 device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
72 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
73 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
74 device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
75 break;
76 case 0x41:
77 device->cname = "NV41";
78 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
79 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
80 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
81 device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
82 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
83 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
84 device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
85 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
86 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
87 device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
88 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
89 device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
90 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
91 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
92 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
93 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
94 device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
95 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
96 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
97 device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
98 break;
99 case 0x42:
100 device->cname = "NV42";
101 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
102 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
103 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
104 device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
105 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
106 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
107 device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
108 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
109 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
110 device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
111 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
112 device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
113 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
114 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
115 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
116 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
117 device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
118 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
119 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
120 device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
121 break;
122 case 0x43:
123 device->cname = "NV43";
124 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
125 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
126 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
127 device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
128 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
129 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
130 device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
131 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
132 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
133 device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
134 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
135 device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
136 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
137 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
138 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
139 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
140 device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
141 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
142 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
143 device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
144 break;
145 case 0x45:
146 device->cname = "NV45";
147 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
148 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
149 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
150 device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
151 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
152 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
153 device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
154 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
155 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
156 device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass;
157 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
158 device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
159 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
160 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
161 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
162 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
163 device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
164 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
165 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
166 device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
167 break;
168 case 0x47:
169 device->cname = "G70";
170 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
171 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
172 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
173 device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
174 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
175 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
176 device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
177 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
178 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
179 device->oclass[NVDEV_SUBDEV_FB ] = nv47_fb_oclass;
180 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
181 device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
182 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
183 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
184 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
185 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
186 device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
187 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
188 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
189 device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
190 break;
191 case 0x49:
192 device->cname = "G71";
193 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
194 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
195 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
196 device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
197 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
198 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
199 device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
200 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
201 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
202 device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass;
203 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
204 device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
205 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
206 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
207 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
208 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
209 device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
210 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
211 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
212 device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
213 break;
214 case 0x4b:
215 device->cname = "G73";
216 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
217 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
218 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
219 device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
220 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
221 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
222 device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
223 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
224 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
225 device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass;
226 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
227 device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
228 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
229 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
230 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
231 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
232 device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
233 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
234 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
235 device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
236 break;
237 case 0x44:
238 device->cname = "NV44";
239 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
240 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
241 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
242 device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
243 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
244 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
245 device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
246 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
247 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
248 device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass;
249 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
250 device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
251 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
252 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
253 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
254 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
255 device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
256 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
257 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
258 device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
259 break;
260 case 0x46:
261 device->cname = "G72";
262 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
263 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
264 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
265 device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
266 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
267 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
268 device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
269 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
270 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
271 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
272 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
273 device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
274 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
275 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
276 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
277 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
278 device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
279 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
280 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
281 device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
282 break;
283 case 0x4a:
284 device->cname = "NV44A";
285 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
286 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
287 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
288 device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
289 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
290 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
291 device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
292 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
293 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
294 device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass;
295 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
296 device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
297 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
298 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
299 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
300 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
301 device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
302 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
303 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
304 device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
305 break;
306 case 0x4c:
307 device->cname = "C61";
308 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
309 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
310 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
311 device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
312 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
313 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
314 device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
315 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
316 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
317 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
318 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
319 device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
320 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
321 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
322 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
323 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
324 device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
325 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
326 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
327 device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
328 break;
329 case 0x4e:
330 device->cname = "C51";
331 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
332 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
333 device->oclass[NVDEV_SUBDEV_I2C ] = nv4e_i2c_oclass;
334 device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
335 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
336 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
337 device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
338 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
339 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
340 device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass;
341 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
342 device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
343 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
344 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
345 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
346 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
347 device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
348 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
349 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
350 device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
351 break;
352 case 0x63:
353 device->cname = "C73";
354 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
355 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
356 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
357 device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
358 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
359 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
360 device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
361 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
362 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
363 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
364 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
365 device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
366 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
367 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
368 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
369 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
370 device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
371 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
372 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
373 device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
374 break;
375 case 0x67:
376 device->cname = "C67";
377 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
378 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
379 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
380 device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
381 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
382 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
383 device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
384 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
385 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
386 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
387 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
388 device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
389 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
390 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
391 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
392 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
393 device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
394 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
395 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
396 device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
397 break;
398 case 0x68:
399 device->cname = "C68";
400 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
401 device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
402 device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
403 device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
404 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
405 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
406 device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
407 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
408 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
409 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
410 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
411 device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
412 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
413 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
414 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
415 device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
416 device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
417 device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
418 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
419 device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
420 break;
421 default:
422 nv_fatal(device, "unknown Curie chipset\n");
423 return -EINVAL;
424 }
425
426 return 0;
427}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c
deleted file mode 100644
index 249b84454612..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c
+++ /dev/null
@@ -1,478 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26#include <subdev/bios.h>
27#include <subdev/bus.h>
28#include <subdev/gpio.h>
29#include <subdev/i2c.h>
30#include <subdev/fuse.h>
31#include <subdev/clk.h>
32#include <subdev/therm.h>
33#include <subdev/mxm.h>
34#include <subdev/devinit.h>
35#include <subdev/mc.h>
36#include <subdev/timer.h>
37#include <subdev/fb.h>
38#include <subdev/instmem.h>
39#include <subdev/mmu.h>
40#include <subdev/bar.h>
41#include <subdev/pmu.h>
42#include <subdev/volt.h>
43
44#include <engine/dmaobj.h>
45#include <engine/fifo.h>
46#include <engine/sw.h>
47#include <engine/gr.h>
48#include <engine/mpeg.h>
49#include <engine/vp.h>
50#include <engine/cipher.h>
51#include <engine/sec.h>
52#include <engine/bsp.h>
53#include <engine/msvld.h>
54#include <engine/mspdec.h>
55#include <engine/msppp.h>
56#include <engine/ce.h>
57#include <engine/disp.h>
58#include <engine/pm.h>
59
60int
61nv50_identify(struct nvkm_device *device)
62{
63 switch (device->chipset) {
64 case 0x50:
65 device->cname = "G80";
66 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
67 device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
68 device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
69 device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
70 device->oclass[NVDEV_SUBDEV_CLK ] = nv50_clk_oclass;
71 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
72 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
73 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv50_devinit_oclass;
74 device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
75 device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
76 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
77 device->oclass[NVDEV_SUBDEV_FB ] = nv50_fb_oclass;
78 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
79 device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
80 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
81 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
82 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
83 device->oclass[NVDEV_ENGINE_FIFO ] = nv50_fifo_oclass;
84 device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
85 device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
86 device->oclass[NVDEV_ENGINE_MPEG ] = &nv50_mpeg_oclass;
87 device->oclass[NVDEV_ENGINE_DISP ] = nv50_disp_oclass;
88 device->oclass[NVDEV_ENGINE_PM ] = nv50_pm_oclass;
89 break;
90 case 0x84:
91 device->cname = "G84";
92 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
93 device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
94 device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
95 device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
96 device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
97 device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
98 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
99 device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
100 device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
101 device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
102 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
103 device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
104 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
105 device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
106 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
107 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
108 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
109 device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
110 device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
111 device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
112 device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
113 device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
114 device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
115 device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
116 device->oclass[NVDEV_ENGINE_DISP ] = g84_disp_oclass;
117 device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
118 break;
119 case 0x86:
120 device->cname = "G86";
121 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
122 device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
123 device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
124 device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
125 device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
126 device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
127 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
128 device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
129 device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
130 device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
131 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
132 device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
133 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
134 device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
135 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
136 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
137 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
138 device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
139 device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
140 device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
141 device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
142 device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
143 device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
144 device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
145 device->oclass[NVDEV_ENGINE_DISP ] = g84_disp_oclass;
146 device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
147 break;
148 case 0x92:
149 device->cname = "G92";
150 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
151 device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
152 device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
153 device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
154 device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
155 device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
156 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
157 device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
158 device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
159 device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
160 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
161 device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
162 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
163 device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
164 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
165 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
166 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
167 device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
168 device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
169 device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
170 device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
171 device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
172 device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
173 device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
174 device->oclass[NVDEV_ENGINE_DISP ] = g84_disp_oclass;
175 device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
176 break;
177 case 0x94:
178 device->cname = "G94";
179 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
180 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
181 device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
182 device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
183 device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
184 device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
185 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
186 device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
187 device->oclass[NVDEV_SUBDEV_MC ] = g94_mc_oclass;
188 device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
189 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
190 device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
191 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
192 device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
193 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
194 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
195 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
196 device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
197 device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
198 device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
199 device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
200 device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
201 device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
202 device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
203 device->oclass[NVDEV_ENGINE_DISP ] = g94_disp_oclass;
204 device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
205 break;
206 case 0x96:
207 device->cname = "G96";
208 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
209 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
210 device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
211 device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
212 device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
213 device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
214 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
215 device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
216 device->oclass[NVDEV_SUBDEV_MC ] = g94_mc_oclass;
217 device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
218 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
219 device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
220 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
221 device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
222 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
223 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
224 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
225 device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
226 device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
227 device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
228 device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
229 device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
230 device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
231 device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
232 device->oclass[NVDEV_ENGINE_DISP ] = g94_disp_oclass;
233 device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
234 break;
235 case 0x98:
236 device->cname = "G98";
237 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
238 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
239 device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
240 device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
241 device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
242 device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
243 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
244 device->oclass[NVDEV_SUBDEV_DEVINIT] = g98_devinit_oclass;
245 device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
246 device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
247 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
248 device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
249 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
250 device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
251 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
252 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
253 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
254 device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
255 device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
256 device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
257 device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
258 device->oclass[NVDEV_ENGINE_SEC ] = &g98_sec_oclass;
259 device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
260 device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
261 device->oclass[NVDEV_ENGINE_DISP ] = g94_disp_oclass;
262 device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
263 break;
264 case 0xa0:
265 device->cname = "G200";
266 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
267 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
268 device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
269 device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
270 device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
271 device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
272 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
273 device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
274 device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
275 device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
276 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
277 device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
278 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
279 device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
280 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
281 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
282 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
283 device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
284 device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
285 device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
286 device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
287 device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
288 device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
289 device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
290 device->oclass[NVDEV_ENGINE_DISP ] = gt200_disp_oclass;
291 device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
292 break;
293 case 0xaa:
294 device->cname = "MCP77/MCP78";
295 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
296 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
297 device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
298 device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
299 device->oclass[NVDEV_SUBDEV_CLK ] = mcp77_clk_oclass;
300 device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
301 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
302 device->oclass[NVDEV_SUBDEV_DEVINIT] = g98_devinit_oclass;
303 device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
304 device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
305 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
306 device->oclass[NVDEV_SUBDEV_FB ] = mcp77_fb_oclass;
307 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
308 device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
309 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
310 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
311 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
312 device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
313 device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
314 device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
315 device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
316 device->oclass[NVDEV_ENGINE_SEC ] = &g98_sec_oclass;
317 device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
318 device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
319 device->oclass[NVDEV_ENGINE_DISP ] = g94_disp_oclass;
320 device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
321 break;
322 case 0xac:
323 device->cname = "MCP79/MCP7A";
324 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
325 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
326 device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
327 device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
328 device->oclass[NVDEV_SUBDEV_CLK ] = mcp77_clk_oclass;
329 device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
330 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
331 device->oclass[NVDEV_SUBDEV_DEVINIT] = g98_devinit_oclass;
332 device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
333 device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
334 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
335 device->oclass[NVDEV_SUBDEV_FB ] = mcp77_fb_oclass;
336 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
337 device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
338 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
339 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
340 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
341 device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
342 device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
343 device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
344 device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
345 device->oclass[NVDEV_ENGINE_SEC ] = &g98_sec_oclass;
346 device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
347 device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
348 device->oclass[NVDEV_ENGINE_DISP ] = g94_disp_oclass;
349 device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
350 break;
351 case 0xa3:
352 device->cname = "GT215";
353 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
354 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
355 device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
356 device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
357 device->oclass[NVDEV_SUBDEV_CLK ] = &gt215_clk_oclass;
358 device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
359 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
360 device->oclass[NVDEV_SUBDEV_DEVINIT] = gt215_devinit_oclass;
361 device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
362 device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
363 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
364 device->oclass[NVDEV_SUBDEV_FB ] = gt215_fb_oclass;
365 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
366 device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
367 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
368 device->oclass[NVDEV_SUBDEV_PMU ] = gt215_pmu_oclass;
369 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
370 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
371 device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
372 device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
373 device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
374 device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
375 device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
376 device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
377 device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
378 device->oclass[NVDEV_ENGINE_CE0 ] = &gt215_ce_oclass;
379 device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
380 device->oclass[NVDEV_ENGINE_PM ] = gt215_pm_oclass;
381 break;
382 case 0xa5:
383 device->cname = "GT216";
384 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
385 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
386 device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
387 device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
388 device->oclass[NVDEV_SUBDEV_CLK ] = &gt215_clk_oclass;
389 device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
390 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
391 device->oclass[NVDEV_SUBDEV_DEVINIT] = gt215_devinit_oclass;
392 device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
393 device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
394 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
395 device->oclass[NVDEV_SUBDEV_FB ] = gt215_fb_oclass;
396 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
397 device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
398 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
399 device->oclass[NVDEV_SUBDEV_PMU ] = gt215_pmu_oclass;
400 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
401 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
402 device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
403 device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
404 device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
405 device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
406 device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
407 device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
408 device->oclass[NVDEV_ENGINE_CE0 ] = &gt215_ce_oclass;
409 device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
410 device->oclass[NVDEV_ENGINE_PM ] = gt215_pm_oclass;
411 break;
412 case 0xa8:
413 device->cname = "GT218";
414 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
415 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
416 device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
417 device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
418 device->oclass[NVDEV_SUBDEV_CLK ] = &gt215_clk_oclass;
419 device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
420 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
421 device->oclass[NVDEV_SUBDEV_DEVINIT] = gt215_devinit_oclass;
422 device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
423 device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
424 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
425 device->oclass[NVDEV_SUBDEV_FB ] = gt215_fb_oclass;
426 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
427 device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
428 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
429 device->oclass[NVDEV_SUBDEV_PMU ] = gt215_pmu_oclass;
430 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
431 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
432 device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
433 device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
434 device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
435 device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
436 device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
437 device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
438 device->oclass[NVDEV_ENGINE_CE0 ] = &gt215_ce_oclass;
439 device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
440 device->oclass[NVDEV_ENGINE_PM ] = gt215_pm_oclass;
441 break;
442 case 0xaf:
443 device->cname = "MCP89";
444 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
445 device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
446 device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
447 device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
448 device->oclass[NVDEV_SUBDEV_CLK ] = &gt215_clk_oclass;
449 device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
450 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
451 device->oclass[NVDEV_SUBDEV_DEVINIT] = mcp89_devinit_oclass;
452 device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
453 device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
454 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
455 device->oclass[NVDEV_SUBDEV_FB ] = mcp89_fb_oclass;
456 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
457 device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
458 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
459 device->oclass[NVDEV_SUBDEV_PMU ] = gt215_pmu_oclass;
460 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
461 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
462 device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
463 device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
464 device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
465 device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
466 device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
467 device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
468 device->oclass[NVDEV_ENGINE_CE0 ] = &gt215_ce_oclass;
469 device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
470 device->oclass[NVDEV_ENGINE_PM ] = gt215_pm_oclass;
471 break;
472 default:
473 nv_fatal(device, "unknown Tesla chipset\n");
474 return -EINVAL;
475 }
476
477 return 0;
478}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
new file mode 100644
index 000000000000..9dd1cac81e80
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -0,0 +1,1685 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include <core/pci.h>
25#include "priv.h"
26
/* One entry of the NVIDIA PCI-ID list: a device ID, its default marketing
 * name, and an optional table of subsystem-specific overrides (the tables
 * below, terminated by an all-zero sentinel entry). */
27struct nvkm_device_pci_device {
28 u16 device; /* PCI device ID (vendor is NVIDIA — tables are named 10de_xxxx) */
29 const char *name; /* default product name for this device ID */
30 const struct nvkm_device_pci_vendor *vendor; /* optional per-board overrides; may be NULL */
31};
32
/* Per-board override, matched on PCI subsystem vendor+device IDs: a
 * non-NULL name replaces the default product name, and the embedded
 * quirk supplies board-specific fixups (see struct nvkm_device_quirk). */
33struct nvkm_device_pci_vendor {
34 u16 vendor; /* PCI subsystem vendor ID */
35 u16 device; /* PCI subsystem device ID */
36 const char *name; /* override name, or NULL to keep the default */
37 const struct nvkm_device_quirk quirk; /* board-specific quirk flags */
38};
39
/* TV-out quirk overrides for NVIDIA device 0x0189. */
40static const struct nvkm_device_pci_vendor
41nvkm_device_pci_10de_0189[] = {
42 /* Apple iMac G4 NV18 */
43 { 0x10de, 0x0010, NULL, { .tv_gpio = 4 } },
44 {}
45};
46
/* TV-out quirk overrides for NVIDIA device 0x01f0. */
47static const struct nvkm_device_pci_vendor
48nvkm_device_pci_10de_01f0[] = {
49 /* MSI nForce2 IGP */
50 { 0x1462, 0x5710, NULL, { .tv_pin_mask = 0xc } },
51 {}
52};
53
/* TV-out quirk overrides for NVIDIA device 0x0322. */
54static const struct nvkm_device_pci_vendor
55nvkm_device_pci_10de_0322[] = {
56 /* Zotac FX5200 */
57 { 0x19da, 0x1035, NULL, { .tv_pin_mask = 0xc } },
58 { 0x19da, 0x2035, NULL, { .tv_pin_mask = 0xc } },
59 {}
60};
61
/* Subsystem-ID name overrides for NVIDIA device 0x05e7. */
62static const struct nvkm_device_pci_vendor
63nvkm_device_pci_10de_05e7[] = {
64 { 0x10de, 0x0595, "Tesla T10 Processor" },
65 { 0x10de, 0x068f, "Tesla T10 Processor" },
66 { 0x10de, 0x0697, "Tesla M1060" },
67 { 0x10de, 0x0714, "Tesla M1060" },
68 { 0x10de, 0x0743, "Tesla M1060" },
69 {}
70};
71
/* Subsystem-ID name overrides for NVIDIA device 0x0609. */
72static const struct nvkm_device_pci_vendor
73nvkm_device_pci_10de_0609[] = {
74 { 0x106b, 0x00a7, "GeForce 8800 GS" },
75 {}
76};
77
/* Subsystem-ID name overrides for NVIDIA device 0x062e. */
78static const struct nvkm_device_pci_vendor
79nvkm_device_pci_10de_062e[] = {
80 { 0x106b, 0x0605, "GeForce GT 130" },
81 {}
82};
83
/* Subsystem-ID name overrides for NVIDIA device 0x0649. */
84static const struct nvkm_device_pci_vendor
85nvkm_device_pci_10de_0649[] = {
86 { 0x1043, 0x202d, "GeForce GT 220M" },
87 {}
88};
89
/* Subsystem-ID name overrides for NVIDIA device 0x0652. */
90static const struct nvkm_device_pci_vendor
91nvkm_device_pci_10de_0652[] = {
92 { 0x152d, 0x0850, "GeForce GT 240M LE" },
93 {}
94};
95
/* Subsystem-ID name overrides for NVIDIA device 0x0654. */
96static const struct nvkm_device_pci_vendor
97nvkm_device_pci_10de_0654[] = {
98 { 0x1043, 0x14a2, "GeForce GT 320M" },
99 { 0x1043, 0x14d2, "GeForce GT 320M" },
100 {}
101};
102
/* Subsystem-ID name overrides for NVIDIA device 0x0655. */
103static const struct nvkm_device_pci_vendor
104nvkm_device_pci_10de_0655[] = {
105 { 0x106b, 0x0633, "GeForce GT 120" },
106 {}
107};
108
/* Subsystem-ID name overrides for NVIDIA device 0x0656. */
109static const struct nvkm_device_pci_vendor
110nvkm_device_pci_10de_0656[] = {
111 { 0x106b, 0x0693, "GeForce GT 120" },
112 {}
113};
114
/* Subsystem-ID name overrides for NVIDIA device 0x06d1. */
115static const struct nvkm_device_pci_vendor
116nvkm_device_pci_10de_06d1[] = {
117 { 0x10de, 0x0771, "Tesla C2050" },
118 { 0x10de, 0x0772, "Tesla C2070" },
119 {}
120};
121
/* Subsystem-ID name overrides for NVIDIA device 0x06d2. */
122static const struct nvkm_device_pci_vendor
123nvkm_device_pci_10de_06d2[] = {
124 { 0x10de, 0x088f, "Tesla X2070" },
125 {}
126};
127
/* Subsystem-ID name overrides for NVIDIA device 0x06de. */
128static const struct nvkm_device_pci_vendor
129nvkm_device_pci_10de_06de[] = {
130 { 0x10de, 0x0773, "Tesla S2050" },
131 { 0x10de, 0x082f, "Tesla M2050" },
132 { 0x10de, 0x0840, "Tesla X2070" },
133 { 0x10de, 0x0842, "Tesla M2050" },
134 { 0x10de, 0x0846, "Tesla M2050" },
135 { 0x10de, 0x0866, "Tesla M2050" },
136 { 0x10de, 0x0907, "Tesla M2050" },
137 { 0x10de, 0x091e, "Tesla M2050" },
138 {}
139};
140
/* Subsystem-ID name overrides for NVIDIA device 0x06e8. */
141static const struct nvkm_device_pci_vendor
142nvkm_device_pci_10de_06e8[] = {
143 { 0x103c, 0x360b, "GeForce 9200M GE" },
144 {}
145};
146
/* Subsystem-ID name overrides for NVIDIA device 0x06f9. */
147static const struct nvkm_device_pci_vendor
148nvkm_device_pci_10de_06f9[] = {
149 { 0x10de, 0x060d, "Quadro FX 370 Low Profile" },
150 {}
151};
152
/* Subsystem-ID name overrides for NVIDIA device 0x06ff. */
153static const struct nvkm_device_pci_vendor
154nvkm_device_pci_10de_06ff[] = {
155 { 0x10de, 0x0711, "HICx8 + Graphics" },
156 {}
157};
158
/* Subsystem-ID name overrides for NVIDIA device 0x0866. */
159static const struct nvkm_device_pci_vendor
160nvkm_device_pci_10de_0866[] = {
161 { 0x106b, 0x00b1, "GeForce 9400M" },
162 {}
163};
164
/* Subsystem-ID name overrides for NVIDIA device 0x0872. */
165static const struct nvkm_device_pci_vendor
166nvkm_device_pci_10de_0872[] = {
167 { 0x1043, 0x1c42, "GeForce G205M" },
168 {}
169};
170
/* Subsystem-ID name overrides for NVIDIA device 0x0873. */
171static const struct nvkm_device_pci_vendor
172nvkm_device_pci_10de_0873[] = {
173 { 0x1043, 0x1c52, "GeForce G205M" },
174 {}
175};
176
/* Subsystem-ID name overrides for NVIDIA device 0x0a6e. */
177static const struct nvkm_device_pci_vendor
178nvkm_device_pci_10de_0a6e[] = {
179 { 0x17aa, 0x3607, "Second Generation ION" },
180 {}
181};
182
/* Subsystem-ID name overrides for NVIDIA device 0x0a70. */
183static const struct nvkm_device_pci_vendor
184nvkm_device_pci_10de_0a70[] = {
185 { 0x17aa, 0x3605, "Second Generation ION" },
186 { 0x17aa, 0x3617, "Second Generation ION" },
187 {}
188};
189
/* Subsystem-ID name overrides for NVIDIA device 0x0a73. */
190static const struct nvkm_device_pci_vendor
191nvkm_device_pci_10de_0a73[] = {
192 { 0x17aa, 0x3607, "Second Generation ION" },
193 { 0x17aa, 0x3610, "Second Generation ION" },
194 {}
195};
196
/* Subsystem-ID name overrides for NVIDIA device 0x0a74. */
197static const struct nvkm_device_pci_vendor
198nvkm_device_pci_10de_0a74[] = {
199 { 0x17aa, 0x903a, "GeForce G210" },
200 {}
201};
202
/* Subsystem-ID name overrides for NVIDIA device 0x0a75. */
203static const struct nvkm_device_pci_vendor
204nvkm_device_pci_10de_0a75[] = {
205 { 0x17aa, 0x3605, "Second Generation ION" },
206 {}
207};
208
/* Subsystem-ID name overrides for NVIDIA device 0x0a7a. */
209static const struct nvkm_device_pci_vendor
210nvkm_device_pci_10de_0a7a[] = {
211 { 0x1462, 0xaa51, "GeForce 405" },
212 { 0x1462, 0xaa58, "GeForce 405" },
213 { 0x1462, 0xac71, "GeForce 405" },
214 { 0x1462, 0xac82, "GeForce 405" },
215 { 0x1642, 0x3980, "GeForce 405" },
216 { 0x17aa, 0x3950, "GeForce 405M" },
217 { 0x17aa, 0x397d, "GeForce 405M" },
218 { 0x1b0a, 0x90b4, "GeForce 405" },
219 { 0x1bfd, 0x0003, "GeForce 405" },
220 { 0x1bfd, 0x8006, "GeForce 405" },
221 {}
222};
223
/* Subsystem-ID name overrides for NVIDIA device 0x0dd8. */
224static const struct nvkm_device_pci_vendor
225nvkm_device_pci_10de_0dd8[] = {
226 { 0x10de, 0x0914, "Quadro 2000D" },
227 {}
228};
229
/* Subsystem-ID name overrides for NVIDIA device 0x0de9. */
230static const struct nvkm_device_pci_vendor
231nvkm_device_pci_10de_0de9[] = {
232 { 0x1025, 0x0692, "GeForce GT 620M" },
233 { 0x1025, 0x0725, "GeForce GT 620M" },
234 { 0x1025, 0x0728, "GeForce GT 620M" },
235 { 0x1025, 0x072b, "GeForce GT 620M" },
236 { 0x1025, 0x072e, "GeForce GT 620M" },
237 { 0x1025, 0x0753, "GeForce GT 620M" },
238 { 0x1025, 0x0754, "GeForce GT 620M" },
239 { 0x17aa, 0x3977, "GeForce GT 640M LE" },
240 { 0x1b0a, 0x2210, "GeForce GT 635M" },
241 {}
242};
243
/* Subsystem-ID name overrides for NVIDIA device 0x0dea. */
244static const struct nvkm_device_pci_vendor
245nvkm_device_pci_10de_0dea[] = {
246 { 0x17aa, 0x365a, "GeForce 615" },
247 { 0x17aa, 0x365b, "GeForce 615" },
248 { 0x17aa, 0x365e, "GeForce 615" },
249 { 0x17aa, 0x3660, "GeForce 615" },
250 { 0x17aa, 0x366c, "GeForce 615" },
251 {}
252};
253
/* Subsystem-ID name overrides for NVIDIA device 0x0df4. */
254static const struct nvkm_device_pci_vendor
255nvkm_device_pci_10de_0df4[] = {
256 { 0x152d, 0x0952, "GeForce GT 630M" },
257 { 0x152d, 0x0953, "GeForce GT 630M" },
258 {}
259};
260
/* Subsystem-ID name overrides for NVIDIA device 0x0fd2. */
261static const struct nvkm_device_pci_vendor
262nvkm_device_pci_10de_0fd2[] = {
263 { 0x1028, 0x0595, "GeForce GT 640M LE" },
264 { 0x1028, 0x05b2, "GeForce GT 640M LE" },
265 {}
266};
267
/* Subsystem-ID name overrides for NVIDIA device 0x0fe3. */
268static const struct nvkm_device_pci_vendor
269nvkm_device_pci_10de_0fe3[] = {
270 { 0x103c, 0x2b16, "GeForce GT 745A" },
271 { 0x17aa, 0x3675, "GeForce GT 745A" },
272 {}
273};
274
/* Subsystem-ID name overrides for NVIDIA device 0x104b. */
275static const struct nvkm_device_pci_vendor
276nvkm_device_pci_10de_104b[] = {
277 { 0x1043, 0x844c, "GeForce GT 625" },
278 { 0x1043, 0x846b, "GeForce GT 625" },
279 { 0x1462, 0xb590, "GeForce GT 625" },
280 { 0x174b, 0x0625, "GeForce GT 625" },
281 { 0x174b, 0xa625, "GeForce GT 625" },
282 {}
283};
284
/* Subsystem-ID name overrides for NVIDIA device 0x1058. */
285static const struct nvkm_device_pci_vendor
286nvkm_device_pci_10de_1058[] = {
287 { 0x103c, 0x2af1, "GeForce 610" },
288 { 0x17aa, 0x3682, "GeForce 800A" },
289 { 0x17aa, 0x3692, "GeForce 705A" },
290 { 0x17aa, 0x3695, "GeForce 800A" },
291 { 0x17aa, 0x36a8, "GeForce 800A" },
292 { 0x17aa, 0x36ac, "GeForce 800A" },
293 { 0x17aa, 0x36ad, "GeForce 800A" },
 /* NOTE(review): 0x705a is not a registered PCI subsystem-vendor ID and the
  * subsystem-device duplicates the 0x17aa/0x3682 entry above — looks like a
  * transposition; verify against the pci.ids database before changing. */
294 { 0x705a, 0x3682, "GeForce 800A" },
295 {}
296};
297
/* Subsystem-ID name overrides for NVIDIA device 0x105b. */
298static const struct nvkm_device_pci_vendor
299nvkm_device_pci_10de_105b[] = {
300 { 0x103c, 0x2afb, "GeForce 705A" },
301 { 0x17aa, 0x36a1, "GeForce 800A" },
302 {}
303};
304
/* Subsystem-ID name overrides for NVIDIA device 0x1091. */
305static const struct nvkm_device_pci_vendor
306nvkm_device_pci_10de_1091[] = {
307 { 0x10de, 0x088e, "Tesla X2090" },
308 { 0x10de, 0x0891, "Tesla X2090" },
309 { 0x10de, 0x0974, "Tesla X2090" },
310 { 0x10de, 0x098d, "Tesla X2090" },
311 {}
312};
313
/* Subsystem-ID name overrides for NVIDIA device 0x1096. */
314static const struct nvkm_device_pci_vendor
315nvkm_device_pci_10de_1096[] = {
316 { 0x10de, 0x0911, "Tesla C2050" },
317 {}
318};
319
320static const struct nvkm_device_pci_vendor
321nvkm_device_pci_10de_1140[] = {
322 { 0x1019, 0x999f, "GeForce GT 720M" },
323 { 0x1025, 0x0600, "GeForce GT 620M" },
324 { 0x1025, 0x0606, "GeForce GT 620M" },
325 { 0x1025, 0x064a, "GeForce GT 620M" },
326 { 0x1025, 0x064c, "GeForce GT 620M" },
327 { 0x1025, 0x067a, "GeForce GT 620M" },
328 { 0x1025, 0x0680, "GeForce GT 620M" },
329 { 0x1025, 0x0686, "GeForce 710M" },
330 { 0x1025, 0x0689, "GeForce 710M" },
331 { 0x1025, 0x068b, "GeForce 710M" },
332 { 0x1025, 0x068d, "GeForce 710M" },
333 { 0x1025, 0x068e, "GeForce 710M" },
334 { 0x1025, 0x0691, "GeForce 710M" },
335 { 0x1025, 0x0692, "GeForce GT 620M" },
336 { 0x1025, 0x0694, "GeForce GT 620M" },
337 { 0x1025, 0x0702, "GeForce GT 620M" },
338 { 0x1025, 0x0719, "GeForce GT 620M" },
339 { 0x1025, 0x0725, "GeForce GT 620M" },
340 { 0x1025, 0x0728, "GeForce GT 620M" },
341 { 0x1025, 0x072b, "GeForce GT 620M" },
342 { 0x1025, 0x072e, "GeForce GT 620M" },
343 { 0x1025, 0x0732, "GeForce GT 620M" },
344 { 0x1025, 0x0763, "GeForce GT 720M" },
345 { 0x1025, 0x0773, "GeForce 710M" },
346 { 0x1025, 0x0774, "GeForce 710M" },
347 { 0x1025, 0x0776, "GeForce GT 720M" },
348 { 0x1025, 0x077a, "GeForce 710M" },
349 { 0x1025, 0x077b, "GeForce 710M" },
350 { 0x1025, 0x077c, "GeForce 710M" },
351 { 0x1025, 0x077d, "GeForce 710M" },
352 { 0x1025, 0x077e, "GeForce 710M" },
353 { 0x1025, 0x077f, "GeForce 710M" },
354 { 0x1025, 0x0781, "GeForce GT 720M" },
355 { 0x1025, 0x0798, "GeForce GT 720M" },
356 { 0x1025, 0x0799, "GeForce GT 720M" },
357 { 0x1025, 0x079b, "GeForce GT 720M" },
358 { 0x1025, 0x079c, "GeForce GT 720M" },
359 { 0x1025, 0x0807, "GeForce GT 720M" },
360 { 0x1025, 0x0821, "GeForce 820M" },
361 { 0x1025, 0x0823, "GeForce GT 720M" },
362 { 0x1025, 0x0830, "GeForce GT 720M" },
363 { 0x1025, 0x0833, "GeForce GT 720M" },
364 { 0x1025, 0x0837, "GeForce GT 720M" },
365 { 0x1025, 0x083e, "GeForce 820M" },
366 { 0x1025, 0x0841, "GeForce 710M" },
367 { 0x1025, 0x0853, "GeForce 820M" },
368 { 0x1025, 0x0854, "GeForce 820M" },
369 { 0x1025, 0x0855, "GeForce 820M" },
370 { 0x1025, 0x0856, "GeForce 820M" },
371 { 0x1025, 0x0857, "GeForce 820M" },
372 { 0x1025, 0x0858, "GeForce 820M" },
373 { 0x1025, 0x0863, "GeForce 820M" },
374 { 0x1025, 0x0868, "GeForce 820M" },
375 { 0x1025, 0x0869, "GeForce 810M" },
376 { 0x1025, 0x0873, "GeForce 820M" },
377 { 0x1025, 0x0878, "GeForce 820M" },
378 { 0x1025, 0x087b, "GeForce 820M" },
379 { 0x1025, 0x087f, "GeForce 820M" },
380 { 0x1025, 0x0881, "GeForce 820M" },
381 { 0x1025, 0x0885, "GeForce 820M" },
382 { 0x1025, 0x088a, "GeForce 820M" },
383 { 0x1025, 0x089b, "GeForce 820M" },
384 { 0x1025, 0x0921, "GeForce 820M" },
385 { 0x1025, 0x092e, "GeForce 810M" },
386 { 0x1025, 0x092f, "GeForce 820M" },
387 { 0x1025, 0x0932, "GeForce 820M" },
388 { 0x1025, 0x093a, "GeForce 820M" },
389 { 0x1025, 0x093c, "GeForce 820M" },
390 { 0x1025, 0x093f, "GeForce 820M" },
391 { 0x1025, 0x0941, "GeForce 820M" },
392 { 0x1025, 0x0945, "GeForce 820M" },
393 { 0x1025, 0x0954, "GeForce 820M" },
394 { 0x1025, 0x0965, "GeForce 820M" },
395 { 0x1028, 0x054d, "GeForce GT 630M" },
396 { 0x1028, 0x054e, "GeForce GT 630M" },
397 { 0x1028, 0x0554, "GeForce GT 620M" },
398 { 0x1028, 0x0557, "GeForce GT 620M" },
399 { 0x1028, 0x0562, "GeForce GT625M" },
400 { 0x1028, 0x0565, "GeForce GT 630M" },
401 { 0x1028, 0x0568, "GeForce GT 630M" },
402 { 0x1028, 0x0590, "GeForce GT 630M" },
403 { 0x1028, 0x0592, "GeForce GT625M" },
404 { 0x1028, 0x0594, "GeForce GT625M" },
405 { 0x1028, 0x0595, "GeForce GT625M" },
406 { 0x1028, 0x05a2, "GeForce GT625M" },
407 { 0x1028, 0x05b1, "GeForce GT625M" },
408 { 0x1028, 0x05b3, "GeForce GT625M" },
409 { 0x1028, 0x05da, "GeForce GT 630M" },
410 { 0x1028, 0x05de, "GeForce GT 720M" },
411 { 0x1028, 0x05e0, "GeForce GT 720M" },
412 { 0x1028, 0x05e8, "GeForce GT 630M" },
413 { 0x1028, 0x05f4, "GeForce GT 720M" },
414 { 0x1028, 0x060f, "GeForce GT 720M" },
415 { 0x1028, 0x062f, "GeForce GT 720M" },
416 { 0x1028, 0x064e, "GeForce 820M" },
417 { 0x1028, 0x0652, "GeForce 820M" },
418 { 0x1028, 0x0653, "GeForce 820M" },
419 { 0x1028, 0x0655, "GeForce 820M" },
420 { 0x1028, 0x065e, "GeForce 820M" },
421 { 0x1028, 0x0662, "GeForce 820M" },
422 { 0x1028, 0x068d, "GeForce 820M" },
423 { 0x1028, 0x06ad, "GeForce 820M" },
424 { 0x1028, 0x06ae, "GeForce 820M" },
425 { 0x1028, 0x06af, "GeForce 820M" },
426 { 0x1028, 0x06b0, "GeForce 820M" },
427 { 0x1028, 0x06c0, "GeForce 820M" },
428 { 0x1028, 0x06c1, "GeForce 820M" },
429 { 0x103c, 0x18ef, "GeForce GT 630M" },
430 { 0x103c, 0x18f9, "GeForce GT 630M" },
431 { 0x103c, 0x18fb, "GeForce GT 630M" },
432 { 0x103c, 0x18fd, "GeForce GT 630M" },
433 { 0x103c, 0x18ff, "GeForce GT 630M" },
434 { 0x103c, 0x218a, "GeForce 820M" },
435 { 0x103c, 0x21bb, "GeForce 820M" },
436 { 0x103c, 0x21bc, "GeForce 820M" },
437 { 0x103c, 0x220e, "GeForce 820M" },
438 { 0x103c, 0x2210, "GeForce 820M" },
439 { 0x103c, 0x2212, "GeForce 820M" },
440 { 0x103c, 0x2214, "GeForce 820M" },
441 { 0x103c, 0x2218, "GeForce 820M" },
442 { 0x103c, 0x225b, "GeForce 820M" },
443 { 0x103c, 0x225d, "GeForce 820M" },
444 { 0x103c, 0x226d, "GeForce 820M" },
445 { 0x103c, 0x226f, "GeForce 820M" },
446 { 0x103c, 0x22d2, "GeForce 820M" },
447 { 0x103c, 0x22d9, "GeForce 820M" },
448 { 0x103c, 0x2335, "GeForce 820M" },
449 { 0x103c, 0x2337, "GeForce 820M" },
450 { 0x103c, 0x2aef, "GeForce GT 720A" },
451 { 0x103c, 0x2af9, "GeForce 710A" },
452 { 0x1043, 0x10dd, "NVS 5200M" },
453 { 0x1043, 0x10ed, "NVS 5200M" },
454 { 0x1043, 0x11fd, "GeForce GT 720M" },
455 { 0x1043, 0x124d, "GeForce GT 720M" },
456 { 0x1043, 0x126d, "GeForce GT 720M" },
457 { 0x1043, 0x131d, "GeForce GT 720M" },
458 { 0x1043, 0x13fd, "GeForce GT 720M" },
459 { 0x1043, 0x14c7, "GeForce GT 720M" },
460 { 0x1043, 0x1507, "GeForce GT 620M" },
461 { 0x1043, 0x15ad, "GeForce 820M" },
462 { 0x1043, 0x15ed, "GeForce 820M" },
463 { 0x1043, 0x160d, "GeForce 820M" },
464 { 0x1043, 0x163d, "GeForce 820M" },
465 { 0x1043, 0x165d, "GeForce 820M" },
466 { 0x1043, 0x166d, "GeForce 820M" },
467 { 0x1043, 0x16cd, "GeForce 820M" },
468 { 0x1043, 0x16dd, "GeForce 820M" },
469 { 0x1043, 0x170d, "GeForce 820M" },
470 { 0x1043, 0x176d, "GeForce 820M" },
471 { 0x1043, 0x178d, "GeForce 820M" },
472 { 0x1043, 0x179d, "GeForce 820M" },
473 { 0x1043, 0x2132, "GeForce GT 620M" },
474 { 0x1043, 0x2136, "NVS 5200M" },
475 { 0x1043, 0x21ba, "GeForce GT 720M" },
476 { 0x1043, 0x21fa, "GeForce GT 720M" },
477 { 0x1043, 0x220a, "GeForce GT 720M" },
478 { 0x1043, 0x221a, "GeForce GT 720M" },
479 { 0x1043, 0x223a, "GeForce GT 710M" },
480 { 0x1043, 0x224a, "GeForce GT 710M" },
481 { 0x1043, 0x227a, "GeForce 820M" },
482 { 0x1043, 0x228a, "GeForce 820M" },
483 { 0x1043, 0x22fa, "GeForce 820M" },
484 { 0x1043, 0x232a, "GeForce 820M" },
485 { 0x1043, 0x233a, "GeForce 820M" },
486 { 0x1043, 0x235a, "GeForce 820M" },
487 { 0x1043, 0x236a, "GeForce 820M" },
488 { 0x1043, 0x238a, "GeForce 820M" },
489 { 0x1043, 0x8595, "GeForce GT 720M" },
490 { 0x1043, 0x85ea, "GeForce GT 720M" },
491 { 0x1043, 0x85eb, "GeForce 820M" },
492 { 0x1043, 0x85ec, "GeForce 820M" },
493 { 0x1043, 0x85ee, "GeForce GT 720M" },
494 { 0x1043, 0x85f3, "GeForce 820M" },
495 { 0x1043, 0x860e, "GeForce 820M" },
496 { 0x1043, 0x861a, "GeForce 820M" },
497 { 0x1043, 0x861b, "GeForce 820M" },
498 { 0x1043, 0x8628, "GeForce 820M" },
499 { 0x1043, 0x8643, "GeForce 820M" },
500 { 0x1043, 0x864c, "GeForce 820M" },
501 { 0x1043, 0x8652, "GeForce 820M" },
502 { 0x1043, 0x8660, "GeForce 820M" },
503 { 0x1043, 0x8661, "GeForce 820M" },
504 { 0x105b, 0x0dac, "GeForce GT 720M" },
505 { 0x105b, 0x0dad, "GeForce GT 720M" },
506 { 0x105b, 0x0ef3, "GeForce GT 720M" },
507 { 0x10cf, 0x17f5, "GeForce GT 720M" },
508 { 0x1179, 0xfa01, "GeForce 710M" },
509 { 0x1179, 0xfa02, "GeForce 710M" },
510 { 0x1179, 0xfa03, "GeForce 710M" },
511 { 0x1179, 0xfa05, "GeForce 710M" },
512 { 0x1179, 0xfa11, "GeForce 710M" },
513 { 0x1179, 0xfa13, "GeForce 710M" },
514 { 0x1179, 0xfa18, "GeForce 710M" },
515 { 0x1179, 0xfa19, "GeForce 710M" },
516 { 0x1179, 0xfa21, "GeForce 710M" },
517 { 0x1179, 0xfa23, "GeForce 710M" },
518 { 0x1179, 0xfa2a, "GeForce 710M" },
519 { 0x1179, 0xfa32, "GeForce 710M" },
520 { 0x1179, 0xfa33, "GeForce 710M" },
521 { 0x1179, 0xfa36, "GeForce 710M" },
522 { 0x1179, 0xfa38, "GeForce 710M" },
523 { 0x1179, 0xfa42, "GeForce 710M" },
524 { 0x1179, 0xfa43, "GeForce 710M" },
525 { 0x1179, 0xfa45, "GeForce 710M" },
526 { 0x1179, 0xfa47, "GeForce 710M" },
527 { 0x1179, 0xfa49, "GeForce 710M" },
528 { 0x1179, 0xfa58, "GeForce 710M" },
529 { 0x1179, 0xfa59, "GeForce 710M" },
530 { 0x1179, 0xfa88, "GeForce 710M" },
531 { 0x1179, 0xfa89, "GeForce 710M" },
532 { 0x144d, 0xb092, "GeForce GT 620M" },
533 { 0x144d, 0xc0d5, "GeForce GT 630M" },
534 { 0x144d, 0xc0d7, "GeForce GT 620M" },
535 { 0x144d, 0xc0e2, "NVS 5200M" },
536 { 0x144d, 0xc0e3, "NVS 5200M" },
537 { 0x144d, 0xc0e4, "NVS 5200M" },
538 { 0x144d, 0xc10d, "GeForce 820M" },
539 { 0x144d, 0xc652, "GeForce GT 620M" },
540 { 0x144d, 0xc709, "GeForce 710M" },
541 { 0x144d, 0xc711, "GeForce 710M" },
542 { 0x144d, 0xc736, "GeForce 710M" },
543 { 0x144d, 0xc737, "GeForce 710M" },
544 { 0x144d, 0xc745, "GeForce 820M" },
545 { 0x144d, 0xc750, "GeForce 820M" },
546 { 0x1462, 0x10b8, "GeForce GT 710M" },
547 { 0x1462, 0x10e9, "GeForce GT 720M" },
548 { 0x1462, 0x1116, "GeForce 820M" },
549 { 0x1462, 0xaa33, "GeForce 720M" },
550 { 0x1462, 0xaaa2, "GeForce GT 720M" },
551 { 0x1462, 0xaaa3, "GeForce 820M" },
552 { 0x1462, 0xacb2, "GeForce GT 720M" },
553 { 0x1462, 0xacc1, "GeForce GT 720M" },
554 { 0x1462, 0xae61, "GeForce 720M" },
555 { 0x1462, 0xae65, "GeForce GT 720M" },
556 { 0x1462, 0xae6a, "GeForce 820M" },
557 { 0x1462, 0xae71, "GeForce GT 720M" },
558 { 0x14c0, 0x0083, "GeForce 820M" },
559 { 0x152d, 0x0926, "GeForce 620M" },
560 { 0x152d, 0x0982, "GeForce GT 630M" },
561 { 0x152d, 0x0983, "GeForce GT 630M" },
562 { 0x152d, 0x1005, "GeForce GT820M" },
563 { 0x152d, 0x1012, "GeForce 710M" },
564 { 0x152d, 0x1019, "GeForce 820M" },
565 { 0x152d, 0x1030, "GeForce GT 630M" },
566 { 0x152d, 0x1055, "GeForce 710M" },
567 { 0x152d, 0x1067, "GeForce GT 720M" },
568 { 0x152d, 0x1092, "GeForce 820M" },
569 { 0x17aa, 0x2200, "NVS 5200M" },
570 { 0x17aa, 0x2213, "GeForce GT 720M" },
571 { 0x17aa, 0x2220, "GeForce GT 720M" },
572 { 0x17aa, 0x309c, "GeForce GT 720A" },
573 { 0x17aa, 0x30b4, "GeForce 820A" },
574 { 0x17aa, 0x30b7, "GeForce 720A" },
575 { 0x17aa, 0x30e4, "GeForce 820A" },
576 { 0x17aa, 0x361b, "GeForce 820A" },
577 { 0x17aa, 0x361c, "GeForce 820A" },
578 { 0x17aa, 0x361d, "GeForce 820A" },
579 { 0x17aa, 0x3656, "GeForce GT620M" },
580 { 0x17aa, 0x365a, "GeForce 705M" },
581 { 0x17aa, 0x365e, "GeForce 800M" },
582 { 0x17aa, 0x3661, "GeForce 820A" },
583 { 0x17aa, 0x366c, "GeForce 800M" },
584 { 0x17aa, 0x3685, "GeForce 800M" },
585 { 0x17aa, 0x3686, "GeForce 800M" },
586 { 0x17aa, 0x3687, "GeForce 705A" },
587 { 0x17aa, 0x3696, "GeForce 820A" },
588 { 0x17aa, 0x369b, "GeForce 820A" },
589 { 0x17aa, 0x369c, "GeForce 820A" },
590 { 0x17aa, 0x369d, "GeForce 820A" },
591 { 0x17aa, 0x369e, "GeForce 820A" },
592 { 0x17aa, 0x36a6, "GeForce 820A" },
593 { 0x17aa, 0x36a7, "GeForce 820A" },
594 { 0x17aa, 0x36a9, "GeForce 820A" },
595 { 0x17aa, 0x36af, "GeForce 820A" },
596 { 0x17aa, 0x36b0, "GeForce 820A" },
597 { 0x17aa, 0x36b6, "GeForce 820A" },
598 { 0x17aa, 0x3800, "GeForce GT 720M" },
599 { 0x17aa, 0x3801, "GeForce GT 720M" },
600 { 0x17aa, 0x3802, "GeForce GT 720M" },
601 { 0x17aa, 0x3803, "GeForce GT 720M" },
602 { 0x17aa, 0x3804, "GeForce GT 720M" },
603 { 0x17aa, 0x3806, "GeForce GT 720M" },
604 { 0x17aa, 0x3808, "GeForce GT 720M" },
605 { 0x17aa, 0x380d, "GeForce 820M" },
606 { 0x17aa, 0x380e, "GeForce 820M" },
607 { 0x17aa, 0x380f, "GeForce 820M" },
608 { 0x17aa, 0x3811, "GeForce 820M" },
609 { 0x17aa, 0x3812, "GeForce 820M" },
610 { 0x17aa, 0x3813, "GeForce 820M" },
611 { 0x17aa, 0x3816, "GeForce 820M" },
612 { 0x17aa, 0x3817, "GeForce 820M" },
613 { 0x17aa, 0x3818, "GeForce 820M" },
614 { 0x17aa, 0x381a, "GeForce 820M" },
615 { 0x17aa, 0x381c, "GeForce 820M" },
616 { 0x17aa, 0x381d, "GeForce 820M" },
617 { 0x17aa, 0x3901, "GeForce 610M" },
618 { 0x17aa, 0x3902, "GeForce 710M" },
619 { 0x17aa, 0x3903, "GeForce 710M" },
620 { 0x17aa, 0x3904, "GeForce GT 625M" },
621 { 0x17aa, 0x3905, "GeForce GT 720M" },
622 { 0x17aa, 0x3907, "GeForce 820M" },
623 { 0x17aa, 0x3910, "GeForce GT 720M" },
624 { 0x17aa, 0x3912, "GeForce GT 720M" },
625 { 0x17aa, 0x3913, "GeForce 820M" },
626 { 0x17aa, 0x3915, "GeForce 820M" },
627 { 0x17aa, 0x3983, "GeForce 610M" },
628 { 0x17aa, 0x5001, "GeForce 610M" },
629 { 0x17aa, 0x5003, "GeForce GT 720M" },
630 { 0x17aa, 0x5005, "GeForce 705M" },
631 { 0x17aa, 0x500d, "GeForce GT 620M" },
632 { 0x17aa, 0x5014, "GeForce 710M" },
633 { 0x17aa, 0x5017, "GeForce 710M" },
634 { 0x17aa, 0x5019, "GeForce 710M" },
635 { 0x17aa, 0x501a, "GeForce 710M" },
636 { 0x17aa, 0x501f, "GeForce GT 720M" },
637 { 0x17aa, 0x5025, "GeForce 710M" },
638 { 0x17aa, 0x5027, "GeForce 710M" },
639 { 0x17aa, 0x502a, "GeForce 710M" },
640 { 0x17aa, 0x502b, "GeForce GT 720M" },
641 { 0x17aa, 0x502d, "GeForce 710M" },
642 { 0x17aa, 0x502e, "GeForce GT 720M" },
643 { 0x17aa, 0x502f, "GeForce GT 720M" },
644 { 0x17aa, 0x5030, "GeForce 705M" },
645 { 0x17aa, 0x5031, "GeForce 705M" },
646 { 0x17aa, 0x5032, "GeForce 820M" },
647 { 0x17aa, 0x5033, "GeForce 820M" },
648 { 0x17aa, 0x503e, "GeForce 710M" },
649 { 0x17aa, 0x503f, "GeForce 820M" },
650 { 0x17aa, 0x5040, "GeForce 820M" },
651 { 0x1854, 0x0177, "GeForce 710M" },
652 { 0x1854, 0x0180, "GeForce 710M" },
653 { 0x1854, 0x0190, "GeForce GT 720M" },
654 { 0x1854, 0x0192, "GeForce GT 720M" },
655 { 0x1854, 0x0224, "GeForce 820M" },
656 { 0x1b0a, 0x20dd, "GeForce GT 620M" },
657 { 0x1b0a, 0x20df, "GeForce GT 620M" },
658 { 0x1b0a, 0x210e, "GeForce 820M" },
659 { 0x1b0a, 0x2202, "GeForce GT 720M" },
660 { 0x1b0a, 0x90d7, "GeForce 820M" },
661 { 0x1b0a, 0x90dd, "GeForce 820M" },
662 { 0x1b50, 0x5530, "GeForce 820M" },
663 {}
664};
665
/* Subsystem-ID board-name overrides for NVIDIA device 0x1185 ("GeForce
 * GTX 660" in nvkm_device_pci_10de[]): boards matching one of these PCI
 * subsystem (vendor, device) pairs carry a different marketing name.
 * The empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1185[] = {
	{ 0x10de, 0x106f, "GeForce GTX 760" },
	{}
};
671
/* Subsystem-ID board-name overrides for NVIDIA device 0x1189 ("GeForce
 * GTX 670" in nvkm_device_pci_10de[]); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1189[] = {
	{ 0x10de, 0x1074, "GeForce GTX 760 Ti OEM" },
	{}
};
677
/* Subsystem-ID board-name overrides for NVIDIA device 0x1199 ("GeForce
 * GTX 870M" in nvkm_device_pci_10de[]); this subsystem ships the chip as
 * a desktop "GeForce GTX 760" board.  Empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1199[] = {
	{ 0x1458, 0xd001, "GeForce GTX 760" },
	{}
};
683
/* Subsystem-ID board-name overrides for NVIDIA device 0x11e3, keyed by
 * PCI subsystem (vendor, device); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_11e3[] = {
	{ 0x17aa, 0x3683, "GeForce GTX 760A" },
	{}
};
689
/* Quirk entries for NVIDIA device 0x11fc: no board-name override (name is
 * NULL), only the War00C800_0 quirk flag for these Lenovo subsystems.
 * NOTE(review): the semantics of War00C800_0 are not visible here —
 * presumably a workaround keyed to register 0xc800; confirm in the code
 * that consumes struct nvkm_device_quirk.  Empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_11fc[] = {
	{ 0x17aa, 0x2211, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
	{ 0x17aa, 0x221e, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
	{}
};
696
/* Subsystem-ID board-name overrides for NVIDIA device 0x1247, keyed by
 * PCI subsystem (vendor, device); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1247[] = {
	{ 0x1043, 0x212a, "GeForce GT 635M" },
	{ 0x1043, 0x212b, "GeForce GT 635M" },
	{ 0x1043, 0x212c, "GeForce GT 635M" },
	{}
};
704
/* Subsystem-ID board-name overrides for NVIDIA device 0x124d, keyed by
 * PCI subsystem (vendor, device); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_124d[] = {
	{ 0x1462, 0x10cc, "GeForce GT 635M" },
	{}
};
710
/* Subsystem-ID board-name overrides for NVIDIA device 0x1290, keyed by
 * PCI subsystem (vendor, device); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1290[] = {
	{ 0x103c, 0x2afa, "GeForce 730A" },
	{}
};
716
/* Subsystem-ID board-name overrides for NVIDIA device 0x1292, keyed by
 * PCI subsystem (vendor, device); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1292[] = {
	{ 0x17aa, 0x3675, "GeForce GT 740A" },
	{ 0x17aa, 0x367c, "GeForce GT 740A" },
	{ 0x17aa, 0x3684, "GeForce GT 740A" },
	{}
};
724
/* Subsystem-ID board-name overrides for NVIDIA device 0x1295, keyed by
 * PCI subsystem (vendor, device); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1295[] = {
	{ 0x103c, 0x2b0d, "GeForce 710A" },
	{ 0x103c, 0x2b0f, "GeForce 710A" },
	{ 0x103c, 0x2b20, "GeForce 810A" },
	{ 0x103c, 0x2b21, "GeForce 810A" },
	{}
};
733
/* Subsystem-ID board-name overrides for NVIDIA device 0x1299, keyed by
 * PCI subsystem (vendor, device); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1299[] = {
	{ 0x17aa, 0x369b, "GeForce 920A" },
	{}
};
739
/* Subsystem-ID board-name overrides for NVIDIA device 0x1340, keyed by
 * PCI subsystem (vendor, device); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1340[] = {
	{ 0x103c, 0x2b2b, "GeForce 830A" },
	{}
};
745
/* Subsystem-ID board-name overrides for NVIDIA device 0x1341, keyed by
 * PCI subsystem (vendor, device); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1341[] = {
	{ 0x17aa, 0x3697, "GeForce 840A" },
	{ 0x17aa, 0x3699, "GeForce 840A" },
	{ 0x17aa, 0x369c, "GeForce 840A" },
	{ 0x17aa, 0x36af, "GeForce 840A" },
	{}
};
754
/* Subsystem-ID board-name overrides for NVIDIA device 0x1346, keyed by
 * PCI subsystem (vendor, device); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1346[] = {
	{ 0x17aa, 0x30ba, "GeForce 930A" },
	{ 0x17aa, 0x362c, "GeForce 930A" },
	{}
};
761
/* Subsystem-ID board-name overrides for NVIDIA device 0x1347, keyed by
 * PCI subsystem (vendor, device); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1347[] = {
	{ 0x17aa, 0x36b9, "GeForce 940A" },
	{ 0x17aa, 0x36ba, "GeForce 940A" },
	{}
};
768
/* Subsystem-ID board-name overrides for NVIDIA device 0x137a, keyed by
 * PCI subsystem (vendor, device); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_137a[] = {
	{ 0x17aa, 0x2225, "Quadro K620M" },
	{}
};
774
/* Subsystem-ID board-name overrides for NVIDIA device 0x137d, keyed by
 * PCI subsystem (vendor, device); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_137d[] = {
	{ 0x17aa, 0x3699, "GeForce 940A" },
	{}
};
780
/* Subsystem-ID board-name overrides for NVIDIA device 0x1391, keyed by
 * PCI subsystem (vendor, device); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1391[] = {
	{ 0x17aa, 0x3697, "GeForce GTX 850A" },
	{}
};
786
/* Subsystem-ID board-name overrides for NVIDIA device 0x1392, keyed by
 * PCI subsystem (vendor, device); "GeForce GPU" is the literal marketing
 * name used by that subsystem.  Empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1392[] = {
	{ 0x1028, 0x066a, "GeForce GPU" },
	{ 0x1043, 0x861e, "GeForce GTX 750 Ti" },
	{}
};
793
/* Subsystem-ID board-name overrides for NVIDIA device 0x139a, keyed by
 * PCI subsystem (vendor, device); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_139a[] = {
	{ 0x17aa, 0x36b9, "GeForce GTX 950A" },
	{}
};
799
/* Subsystem-ID board-name overrides for NVIDIA device 0x139b, keyed by
 * PCI subsystem (vendor, device); empty entry terminates the list.
 */
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_139b[] = {
	{ 0x1028, 0x06a3, "GeForce GTX 860M" },
	{ 0x19da, 0xc248, "GeForce GTX 750 Ti" },
	{}
};
806
807static const struct nvkm_device_pci_device
808nvkm_device_pci_10de[] = {
809 { 0x0020, "RIVA TNT" },
810 { 0x0028, "RIVA TNT2/TNT2 Pro" },
811 { 0x0029, "RIVA TNT2 Ultra" },
812 { 0x002c, "Vanta/Vanta LT" },
813 { 0x002d, "RIVA TNT2 Model 64/Model 64 Pro" },
814 { 0x0040, "GeForce 6800 Ultra" },
815 { 0x0041, "GeForce 6800" },
816 { 0x0042, "GeForce 6800 LE" },
817 { 0x0043, "GeForce 6800 XE" },
818 { 0x0044, "GeForce 6800 XT" },
819 { 0x0045, "GeForce 6800 GT" },
820 { 0x0046, "GeForce 6800 GT" },
821 { 0x0047, "GeForce 6800 GS" },
822 { 0x0048, "GeForce 6800 XT" },
823 { 0x004e, "Quadro FX 4000" },
824 { 0x0090, "GeForce 7800 GTX" },
825 { 0x0091, "GeForce 7800 GTX" },
826 { 0x0092, "GeForce 7800 GT" },
827 { 0x0093, "GeForce 7800 GS" },
828 { 0x0095, "GeForce 7800 SLI" },
829 { 0x0098, "GeForce Go 7800" },
830 { 0x0099, "GeForce Go 7800 GTX" },
831 { 0x009d, "Quadro FX 4500" },
832 { 0x00a0, "Aladdin TNT2" },
833 { 0x00c0, "GeForce 6800 GS" },
834 { 0x00c1, "GeForce 6800" },
835 { 0x00c2, "GeForce 6800 LE" },
836 { 0x00c3, "GeForce 6800 XT" },
837 { 0x00c8, "GeForce Go 6800" },
838 { 0x00c9, "GeForce Go 6800 Ultra" },
839 { 0x00cc, "Quadro FX Go1400" },
840 { 0x00cd, "Quadro FX 3450/4000 SDI" },
841 { 0x00ce, "Quadro FX 1400" },
842 { 0x00f1, "GeForce 6600 GT" },
843 { 0x00f2, "GeForce 6600" },
844 { 0x00f3, "GeForce 6200" },
845 { 0x00f4, "GeForce 6600 LE" },
846 { 0x00f5, "GeForce 7800 GS" },
847 { 0x00f6, "GeForce 6800 GS" },
848 { 0x00f8, "Quadro FX 3400/Quadro FX 4000" },
849 { 0x00f9, "GeForce 6800 Ultra" },
850 { 0x00fa, "GeForce PCX 5750" },
851 { 0x00fb, "GeForce PCX 5900" },
852 { 0x00fc, "Quadro FX 330/GeForce PCX 5300" },
853 { 0x00fd, "Quadro FX 330/Quadro NVS 280 PCI-E" },
854 { 0x00fe, "Quadro FX 1300" },
855 { 0x0100, "GeForce 256" },
856 { 0x0101, "GeForce DDR" },
857 { 0x0103, "Quadro" },
858 { 0x0110, "GeForce2 MX/MX 400" },
859 { 0x0111, "GeForce2 MX 100/200" },
860 { 0x0112, "GeForce2 Go" },
861 { 0x0113, "Quadro2 MXR/EX/Go" },
862 { 0x0140, "GeForce 6600 GT" },
863 { 0x0141, "GeForce 6600" },
864 { 0x0142, "GeForce 6600 LE" },
865 { 0x0143, "GeForce 6600 VE" },
866 { 0x0144, "GeForce Go 6600" },
867 { 0x0145, "GeForce 6610 XL" },
868 { 0x0146, "GeForce Go 6600 TE/6200 TE" },
869 { 0x0147, "GeForce 6700 XL" },
870 { 0x0148, "GeForce Go 6600" },
871 { 0x0149, "GeForce Go 6600 GT" },
872 { 0x014a, "Quadro NVS 440" },
873 { 0x014c, "Quadro FX 540M" },
874 { 0x014d, "Quadro FX 550" },
875 { 0x014e, "Quadro FX 540" },
876 { 0x014f, "GeForce 6200" },
877 { 0x0150, "GeForce2 GTS/GeForce2 Pro" },
878 { 0x0151, "GeForce2 Ti" },
879 { 0x0152, "GeForce2 Ultra" },
880 { 0x0153, "Quadro2 Pro" },
881 { 0x0160, "GeForce 6500" },
882 { 0x0161, "GeForce 6200 TurboCache(TM)" },
883 { 0x0162, "GeForce 6200SE TurboCache(TM)" },
884 { 0x0163, "GeForce 6200 LE" },
885 { 0x0164, "GeForce Go 6200" },
886 { 0x0165, "Quadro NVS 285" },
887 { 0x0166, "GeForce Go 6400" },
888 { 0x0167, "GeForce Go 6200" },
889 { 0x0168, "GeForce Go 6400" },
890 { 0x0169, "GeForce 6250" },
891 { 0x016a, "GeForce 7100 GS" },
892 { 0x0170, "GeForce4 MX 460" },
893 { 0x0171, "GeForce4 MX 440" },
894 { 0x0172, "GeForce4 MX 420" },
895 { 0x0173, "GeForce4 MX 440-SE" },
896 { 0x0174, "GeForce4 440 Go" },
897 { 0x0175, "GeForce4 420 Go" },
898 { 0x0176, "GeForce4 420 Go 32M" },
899 { 0x0177, "GeForce4 460 Go" },
900 { 0x0178, "Quadro4 550 XGL" },
901 { 0x0179, "GeForce4 440 Go 64M" },
902 { 0x017a, "Quadro NVS 400" },
903 { 0x017c, "Quadro4 500 GoGL" },
904 { 0x017d, "GeForce4 410 Go 16M" },
905 { 0x0181, "GeForce4 MX 440 with AGP8X" },
906 { 0x0182, "GeForce4 MX 440SE with AGP8X" },
907 { 0x0183, "GeForce4 MX 420 with AGP8X" },
908 { 0x0185, "GeForce4 MX 4000" },
909 { 0x0188, "Quadro4 580 XGL" },
910 { 0x0189, "GeForce4 MX with AGP8X (Mac)", nvkm_device_pci_10de_0189 },
911 { 0x018a, "Quadro NVS 280 SD" },
912 { 0x018b, "Quadro4 380 XGL" },
913 { 0x018c, "Quadro NVS 50 PCI" },
914 { 0x0191, "GeForce 8800 GTX" },
915 { 0x0193, "GeForce 8800 GTS" },
916 { 0x0194, "GeForce 8800 Ultra" },
917 { 0x0197, "Tesla C870" },
918 { 0x019d, "Quadro FX 5600" },
919 { 0x019e, "Quadro FX 4600" },
920 { 0x01a0, "GeForce2 Integrated GPU" },
921 { 0x01d0, "GeForce 7350 LE" },
922 { 0x01d1, "GeForce 7300 LE" },
923 { 0x01d2, "GeForce 7550 LE" },
924 { 0x01d3, "GeForce 7300 SE/7200 GS" },
925 { 0x01d6, "GeForce Go 7200" },
926 { 0x01d7, "GeForce Go 7300" },
927 { 0x01d8, "GeForce Go 7400" },
928 { 0x01da, "Quadro NVS 110M" },
929 { 0x01db, "Quadro NVS 120M" },
930 { 0x01dc, "Quadro FX 350M" },
931 { 0x01dd, "GeForce 7500 LE" },
932 { 0x01de, "Quadro FX 350" },
933 { 0x01df, "GeForce 7300 GS" },
934 { 0x01f0, "GeForce4 MX Integrated GPU", nvkm_device_pci_10de_01f0 },
935 { 0x0200, "GeForce3" },
936 { 0x0201, "GeForce3 Ti 200" },
937 { 0x0202, "GeForce3 Ti 500" },
938 { 0x0203, "Quadro DCC" },
939 { 0x0211, "GeForce 6800" },
940 { 0x0212, "GeForce 6800 LE" },
941 { 0x0215, "GeForce 6800 GT" },
942 { 0x0218, "GeForce 6800 XT" },
943 { 0x0221, "GeForce 6200" },
944 { 0x0222, "GeForce 6200 A-LE" },
945 { 0x0240, "GeForce 6150" },
946 { 0x0241, "GeForce 6150 LE" },
947 { 0x0242, "GeForce 6100" },
948 { 0x0244, "GeForce Go 6150" },
949 { 0x0245, "Quadro NVS 210S / GeForce 6150LE" },
950 { 0x0247, "GeForce Go 6100" },
951 { 0x0250, "GeForce4 Ti 4600" },
952 { 0x0251, "GeForce4 Ti 4400" },
953 { 0x0253, "GeForce4 Ti 4200" },
954 { 0x0258, "Quadro4 900 XGL" },
955 { 0x0259, "Quadro4 750 XGL" },
956 { 0x025b, "Quadro4 700 XGL" },
957 { 0x0280, "GeForce4 Ti 4800" },
958 { 0x0281, "GeForce4 Ti 4200 with AGP8X" },
959 { 0x0282, "GeForce4 Ti 4800 SE" },
960 { 0x0286, "GeForce4 4200 Go" },
961 { 0x0288, "Quadro4 980 XGL" },
962 { 0x0289, "Quadro4 780 XGL" },
963 { 0x028c, "Quadro4 700 GoGL" },
964 { 0x0290, "GeForce 7900 GTX" },
965 { 0x0291, "GeForce 7900 GT/GTO" },
966 { 0x0292, "GeForce 7900 GS" },
967 { 0x0293, "GeForce 7950 GX2" },
968 { 0x0294, "GeForce 7950 GX2" },
969 { 0x0295, "GeForce 7950 GT" },
970 { 0x0297, "GeForce Go 7950 GTX" },
971 { 0x0298, "GeForce Go 7900 GS" },
972 { 0x0299, "Quadro NVS 510M" },
973 { 0x029a, "Quadro FX 2500M" },
974 { 0x029b, "Quadro FX 1500M" },
975 { 0x029c, "Quadro FX 5500" },
976 { 0x029d, "Quadro FX 3500" },
977 { 0x029e, "Quadro FX 1500" },
978 { 0x029f, "Quadro FX 4500 X2" },
979 { 0x02e0, "GeForce 7600 GT" },
980 { 0x02e1, "GeForce 7600 GS" },
981 { 0x02e2, "GeForce 7300 GT" },
982 { 0x02e3, "GeForce 7900 GS" },
983 { 0x02e4, "GeForce 7950 GT" },
984 { 0x0301, "GeForce FX 5800 Ultra" },
985 { 0x0302, "GeForce FX 5800" },
986 { 0x0308, "Quadro FX 2000" },
987 { 0x0309, "Quadro FX 1000" },
988 { 0x0311, "GeForce FX 5600 Ultra" },
989 { 0x0312, "GeForce FX 5600" },
990 { 0x0314, "GeForce FX 5600XT" },
991 { 0x031a, "GeForce FX Go5600" },
992 { 0x031b, "GeForce FX Go5650" },
993 { 0x031c, "Quadro FX Go700" },
994 { 0x0320, "GeForce FX 5200" },
995 { 0x0321, "GeForce FX 5200 Ultra" },
996 { 0x0322, "GeForce FX 5200", nvkm_device_pci_10de_0322 },
997 { 0x0323, "GeForce FX 5200LE" },
998 { 0x0324, "GeForce FX Go5200" },
999 { 0x0325, "GeForce FX Go5250" },
1000 { 0x0326, "GeForce FX 5500" },
1001 { 0x0327, "GeForce FX 5100" },
1002 { 0x0328, "GeForce FX Go5200 32M/64M" },
1003 { 0x032a, "Quadro NVS 55/280 PCI" },
1004 { 0x032b, "Quadro FX 500/FX 600" },
1005 { 0x032c, "GeForce FX Go53xx" },
1006 { 0x032d, "GeForce FX Go5100" },
1007 { 0x0330, "GeForce FX 5900 Ultra" },
1008 { 0x0331, "GeForce FX 5900" },
1009 { 0x0332, "GeForce FX 5900XT" },
1010 { 0x0333, "GeForce FX 5950 Ultra" },
1011 { 0x0334, "GeForce FX 5900ZT" },
1012 { 0x0338, "Quadro FX 3000" },
1013 { 0x033f, "Quadro FX 700" },
1014 { 0x0341, "GeForce FX 5700 Ultra" },
1015 { 0x0342, "GeForce FX 5700" },
1016 { 0x0343, "GeForce FX 5700LE" },
1017 { 0x0344, "GeForce FX 5700VE" },
1018 { 0x0347, "GeForce FX Go5700" },
1019 { 0x0348, "GeForce FX Go5700" },
1020 { 0x034c, "Quadro FX Go1000" },
1021 { 0x034e, "Quadro FX 1100" },
1022 { 0x038b, "GeForce 7650 GS" },
1023 { 0x0390, "GeForce 7650 GS" },
1024 { 0x0391, "GeForce 7600 GT" },
1025 { 0x0392, "GeForce 7600 GS" },
1026 { 0x0393, "GeForce 7300 GT" },
1027 { 0x0394, "GeForce 7600 LE" },
1028 { 0x0395, "GeForce 7300 GT" },
1029 { 0x0397, "GeForce Go 7700" },
1030 { 0x0398, "GeForce Go 7600" },
1031 { 0x0399, "GeForce Go 7600 GT" },
1032 { 0x039c, "Quadro FX 560M" },
1033 { 0x039e, "Quadro FX 560" },
1034 { 0x03d0, "GeForce 6150SE nForce 430" },
1035 { 0x03d1, "GeForce 6100 nForce 405" },
1036 { 0x03d2, "GeForce 6100 nForce 400" },
1037 { 0x03d5, "GeForce 6100 nForce 420" },
1038 { 0x03d6, "GeForce 7025 / nForce 630a" },
1039 { 0x0400, "GeForce 8600 GTS" },
1040 { 0x0401, "GeForce 8600 GT" },
1041 { 0x0402, "GeForce 8600 GT" },
1042 { 0x0403, "GeForce 8600 GS" },
1043 { 0x0404, "GeForce 8400 GS" },
1044 { 0x0405, "GeForce 9500M GS" },
1045 { 0x0406, "GeForce 8300 GS" },
1046 { 0x0407, "GeForce 8600M GT" },
1047 { 0x0408, "GeForce 9650M GS" },
1048 { 0x0409, "GeForce 8700M GT" },
1049 { 0x040a, "Quadro FX 370" },
1050 { 0x040b, "Quadro NVS 320M" },
1051 { 0x040c, "Quadro FX 570M" },
1052 { 0x040d, "Quadro FX 1600M" },
1053 { 0x040e, "Quadro FX 570" },
1054 { 0x040f, "Quadro FX 1700" },
1055 { 0x0410, "GeForce GT 330" },
1056 { 0x0420, "GeForce 8400 SE" },
1057 { 0x0421, "GeForce 8500 GT" },
1058 { 0x0422, "GeForce 8400 GS" },
1059 { 0x0423, "GeForce 8300 GS" },
1060 { 0x0424, "GeForce 8400 GS" },
1061 { 0x0425, "GeForce 8600M GS" },
1062 { 0x0426, "GeForce 8400M GT" },
1063 { 0x0427, "GeForce 8400M GS" },
1064 { 0x0428, "GeForce 8400M G" },
1065 { 0x0429, "Quadro NVS 140M" },
1066 { 0x042a, "Quadro NVS 130M" },
1067 { 0x042b, "Quadro NVS 135M" },
1068 { 0x042c, "GeForce 9400 GT" },
1069 { 0x042d, "Quadro FX 360M" },
1070 { 0x042e, "GeForce 9300M G" },
1071 { 0x042f, "Quadro NVS 290" },
1072 { 0x0531, "GeForce 7150M / nForce 630M" },
1073 { 0x0533, "GeForce 7000M / nForce 610M" },
1074 { 0x053a, "GeForce 7050 PV / nForce 630a" },
1075 { 0x053b, "GeForce 7050 PV / nForce 630a" },
1076 { 0x053e, "GeForce 7025 / nForce 630a" },
1077 { 0x05e0, "GeForce GTX 295" },
1078 { 0x05e1, "GeForce GTX 280" },
1079 { 0x05e2, "GeForce GTX 260" },
1080 { 0x05e3, "GeForce GTX 285" },
1081 { 0x05e6, "GeForce GTX 275" },
1082 { 0x05e7, "Tesla C1060", nvkm_device_pci_10de_05e7 },
1083 { 0x05ea, "GeForce GTX 260" },
1084 { 0x05eb, "GeForce GTX 295" },
1085 { 0x05ed, "Quadroplex 2200 D2" },
1086 { 0x05f8, "Quadroplex 2200 S4" },
1087 { 0x05f9, "Quadro CX" },
1088 { 0x05fd, "Quadro FX 5800" },
1089 { 0x05fe, "Quadro FX 4800" },
1090 { 0x05ff, "Quadro FX 3800" },
1091 { 0x0600, "GeForce 8800 GTS 512" },
1092 { 0x0601, "GeForce 9800 GT" },
1093 { 0x0602, "GeForce 8800 GT" },
1094 { 0x0603, "GeForce GT 230" },
1095 { 0x0604, "GeForce 9800 GX2" },
1096 { 0x0605, "GeForce 9800 GT" },
1097 { 0x0606, "GeForce 8800 GS" },
1098 { 0x0607, "GeForce GTS 240" },
1099 { 0x0608, "GeForce 9800M GTX" },
1100 { 0x0609, "GeForce 8800M GTS", nvkm_device_pci_10de_0609 },
1101 { 0x060a, "GeForce GTX 280M" },
1102 { 0x060b, "GeForce 9800M GT" },
1103 { 0x060c, "GeForce 8800M GTX" },
1104 { 0x060d, "GeForce 8800 GS" },
1105 { 0x060f, "GeForce GTX 285M" },
1106 { 0x0610, "GeForce 9600 GSO" },
1107 { 0x0611, "GeForce 8800 GT" },
1108 { 0x0612, "GeForce 9800 GTX/9800 GTX+" },
1109 { 0x0613, "GeForce 9800 GTX+" },
1110 { 0x0614, "GeForce 9800 GT" },
1111 { 0x0615, "GeForce GTS 250" },
1112 { 0x0617, "GeForce 9800M GTX" },
1113 { 0x0618, "GeForce GTX 260M" },
1114 { 0x0619, "Quadro FX 4700 X2" },
1115 { 0x061a, "Quadro FX 3700" },
1116 { 0x061b, "Quadro VX 200" },
1117 { 0x061c, "Quadro FX 3600M" },
1118 { 0x061d, "Quadro FX 2800M" },
1119 { 0x061e, "Quadro FX 3700M" },
1120 { 0x061f, "Quadro FX 3800M" },
1121 { 0x0621, "GeForce GT 230" },
1122 { 0x0622, "GeForce 9600 GT" },
1123 { 0x0623, "GeForce 9600 GS" },
1124 { 0x0625, "GeForce 9600 GSO 512" },
1125 { 0x0626, "GeForce GT 130" },
1126 { 0x0627, "GeForce GT 140" },
1127 { 0x0628, "GeForce 9800M GTS" },
1128 { 0x062a, "GeForce 9700M GTS" },
1129 { 0x062b, "GeForce 9800M GS" },
1130 { 0x062c, "GeForce 9800M GTS" },
1131 { 0x062d, "GeForce 9600 GT" },
1132 { 0x062e, "GeForce 9600 GT", nvkm_device_pci_10de_062e },
1133 { 0x0630, "GeForce 9700 S" },
1134 { 0x0631, "GeForce GTS 160M" },
1135 { 0x0632, "GeForce GTS 150M" },
1136 { 0x0635, "GeForce 9600 GSO" },
1137 { 0x0637, "GeForce 9600 GT" },
1138 { 0x0638, "Quadro FX 1800" },
1139 { 0x063a, "Quadro FX 2700M" },
1140 { 0x0640, "GeForce 9500 GT" },
1141 { 0x0641, "GeForce 9400 GT" },
1142 { 0x0643, "GeForce 9500 GT" },
1143 { 0x0644, "GeForce 9500 GS" },
1144 { 0x0645, "GeForce 9500 GS" },
1145 { 0x0646, "GeForce GT 120" },
1146 { 0x0647, "GeForce 9600M GT" },
1147 { 0x0648, "GeForce 9600M GS" },
1148 { 0x0649, "GeForce 9600M GT", nvkm_device_pci_10de_0649 },
1149 { 0x064a, "GeForce 9700M GT" },
1150 { 0x064b, "GeForce 9500M G" },
1151 { 0x064c, "GeForce 9650M GT" },
1152 { 0x0651, "GeForce G 110M" },
1153 { 0x0652, "GeForce GT 130M", nvkm_device_pci_10de_0652 },
1154 { 0x0653, "GeForce GT 120M" },
1155 { 0x0654, "GeForce GT 220M", nvkm_device_pci_10de_0654 },
1156 { 0x0655, NULL, nvkm_device_pci_10de_0655 },
1157 { 0x0656, NULL, nvkm_device_pci_10de_0656 },
1158 { 0x0658, "Quadro FX 380" },
1159 { 0x0659, "Quadro FX 580" },
1160 { 0x065a, "Quadro FX 1700M" },
1161 { 0x065b, "GeForce 9400 GT" },
1162 { 0x065c, "Quadro FX 770M" },
1163 { 0x06c0, "GeForce GTX 480" },
1164 { 0x06c4, "GeForce GTX 465" },
1165 { 0x06ca, "GeForce GTX 480M" },
1166 { 0x06cd, "GeForce GTX 470" },
1167 { 0x06d1, "Tesla C2050 / C2070", nvkm_device_pci_10de_06d1 },
1168 { 0x06d2, "Tesla M2070", nvkm_device_pci_10de_06d2 },
1169 { 0x06d8, "Quadro 6000" },
1170 { 0x06d9, "Quadro 5000" },
1171 { 0x06da, "Quadro 5000M" },
1172 { 0x06dc, "Quadro 6000" },
1173 { 0x06dd, "Quadro 4000" },
1174 { 0x06de, "Tesla T20 Processor", nvkm_device_pci_10de_06de },
1175 { 0x06df, "Tesla M2070-Q" },
1176 { 0x06e0, "GeForce 9300 GE" },
1177 { 0x06e1, "GeForce 9300 GS" },
1178 { 0x06e2, "GeForce 8400" },
1179 { 0x06e3, "GeForce 8400 SE" },
1180 { 0x06e4, "GeForce 8400 GS" },
1181 { 0x06e5, "GeForce 9300M GS" },
1182 { 0x06e6, "GeForce G100" },
1183 { 0x06e7, "GeForce 9300 SE" },
1184 { 0x06e8, "GeForce 9200M GS", nvkm_device_pci_10de_06e8 },
1185 { 0x06e9, "GeForce 9300M GS" },
1186 { 0x06ea, "Quadro NVS 150M" },
1187 { 0x06eb, "Quadro NVS 160M" },
1188 { 0x06ec, "GeForce G 105M" },
1189 { 0x06ef, "GeForce G 103M" },
1190 { 0x06f1, "GeForce G105M" },
1191 { 0x06f8, "Quadro NVS 420" },
1192 { 0x06f9, "Quadro FX 370 LP", nvkm_device_pci_10de_06f9 },
1193 { 0x06fa, "Quadro NVS 450" },
1194 { 0x06fb, "Quadro FX 370M" },
1195 { 0x06fd, "Quadro NVS 295" },
1196 { 0x06ff, "HICx16 + Graphics", nvkm_device_pci_10de_06ff },
1197 { 0x07e0, "GeForce 7150 / nForce 630i" },
1198 { 0x07e1, "GeForce 7100 / nForce 630i" },
1199 { 0x07e2, "GeForce 7050 / nForce 630i" },
1200 { 0x07e3, "GeForce 7050 / nForce 610i" },
1201 { 0x07e5, "GeForce 7050 / nForce 620i" },
1202 { 0x0840, "GeForce 8200M" },
1203 { 0x0844, "GeForce 9100M G" },
1204 { 0x0845, "GeForce 8200M G" },
1205 { 0x0846, "GeForce 9200" },
1206 { 0x0847, "GeForce 9100" },
1207 { 0x0848, "GeForce 8300" },
1208 { 0x0849, "GeForce 8200" },
1209 { 0x084a, "nForce 730a" },
1210 { 0x084b, "GeForce 9200" },
1211 { 0x084c, "nForce 980a/780a SLI" },
1212 { 0x084d, "nForce 750a SLI" },
1213 { 0x084f, "GeForce 8100 / nForce 720a" },
1214 { 0x0860, "GeForce 9400" },
1215 { 0x0861, "GeForce 9400" },
1216 { 0x0862, "GeForce 9400M G" },
1217 { 0x0863, "GeForce 9400M" },
1218 { 0x0864, "GeForce 9300" },
1219 { 0x0865, "ION" },
1220 { 0x0866, "GeForce 9400M G", nvkm_device_pci_10de_0866 },
1221 { 0x0867, "GeForce 9400" },
1222 { 0x0868, "nForce 760i SLI" },
1223 { 0x0869, "GeForce 9400" },
1224 { 0x086a, "GeForce 9400" },
1225 { 0x086c, "GeForce 9300 / nForce 730i" },
1226 { 0x086d, "GeForce 9200" },
1227 { 0x086e, "GeForce 9100M G" },
1228 { 0x086f, "GeForce 8200M G" },
1229 { 0x0870, "GeForce 9400M" },
1230 { 0x0871, "GeForce 9200" },
1231 { 0x0872, "GeForce G102M", nvkm_device_pci_10de_0872 },
1232 { 0x0873, "GeForce G102M", nvkm_device_pci_10de_0873 },
1233 { 0x0874, "ION" },
1234 { 0x0876, "ION" },
1235 { 0x087a, "GeForce 9400" },
1236 { 0x087d, "ION" },
1237 { 0x087e, "ION LE" },
1238 { 0x087f, "ION LE" },
1239 { 0x08a0, "GeForce 320M" },
1240 { 0x08a2, "GeForce 320M" },
1241 { 0x08a3, "GeForce 320M" },
1242 { 0x08a4, "GeForce 320M" },
1243 { 0x08a5, "GeForce 320M" },
1244 { 0x0a20, "GeForce GT 220" },
1245 { 0x0a22, "GeForce 315" },
1246 { 0x0a23, "GeForce 210" },
1247 { 0x0a26, "GeForce 405" },
1248 { 0x0a27, "GeForce 405" },
1249 { 0x0a28, "GeForce GT 230M" },
1250 { 0x0a29, "GeForce GT 330M" },
1251 { 0x0a2a, "GeForce GT 230M" },
1252 { 0x0a2b, "GeForce GT 330M" },
1253 { 0x0a2c, "NVS 5100M" },
1254 { 0x0a2d, "GeForce GT 320M" },
1255 { 0x0a32, "GeForce GT 415" },
1256 { 0x0a34, "GeForce GT 240M" },
1257 { 0x0a35, "GeForce GT 325M" },
1258 { 0x0a38, "Quadro 400" },
1259 { 0x0a3c, "Quadro FX 880M" },
1260 { 0x0a60, "GeForce G210" },
1261 { 0x0a62, "GeForce 205" },
1262 { 0x0a63, "GeForce 310" },
1263 { 0x0a64, "Second Generation ION" },
1264 { 0x0a65, "GeForce 210" },
1265 { 0x0a66, "GeForce 310" },
1266 { 0x0a67, "GeForce 315" },
1267 { 0x0a68, "GeForce G105M" },
1268 { 0x0a69, "GeForce G105M" },
1269 { 0x0a6a, "NVS 2100M" },
1270 { 0x0a6c, "NVS 3100M" },
1271 { 0x0a6e, "GeForce 305M", nvkm_device_pci_10de_0a6e },
1272 { 0x0a6f, "Second Generation ION" },
1273 { 0x0a70, "GeForce 310M", nvkm_device_pci_10de_0a70 },
1274 { 0x0a71, "GeForce 305M" },
1275 { 0x0a72, "GeForce 310M" },
1276 { 0x0a73, "GeForce 305M", nvkm_device_pci_10de_0a73 },
1277 { 0x0a74, "GeForce G210M", nvkm_device_pci_10de_0a74 },
1278 { 0x0a75, "GeForce 310M", nvkm_device_pci_10de_0a75 },
1279 { 0x0a76, "Second Generation ION" },
1280 { 0x0a78, "Quadro FX 380 LP" },
1281 { 0x0a7a, "GeForce 315M", nvkm_device_pci_10de_0a7a },
1282 { 0x0a7c, "Quadro FX 380M" },
1283 { 0x0ca0, "GeForce GT 330" },
1284 { 0x0ca2, "GeForce GT 320" },
1285 { 0x0ca3, "GeForce GT 240" },
1286 { 0x0ca4, "GeForce GT 340" },
1287 { 0x0ca5, "GeForce GT 220" },
1288 { 0x0ca7, "GeForce GT 330" },
1289 { 0x0ca8, "GeForce GTS 260M" },
1290 { 0x0ca9, "GeForce GTS 250M" },
1291 { 0x0cac, "GeForce GT 220" },
1292 { 0x0caf, "GeForce GT 335M" },
1293 { 0x0cb0, "GeForce GTS 350M" },
1294 { 0x0cb1, "GeForce GTS 360M" },
1295 { 0x0cbc, "Quadro FX 1800M" },
1296 { 0x0dc0, "GeForce GT 440" },
1297 { 0x0dc4, "GeForce GTS 450" },
1298 { 0x0dc5, "GeForce GTS 450" },
1299 { 0x0dc6, "GeForce GTS 450" },
1300 { 0x0dcd, "GeForce GT 555M" },
1301 { 0x0dce, "GeForce GT 555M" },
1302 { 0x0dd1, "GeForce GTX 460M" },
1303 { 0x0dd2, "GeForce GT 445M" },
1304 { 0x0dd3, "GeForce GT 435M" },
1305 { 0x0dd6, "GeForce GT 550M" },
1306 { 0x0dd8, "Quadro 2000", nvkm_device_pci_10de_0dd8 },
1307 { 0x0dda, "Quadro 2000M" },
1308 { 0x0de0, "GeForce GT 440" },
1309 { 0x0de1, "GeForce GT 430" },
1310 { 0x0de2, "GeForce GT 420" },
1311 { 0x0de3, "GeForce GT 635M" },
1312 { 0x0de4, "GeForce GT 520" },
1313 { 0x0de5, "GeForce GT 530" },
1314 { 0x0de7, "GeForce GT 610" },
1315 { 0x0de8, "GeForce GT 620M" },
1316 { 0x0de9, "GeForce GT 630M", nvkm_device_pci_10de_0de9 },
1317 { 0x0dea, "GeForce 610M", nvkm_device_pci_10de_0dea },
1318 { 0x0deb, "GeForce GT 555M" },
1319 { 0x0dec, "GeForce GT 525M" },
1320 { 0x0ded, "GeForce GT 520M" },
1321 { 0x0dee, "GeForce GT 415M" },
1322 { 0x0def, "NVS 5400M" },
1323 { 0x0df0, "GeForce GT 425M" },
1324 { 0x0df1, "GeForce GT 420M" },
1325 { 0x0df2, "GeForce GT 435M" },
1326 { 0x0df3, "GeForce GT 420M" },
1327 { 0x0df4, "GeForce GT 540M", nvkm_device_pci_10de_0df4 },
1328 { 0x0df5, "GeForce GT 525M" },
1329 { 0x0df6, "GeForce GT 550M" },
1330 { 0x0df7, "GeForce GT 520M" },
1331 { 0x0df8, "Quadro 600" },
1332 { 0x0df9, "Quadro 500M" },
1333 { 0x0dfa, "Quadro 1000M" },
1334 { 0x0dfc, "NVS 5200M" },
1335 { 0x0e22, "GeForce GTX 460" },
1336 { 0x0e23, "GeForce GTX 460 SE" },
1337 { 0x0e24, "GeForce GTX 460" },
1338 { 0x0e30, "GeForce GTX 470M" },
1339 { 0x0e31, "GeForce GTX 485M" },
1340 { 0x0e3a, "Quadro 3000M" },
1341 { 0x0e3b, "Quadro 4000M" },
1342 { 0x0f00, "GeForce GT 630" },
1343 { 0x0f01, "GeForce GT 620" },
1344 { 0x0f02, "GeForce GT 730" },
1345 { 0x0fc0, "GeForce GT 640" },
1346 { 0x0fc1, "GeForce GT 640" },
1347 { 0x0fc2, "GeForce GT 630" },
1348 { 0x0fc6, "GeForce GTX 650" },
1349 { 0x0fc8, "GeForce GT 740" },
1350 { 0x0fc9, "GeForce GT 730" },
1351 { 0x0fcd, "GeForce GT 755M" },
1352 { 0x0fce, "GeForce GT 640M LE" },
1353 { 0x0fd1, "GeForce GT 650M" },
1354 { 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 },
1355 { 0x0fd3, "GeForce GT 640M LE" },
1356 { 0x0fd4, "GeForce GTX 660M" },
1357 { 0x0fd5, "GeForce GT 650M" },
1358 { 0x0fd8, "GeForce GT 640M" },
1359 { 0x0fd9, "GeForce GT 645M" },
1360 { 0x0fdf, "GeForce GT 740M" },
1361 { 0x0fe0, "GeForce GTX 660M" },
1362 { 0x0fe1, "GeForce GT 730M" },
1363 { 0x0fe2, "GeForce GT 745M" },
1364 { 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 },
1365 { 0x0fe4, "GeForce GT 750M" },
1366 { 0x0fe9, "GeForce GT 750M" },
1367 { 0x0fea, "GeForce GT 755M" },
1368 { 0x0fec, "GeForce 710A" },
1369 { 0x0fef, "GRID K340" },
1370 { 0x0ff2, "GRID K1" },
1371 { 0x0ff3, "Quadro K420" },
1372 { 0x0ff6, "Quadro K1100M" },
1373 { 0x0ff8, "Quadro K500M" },
1374 { 0x0ff9, "Quadro K2000D" },
1375 { 0x0ffa, "Quadro K600" },
1376 { 0x0ffb, "Quadro K2000M" },
1377 { 0x0ffc, "Quadro K1000M" },
1378 { 0x0ffd, "NVS 510" },
1379 { 0x0ffe, "Quadro K2000" },
1380 { 0x0fff, "Quadro 410" },
1381 { 0x1001, "GeForce GTX TITAN Z" },
1382 { 0x1004, "GeForce GTX 780" },
1383 { 0x1005, "GeForce GTX TITAN" },
1384 { 0x1007, "GeForce GTX 780" },
1385 { 0x1008, "GeForce GTX 780 Ti" },
1386 { 0x100a, "GeForce GTX 780 Ti" },
1387 { 0x100c, "GeForce GTX TITAN Black" },
1388 { 0x1021, "Tesla K20Xm" },
1389 { 0x1022, "Tesla K20c" },
1390 { 0x1023, "Tesla K40m" },
1391 { 0x1024, "Tesla K40c" },
1392 { 0x1026, "Tesla K20s" },
1393 { 0x1027, "Tesla K40st" },
1394 { 0x1028, "Tesla K20m" },
1395 { 0x1029, "Tesla K40s" },
1396 { 0x102a, "Tesla K40t" },
1397 { 0x102d, "Tesla K80" },
1398 { 0x103a, "Quadro K6000" },
1399 { 0x103c, "Quadro K5200" },
1400 { 0x1040, "GeForce GT 520" },
1401 { 0x1042, "GeForce 510" },
1402 { 0x1048, "GeForce 605" },
1403 { 0x1049, "GeForce GT 620" },
1404 { 0x104a, "GeForce GT 610" },
1405 { 0x104b, "GeForce GT 625 (OEM)", nvkm_device_pci_10de_104b },
1406 { 0x104c, "GeForce GT 705" },
1407 { 0x1050, "GeForce GT 520M" },
1408 { 0x1051, "GeForce GT 520MX" },
1409 { 0x1052, "GeForce GT 520M" },
1410 { 0x1054, "GeForce 410M" },
1411 { 0x1055, "GeForce 410M" },
1412 { 0x1056, "NVS 4200M" },
1413 { 0x1057, "NVS 4200M" },
1414 { 0x1058, "GeForce 610M", nvkm_device_pci_10de_1058 },
1415 { 0x1059, "GeForce 610M" },
1416 { 0x105a, "GeForce 610M" },
1417 { 0x105b, "GeForce 705M", nvkm_device_pci_10de_105b },
1418 { 0x107c, "NVS 315" },
1419 { 0x107d, "NVS 310" },
1420 { 0x1080, "GeForce GTX 580" },
1421 { 0x1081, "GeForce GTX 570" },
1422 { 0x1082, "GeForce GTX 560 Ti" },
1423 { 0x1084, "GeForce GTX 560" },
1424 { 0x1086, "GeForce GTX 570" },
1425 { 0x1087, "GeForce GTX 560 Ti" },
1426 { 0x1088, "GeForce GTX 590" },
1427 { 0x1089, "GeForce GTX 580" },
1428 { 0x108b, "GeForce GTX 580" },
1429 { 0x1091, "Tesla M2090", nvkm_device_pci_10de_1091 },
1430 { 0x1094, "Tesla M2075" },
1431 { 0x1096, "Tesla C2075", nvkm_device_pci_10de_1096 },
1432 { 0x109a, "Quadro 5010M" },
1433 { 0x109b, "Quadro 7000" },
1434 { 0x10c0, "GeForce 9300 GS" },
1435 { 0x10c3, "GeForce 8400GS" },
1436 { 0x10c5, "GeForce 405" },
1437 { 0x10d8, "NVS 300" },
1438 { 0x1140, NULL, nvkm_device_pci_10de_1140 },
1439 { 0x1180, "GeForce GTX 680" },
1440 { 0x1183, "GeForce GTX 660 Ti" },
1441 { 0x1184, "GeForce GTX 770" },
1442 { 0x1185, "GeForce GTX 660", nvkm_device_pci_10de_1185 },
1443 { 0x1187, "GeForce GTX 760" },
1444 { 0x1188, "GeForce GTX 690" },
1445 { 0x1189, "GeForce GTX 670", nvkm_device_pci_10de_1189 },
1446 { 0x118a, "GRID K520" },
1447 { 0x118e, "GeForce GTX 760 (192-bit)" },
1448 { 0x118f, "Tesla K10" },
1449 { 0x1193, "GeForce GTX 760 Ti OEM" },
1450 { 0x1194, "Tesla K8" },
1451 { 0x1195, "GeForce GTX 660" },
1452 { 0x1198, "GeForce GTX 880M" },
1453 { 0x1199, "GeForce GTX 870M", nvkm_device_pci_10de_1199 },
1454 { 0x119a, "GeForce GTX 860M" },
1455 { 0x119d, "GeForce GTX 775M" },
1456 { 0x119e, "GeForce GTX 780M" },
1457 { 0x119f, "GeForce GTX 780M" },
1458 { 0x11a0, "GeForce GTX 680M" },
1459 { 0x11a1, "GeForce GTX 670MX" },
1460 { 0x11a2, "GeForce GTX 675MX" },
1461 { 0x11a3, "GeForce GTX 680MX" },
1462 { 0x11a7, "GeForce GTX 675MX" },
1463 { 0x11b4, "Quadro K4200" },
1464 { 0x11b6, "Quadro K3100M" },
1465 { 0x11b7, "Quadro K4100M" },
1466 { 0x11b8, "Quadro K5100M" },
1467 { 0x11ba, "Quadro K5000" },
1468 { 0x11bc, "Quadro K5000M" },
1469 { 0x11bd, "Quadro K4000M" },
1470 { 0x11be, "Quadro K3000M" },
1471 { 0x11bf, "GRID K2" },
1472 { 0x11c0, "GeForce GTX 660" },
1473 { 0x11c2, "GeForce GTX 650 Ti BOOST" },
1474 { 0x11c3, "GeForce GTX 650 Ti" },
1475 { 0x11c4, "GeForce GTX 645" },
1476 { 0x11c5, "GeForce GT 740" },
1477 { 0x11c6, "GeForce GTX 650 Ti" },
1478 { 0x11c8, "GeForce GTX 650" },
1479 { 0x11cb, "GeForce GT 740" },
1480 { 0x11e0, "GeForce GTX 770M" },
1481 { 0x11e1, "GeForce GTX 765M" },
1482 { 0x11e2, "GeForce GTX 765M" },
1483 { 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 },
1484 { 0x11fa, "Quadro K4000" },
1485 { 0x11fc, "Quadro K2100M", nvkm_device_pci_10de_11fc },
1486 { 0x1200, "GeForce GTX 560 Ti" },
1487 { 0x1201, "GeForce GTX 560" },
1488 { 0x1203, "GeForce GTX 460 SE v2" },
1489 { 0x1205, "GeForce GTX 460 v2" },
1490 { 0x1206, "GeForce GTX 555" },
1491 { 0x1207, "GeForce GT 645" },
1492 { 0x1208, "GeForce GTX 560 SE" },
1493 { 0x1210, "GeForce GTX 570M" },
1494 { 0x1211, "GeForce GTX 580M" },
1495 { 0x1212, "GeForce GTX 675M" },
1496 { 0x1213, "GeForce GTX 670M" },
1497 { 0x1241, "GeForce GT 545" },
1498 { 0x1243, "GeForce GT 545" },
1499 { 0x1244, "GeForce GTX 550 Ti" },
1500 { 0x1245, "GeForce GTS 450" },
1501 { 0x1246, "GeForce GT 550M" },
1502 { 0x1247, "GeForce GT 555M", nvkm_device_pci_10de_1247 },
1503 { 0x1248, "GeForce GT 555M" },
1504 { 0x1249, "GeForce GTS 450" },
1505 { 0x124b, "GeForce GT 640" },
1506 { 0x124d, "GeForce GT 555M", nvkm_device_pci_10de_124d },
1507 { 0x1251, "GeForce GTX 560M" },
1508 { 0x1280, "GeForce GT 635" },
1509 { 0x1281, "GeForce GT 710" },
1510 { 0x1282, "GeForce GT 640" },
1511 { 0x1284, "GeForce GT 630" },
1512 { 0x1286, "GeForce GT 720" },
1513 { 0x1287, "GeForce GT 730" },
1514 { 0x1288, "GeForce GT 720" },
1515 { 0x1289, "GeForce GT 710" },
1516 { 0x1290, "GeForce GT 730M", nvkm_device_pci_10de_1290 },
1517 { 0x1291, "GeForce GT 735M" },
1518 { 0x1292, "GeForce GT 740M", nvkm_device_pci_10de_1292 },
1519 { 0x1293, "GeForce GT 730M" },
1520 { 0x1295, "GeForce 710M", nvkm_device_pci_10de_1295 },
1521 { 0x1296, "GeForce 825M" },
1522 { 0x1298, "GeForce GT 720M" },
1523 { 0x1299, "GeForce 920M", nvkm_device_pci_10de_1299 },
1524 { 0x129a, "GeForce 910M" },
1525 { 0x12b9, "Quadro K610M" },
1526 { 0x12ba, "Quadro K510M" },
1527 { 0x1340, "GeForce 830M", nvkm_device_pci_10de_1340 },
1528 { 0x1341, "GeForce 840M", nvkm_device_pci_10de_1341 },
1529 { 0x1344, "GeForce 845M" },
1530 { 0x1346, "GeForce 930M", nvkm_device_pci_10de_1346 },
1531 { 0x1347, "GeForce 940M", nvkm_device_pci_10de_1347 },
1532 { 0x137a, NULL, nvkm_device_pci_10de_137a },
1533 { 0x137d, NULL, nvkm_device_pci_10de_137d },
1534 { 0x1380, "GeForce GTX 750 Ti" },
1535 { 0x1381, "GeForce GTX 750" },
1536 { 0x1382, "GeForce GTX 745" },
1537 { 0x1390, "GeForce 845M" },
1538 { 0x1391, "GeForce GTX 850M", nvkm_device_pci_10de_1391 },
1539 { 0x1392, "GeForce GTX 860M", nvkm_device_pci_10de_1392 },
1540 { 0x1393, "GeForce 840M" },
1541 { 0x1398, "GeForce 845M" },
1542 { 0x139a, "GeForce GTX 950M", nvkm_device_pci_10de_139a },
1543 { 0x139b, "GeForce GTX 960M", nvkm_device_pci_10de_139b },
1544 { 0x139c, "GeForce 940M" },
1545 { 0x13b3, "Quadro K2200M" },
1546 { 0x13ba, "Quadro K2200" },
1547 { 0x13bb, "Quadro K620" },
1548 { 0x13bc, "Quadro K1200" },
1549 { 0x13c0, "GeForce GTX 980" },
1550 { 0x13c2, "GeForce GTX 970" },
1551 { 0x13d7, "GeForce GTX 980M" },
1552 { 0x13d8, "GeForce GTX 970M" },
1553 { 0x13d9, "GeForce GTX 965M" },
1554 { 0x1401, "GeForce GTX 960" },
1555 { 0x1617, "GeForce GTX 980M" },
1556 { 0x1618, "GeForce GTX 970M" },
1557 { 0x1619, "GeForce GTX 965M" },
1558 { 0x17c2, "GeForce GTX TITAN X" },
1559 { 0x17c8, "GeForce GTX 980 Ti" },
1560 { 0x17f0, "Quadro M6000" },
1561 {}
1562};
1563
1564static struct nvkm_device_pci *
1565nvkm_device_pci(struct nvkm_device *device)
1566{
1567 return container_of(device, struct nvkm_device_pci, device);
1568}
1569
1570static resource_size_t
1571nvkm_device_pci_resource_addr(struct nvkm_device *device, unsigned bar)
1572{
1573 struct nvkm_device_pci *pdev = nvkm_device_pci(device);
1574 return pci_resource_start(pdev->pdev, bar);
1575}
1576
1577static resource_size_t
1578nvkm_device_pci_resource_size(struct nvkm_device *device, unsigned bar)
1579{
1580 struct nvkm_device_pci *pdev = nvkm_device_pci(device);
1581 return pci_resource_len(pdev->pdev, bar);
1582}
1583
1584static void
1585nvkm_device_pci_fini(struct nvkm_device *device, bool suspend)
1586{
1587 struct nvkm_device_pci *pdev = nvkm_device_pci(device);
1588 if (suspend) {
1589 pci_disable_device(pdev->pdev);
1590 pdev->suspend = true;
1591 }
1592}
1593
1594static int
1595nvkm_device_pci_preinit(struct nvkm_device *device)
1596{
1597 struct nvkm_device_pci *pdev = nvkm_device_pci(device);
1598 if (pdev->suspend) {
1599 int ret = pci_enable_device(pdev->pdev);
1600 if (ret)
1601 return ret;
1602 pci_set_master(pdev->pdev);
1603 pdev->suspend = false;
1604 }
1605 return 0;
1606}
1607
1608static void *
1609nvkm_device_pci_dtor(struct nvkm_device *device)
1610{
1611 struct nvkm_device_pci *pdev = nvkm_device_pci(device);
1612 pci_disable_device(pdev->pdev);
1613 return pdev;
1614}
1615
1616static const struct nvkm_device_func
1617nvkm_device_pci_func = {
1618 .pci = nvkm_device_pci,
1619 .dtor = nvkm_device_pci_dtor,
1620 .preinit = nvkm_device_pci_preinit,
1621 .fini = nvkm_device_pci_fini,
1622 .resource_addr = nvkm_device_pci_resource_addr,
1623 .resource_size = nvkm_device_pci_resource_size,
1624 .cpu_coherent = !IS_ENABLED(CONFIG_ARM),
1625};
1626
1627int
1628nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
1629 bool detect, bool mmio, u64 subdev_mask,
1630 struct nvkm_device **pdevice)
1631{
1632 const struct nvkm_device_quirk *quirk = NULL;
1633 const struct nvkm_device_pci_device *pcid;
1634 const struct nvkm_device_pci_vendor *pciv;
1635 const char *name = NULL;
1636 struct nvkm_device_pci *pdev;
1637 int ret;
1638
1639 ret = pci_enable_device(pci_dev);
1640 if (ret)
1641 return ret;
1642
1643 switch (pci_dev->vendor) {
1644 case 0x10de: pcid = nvkm_device_pci_10de; break;
1645 default:
1646 pcid = NULL;
1647 break;
1648 }
1649
1650 while (pcid && pcid->device) {
1651 if (pciv = pcid->vendor, pcid->device == pci_dev->device) {
1652 while (pciv && pciv->vendor) {
1653 if (pciv->vendor == pci_dev->subsystem_vendor &&
1654 pciv->device == pci_dev->subsystem_device) {
1655 quirk = &pciv->quirk;
1656 name = pciv->name;
1657 break;
1658 }
1659 pciv++;
1660 }
1661 if (!name)
1662 name = pcid->name;
1663 break;
1664 }
1665 pcid++;
1666 }
1667
1668 if (!(pdev = kzalloc(sizeof(*pdev), GFP_KERNEL))) {
1669 pci_disable_device(pci_dev);
1670 return -ENOMEM;
1671 }
1672 *pdevice = &pdev->device;
1673 pdev->pdev = pci_dev;
1674
1675 return nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
1676 pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
1677 pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
1678 NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
1679 (u64)pci_domain_nr(pci_dev->bus) << 32 |
1680 pci_dev->bus->number << 16 |
1681 PCI_SLOT(pci_dev->devfn) << 8 |
1682 PCI_FUNC(pci_dev->devfn), name,
1683 cfg, dbg, detect, mmio, subdev_mask,
1684 &pdev->device);
1685}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 8d3590e7bd87..ed3ad2c30e17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -2,15 +2,49 @@
2#define __NVKM_DEVICE_PRIV_H__ 2#define __NVKM_DEVICE_PRIV_H__
3#include <core/device.h> 3#include <core/device.h>
4 4
5extern struct nvkm_oclass nvkm_control_oclass[]; 5#include <subdev/bar.h>
6#include <subdev/bios.h>
7#include <subdev/bus.h>
8#include <subdev/clk.h>
9#include <subdev/devinit.h>
10#include <subdev/fb.h>
11#include <subdev/fuse.h>
12#include <subdev/gpio.h>
13#include <subdev/i2c.h>
14#include <subdev/ibus.h>
15#include <subdev/instmem.h>
16#include <subdev/ltc.h>
17#include <subdev/mc.h>
18#include <subdev/mmu.h>
19#include <subdev/mxm.h>
20#include <subdev/pci.h>
21#include <subdev/pmu.h>
22#include <subdev/therm.h>
23#include <subdev/timer.h>
24#include <subdev/volt.h>
6 25
7int nv04_identify(struct nvkm_device *); 26#include <engine/bsp.h>
8int nv10_identify(struct nvkm_device *); 27#include <engine/ce.h>
9int nv20_identify(struct nvkm_device *); 28#include <engine/cipher.h>
10int nv30_identify(struct nvkm_device *); 29#include <engine/disp.h>
11int nv40_identify(struct nvkm_device *); 30#include <engine/dma.h>
12int nv50_identify(struct nvkm_device *); 31#include <engine/fifo.h>
13int gf100_identify(struct nvkm_device *); 32#include <engine/gr.h>
14int gk104_identify(struct nvkm_device *); 33#include <engine/mpeg.h>
15int gm100_identify(struct nvkm_device *); 34#include <engine/mspdec.h>
35#include <engine/msppp.h>
36#include <engine/msvld.h>
37#include <engine/pm.h>
38#include <engine/sec.h>
39#include <engine/sw.h>
40#include <engine/vp.h>
41
42int nvkm_device_ctor(const struct nvkm_device_func *,
43 const struct nvkm_device_quirk *,
44 struct device *, enum nvkm_device_type, u64 handle,
45 const char *name, const char *cfg, const char *dbg,
46 bool detect, bool mmio, u64 subdev_mask,
47 struct nvkm_device *);
48int nvkm_device_init(struct nvkm_device *);
49int nvkm_device_fini(struct nvkm_device *, bool suspend);
16#endif 50#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
new file mode 100644
index 000000000000..da57c8a60608
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -0,0 +1,295 @@
1/*
2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22#include <core/tegra.h>
23#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
24#include "priv.h"
25
26static int
27nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
28{
29 int ret;
30
31 ret = regulator_enable(tdev->vdd);
32 if (ret)
33 goto err_power;
34
35 ret = clk_prepare_enable(tdev->clk);
36 if (ret)
37 goto err_clk;
38 ret = clk_prepare_enable(tdev->clk_pwr);
39 if (ret)
40 goto err_clk_pwr;
41 clk_set_rate(tdev->clk_pwr, 204000000);
42 udelay(10);
43
44 reset_control_assert(tdev->rst);
45 udelay(10);
46
47 ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
48 if (ret)
49 goto err_clamp;
50 udelay(10);
51
52 reset_control_deassert(tdev->rst);
53 udelay(10);
54
55 return 0;
56
57err_clamp:
58 clk_disable_unprepare(tdev->clk_pwr);
59err_clk_pwr:
60 clk_disable_unprepare(tdev->clk);
61err_clk:
62 regulator_disable(tdev->vdd);
63err_power:
64 return ret;
65}
66
67static int
68nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
69{
70 reset_control_assert(tdev->rst);
71 udelay(10);
72
73 clk_disable_unprepare(tdev->clk_pwr);
74 clk_disable_unprepare(tdev->clk);
75 udelay(10);
76
77 return regulator_disable(tdev->vdd);
78}
79
80static void
81nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
82{
83#if IS_ENABLED(CONFIG_IOMMU_API)
84 struct device *dev = &tdev->pdev->dev;
85 unsigned long pgsize_bitmap;
86 int ret;
87
88 mutex_init(&tdev->iommu.mutex);
89
90 if (iommu_present(&platform_bus_type)) {
91 tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
92 if (IS_ERR(tdev->iommu.domain))
93 goto error;
94
95 /*
96 * A IOMMU is only usable if it supports page sizes smaller
97 * or equal to the system's PAGE_SIZE, with a preference if
98 * both are equal.
99 */
100 pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
101 if (pgsize_bitmap & PAGE_SIZE) {
102 tdev->iommu.pgshift = PAGE_SHIFT;
103 } else {
104 tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
105 if (tdev->iommu.pgshift == 0) {
106 dev_warn(dev, "unsupported IOMMU page size\n");
107 goto free_domain;
108 }
109 tdev->iommu.pgshift -= 1;
110 }
111
112 ret = iommu_attach_device(tdev->iommu.domain, dev);
113 if (ret)
114 goto free_domain;
115
116 ret = nvkm_mm_init(&tdev->iommu.mm, 0,
117 (1ULL << 40) >> tdev->iommu.pgshift, 1);
118 if (ret)
119 goto detach_device;
120 }
121
122 return;
123
124detach_device:
125 iommu_detach_device(tdev->iommu.domain, dev);
126
127free_domain:
128 iommu_domain_free(tdev->iommu.domain);
129
130error:
131 tdev->iommu.domain = NULL;
132 tdev->iommu.pgshift = 0;
133 dev_err(dev, "cannot initialize IOMMU MM\n");
134#endif
135}
136
137static void
138nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
139{
140#if IS_ENABLED(CONFIG_IOMMU_API)
141 if (tdev->iommu.domain) {
142 nvkm_mm_fini(&tdev->iommu.mm);
143 iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
144 iommu_domain_free(tdev->iommu.domain);
145 }
146#endif
147}
148
149static struct nvkm_device_tegra *
150nvkm_device_tegra(struct nvkm_device *device)
151{
152 return container_of(device, struct nvkm_device_tegra, device);
153}
154
155static struct resource *
156nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
157{
158 struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
159 return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
160}
161
162static resource_size_t
163nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
164{
165 struct resource *res = nvkm_device_tegra_resource(device, bar);
166 return res ? res->start : 0;
167}
168
169static resource_size_t
170nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
171{
172 struct resource *res = nvkm_device_tegra_resource(device, bar);
173 return res ? resource_size(res) : 0;
174}
175
176static irqreturn_t
177nvkm_device_tegra_intr(int irq, void *arg)
178{
179 struct nvkm_device_tegra *tdev = arg;
180 struct nvkm_mc *mc = tdev->device.mc;
181 bool handled = false;
182 if (likely(mc)) {
183 nvkm_mc_intr_unarm(mc);
184 nvkm_mc_intr(mc, &handled);
185 nvkm_mc_intr_rearm(mc);
186 }
187 return handled ? IRQ_HANDLED : IRQ_NONE;
188}
189
190static void
191nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
192{
193 struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
194 if (tdev->irq) {
195 free_irq(tdev->irq, tdev);
196 tdev->irq = 0;
197 };
198}
199
200static int
201nvkm_device_tegra_init(struct nvkm_device *device)
202{
203 struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
204 int irq, ret;
205
206 irq = platform_get_irq_byname(tdev->pdev, "stall");
207 if (irq < 0)
208 return irq;
209
210 ret = request_irq(irq, nvkm_device_tegra_intr,
211 IRQF_SHARED, "nvkm", tdev);
212 if (ret)
213 return ret;
214
215 tdev->irq = irq;
216 return 0;
217}
218
219static void *
220nvkm_device_tegra_dtor(struct nvkm_device *device)
221{
222 struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
223 nvkm_device_tegra_power_down(tdev);
224 nvkm_device_tegra_remove_iommu(tdev);
225 return tdev;
226}
227
228static const struct nvkm_device_func
229nvkm_device_tegra_func = {
230 .tegra = nvkm_device_tegra,
231 .dtor = nvkm_device_tegra_dtor,
232 .init = nvkm_device_tegra_init,
233 .fini = nvkm_device_tegra_fini,
234 .resource_addr = nvkm_device_tegra_resource_addr,
235 .resource_size = nvkm_device_tegra_resource_size,
236 .cpu_coherent = false,
237};
238
239int
240nvkm_device_tegra_new(struct platform_device *pdev,
241 const char *cfg, const char *dbg,
242 bool detect, bool mmio, u64 subdev_mask,
243 struct nvkm_device **pdevice)
244{
245 struct nvkm_device_tegra *tdev;
246 int ret;
247
248 if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
249 return -ENOMEM;
250 *pdevice = &tdev->device;
251 tdev->pdev = pdev;
252 tdev->irq = -1;
253
254 tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
255 if (IS_ERR(tdev->vdd))
256 return PTR_ERR(tdev->vdd);
257
258 tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
259 if (IS_ERR(tdev->rst))
260 return PTR_ERR(tdev->rst);
261
262 tdev->clk = devm_clk_get(&pdev->dev, "gpu");
263 if (IS_ERR(tdev->clk))
264 return PTR_ERR(tdev->clk);
265
266 tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
267 if (IS_ERR(tdev->clk_pwr))
268 return PTR_ERR(tdev->clk_pwr);
269
270 nvkm_device_tegra_probe_iommu(tdev);
271
272 ret = nvkm_device_tegra_power_up(tdev);
273 if (ret)
274 return ret;
275
276 tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
277 ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
278 NVKM_DEVICE_TEGRA, pdev->id, NULL,
279 cfg, dbg, detect, mmio, subdev_mask,
280 &tdev->device);
281 if (ret)
282 return ret;
283
284 return 0;
285}
286#else
287int
288nvkm_device_tegra_new(struct platform_device *pdev,
289 const char *cfg, const char *dbg,
290 bool detect, bool mmio, u64 subdev_mask,
291 struct nvkm_device **pdevice)
292{
293 return -ENOSYS;
294}
295#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
new file mode 100644
index 000000000000..1ae48f27029d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -0,0 +1,371 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#define nvkm_udevice(p) container_of((p), struct nvkm_udevice, object)
25#include "priv.h"
26#include "ctrl.h"
27
28#include <core/client.h>
29#include <subdev/fb.h>
30#include <subdev/instmem.h>
31#include <subdev/timer.h>
32
33#include <nvif/class.h>
34#include <nvif/unpack.h>
35
36struct nvkm_udevice {
37 struct nvkm_object object;
38 struct nvkm_device *device;
39};
40
41static int
42nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
43{
44 struct nvkm_object *object = &udev->object;
45 struct nvkm_device *device = udev->device;
46 struct nvkm_fb *fb = device->fb;
47 struct nvkm_instmem *imem = device->imem;
48 union {
49 struct nv_device_info_v0 v0;
50 } *args = data;
51 int ret;
52
53 nvif_ioctl(object, "device info size %d\n", size);
54 if (nvif_unpack(args->v0, 0, 0, false)) {
55 nvif_ioctl(object, "device info vers %d\n", args->v0.version);
56 } else
57 return ret;
58
59 switch (device->chipset) {
60 case 0x01a:
61 case 0x01f:
62 case 0x04c:
63 case 0x04e:
64 case 0x063:
65 case 0x067:
66 case 0x068:
67 case 0x0aa:
68 case 0x0ac:
69 case 0x0af:
70 args->v0.platform = NV_DEVICE_INFO_V0_IGP;
71 break;
72 default:
73 switch (device->type) {
74 case NVKM_DEVICE_PCI:
75 args->v0.platform = NV_DEVICE_INFO_V0_PCI;
76 break;
77 case NVKM_DEVICE_AGP:
78 args->v0.platform = NV_DEVICE_INFO_V0_AGP;
79 break;
80 case NVKM_DEVICE_PCIE:
81 args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
82 break;
83 case NVKM_DEVICE_TEGRA:
84 args->v0.platform = NV_DEVICE_INFO_V0_SOC;
85 break;
86 default:
87 WARN_ON(1);
88 break;
89 }
90 break;
91 }
92
93 switch (device->card_type) {
94 case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
95 case NV_10:
96 case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
97 case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
98 case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
99 case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
100 case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
101 case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
102 case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
103 case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
104 default:
105 args->v0.family = 0;
106 break;
107 }
108
109 args->v0.chipset = device->chipset;
110 args->v0.revision = device->chiprev;
111 if (fb && fb->ram)
112 args->v0.ram_size = args->v0.ram_user = fb->ram->size;
113 else
114 args->v0.ram_size = args->v0.ram_user = 0;
115 if (imem && args->v0.ram_size > 0)
116 args->v0.ram_user = args->v0.ram_user - imem->reserved;
117
118 strncpy(args->v0.chip, device->chip->name, sizeof(args->v0.chip));
119 strncpy(args->v0.name, device->name, sizeof(args->v0.name));
120 return 0;
121}
122
123static int
124nvkm_udevice_time(struct nvkm_udevice *udev, void *data, u32 size)
125{
126 struct nvkm_device *device = udev->device;
127 union {
128 struct nv_device_time_v0 v0;
129 } *args = data;
130 int ret;
131
132 if (nvif_unpack(args->v0, 0, 0, false)) {
133 args->v0.time = nvkm_timer_read(device->timer);
134 }
135
136 return ret;
137}
138
139static int
140nvkm_udevice_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
141{
142 struct nvkm_udevice *udev = nvkm_udevice(object);
143 switch (mthd) {
144 case NV_DEVICE_V0_INFO:
145 return nvkm_udevice_info(udev, data, size);
146 case NV_DEVICE_V0_TIME:
147 return nvkm_udevice_time(udev, data, size);
148 default:
149 break;
150 }
151 return -EINVAL;
152}
153
154static int
155nvkm_udevice_rd08(struct nvkm_object *object, u64 addr, u8 *data)
156{
157 struct nvkm_udevice *udev = nvkm_udevice(object);
158 *data = nvkm_rd08(udev->device, addr);
159 return 0;
160}
161
162static int
163nvkm_udevice_rd16(struct nvkm_object *object, u64 addr, u16 *data)
164{
165 struct nvkm_udevice *udev = nvkm_udevice(object);
166 *data = nvkm_rd16(udev->device, addr);
167 return 0;
168}
169
170static int
171nvkm_udevice_rd32(struct nvkm_object *object, u64 addr, u32 *data)
172{
173 struct nvkm_udevice *udev = nvkm_udevice(object);
174 *data = nvkm_rd32(udev->device, addr);
175 return 0;
176}
177
178static int
179nvkm_udevice_wr08(struct nvkm_object *object, u64 addr, u8 data)
180{
181 struct nvkm_udevice *udev = nvkm_udevice(object);
182 nvkm_wr08(udev->device, addr, data);
183 return 0;
184}
185
186static int
187nvkm_udevice_wr16(struct nvkm_object *object, u64 addr, u16 data)
188{
189 struct nvkm_udevice *udev = nvkm_udevice(object);
190 nvkm_wr16(udev->device, addr, data);
191 return 0;
192}
193
194static int
195nvkm_udevice_wr32(struct nvkm_object *object, u64 addr, u32 data)
196{
197 struct nvkm_udevice *udev = nvkm_udevice(object);
198 nvkm_wr32(udev->device, addr, data);
199 return 0;
200}
201
202static int
203nvkm_udevice_map(struct nvkm_object *object, u64 *addr, u32 *size)
204{
205 struct nvkm_udevice *udev = nvkm_udevice(object);
206 struct nvkm_device *device = udev->device;
207 *addr = device->func->resource_addr(device, 0);
208 *size = device->func->resource_size(device, 0);
209 return 0;
210}
211
212static int
213nvkm_udevice_fini(struct nvkm_object *object, bool suspend)
214{
215 struct nvkm_udevice *udev = nvkm_udevice(object);
216 struct nvkm_device *device = udev->device;
217 int ret = 0;
218
219 mutex_lock(&device->mutex);
220 if (!--device->refcount) {
221 ret = nvkm_device_fini(device, suspend);
222 if (ret && suspend) {
223 device->refcount++;
224 goto done;
225 }
226 }
227
228done:
229 mutex_unlock(&device->mutex);
230 return ret;
231}
232
233static int
234nvkm_udevice_init(struct nvkm_object *object)
235{
236 struct nvkm_udevice *udev = nvkm_udevice(object);
237 struct nvkm_device *device = udev->device;
238 int ret = 0;
239
240 mutex_lock(&device->mutex);
241 if (!device->refcount++) {
242 ret = nvkm_device_init(device);
243 if (ret) {
244 device->refcount--;
245 goto done;
246 }
247 }
248
249done:
250 mutex_unlock(&device->mutex);
251 return ret;
252}
253
254static int
255nvkm_udevice_child_new(const struct nvkm_oclass *oclass,
256 void *data, u32 size, struct nvkm_object **pobject)
257{
258 struct nvkm_udevice *udev = nvkm_udevice(oclass->parent);
259 const struct nvkm_device_oclass *sclass = oclass->priv;
260 return sclass->ctor(udev->device, oclass, data, size, pobject);
261}
262
263static int
264nvkm_udevice_child_get(struct nvkm_object *object, int index,
265 struct nvkm_oclass *oclass)
266{
267 struct nvkm_udevice *udev = nvkm_udevice(object);
268 struct nvkm_device *device = udev->device;
269 struct nvkm_engine *engine;
270 u64 mask = (1ULL << NVKM_ENGINE_DMAOBJ) |
271 (1ULL << NVKM_ENGINE_FIFO) |
272 (1ULL << NVKM_ENGINE_DISP) |
273 (1ULL << NVKM_ENGINE_PM);
274 const struct nvkm_device_oclass *sclass = NULL;
275 int i;
276
277 for (; i = __ffs64(mask), mask && !sclass; mask &= ~(1ULL << i)) {
278 if (!(engine = nvkm_device_engine(device, i)) ||
279 !(engine->func->base.sclass))
280 continue;
281 oclass->engine = engine;
282
283 index -= engine->func->base.sclass(oclass, index, &sclass);
284 }
285
286 if (!sclass) {
287 switch (index) {
288 case 0: sclass = &nvkm_control_oclass; break;
289 default:
290 return -EINVAL;
291 }
292 oclass->base = sclass->base;
293 }
294
295 oclass->ctor = nvkm_udevice_child_new;
296 oclass->priv = sclass;
297 return 0;
298}
299
300static const struct nvkm_object_func
301nvkm_udevice_super = {
302 .init = nvkm_udevice_init,
303 .fini = nvkm_udevice_fini,
304 .mthd = nvkm_udevice_mthd,
305 .map = nvkm_udevice_map,
306 .rd08 = nvkm_udevice_rd08,
307 .rd16 = nvkm_udevice_rd16,
308 .rd32 = nvkm_udevice_rd32,
309 .wr08 = nvkm_udevice_wr08,
310 .wr16 = nvkm_udevice_wr16,
311 .wr32 = nvkm_udevice_wr32,
312 .sclass = nvkm_udevice_child_get,
313};
314
315static const struct nvkm_object_func
316nvkm_udevice = {
317 .init = nvkm_udevice_init,
318 .fini = nvkm_udevice_fini,
319 .mthd = nvkm_udevice_mthd,
320 .sclass = nvkm_udevice_child_get,
321};
322
323int
324nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
325 struct nvkm_object **pobject)
326{
327 union {
328 struct nv_device_v0 v0;
329 } *args = data;
330 struct nvkm_client *client = oclass->client;
331 struct nvkm_object *parent = &client->object;
332 const struct nvkm_object_func *func;
333 struct nvkm_udevice *udev;
334 int ret;
335
336 nvif_ioctl(parent, "create device size %d\n", size);
337 if (nvif_unpack(args->v0, 0, 0, false)) {
338 nvif_ioctl(parent, "create device v%d device %016llx\n",
339 args->v0.version, args->v0.device);
340 } else
341 return ret;
342
343 /* give priviledged clients register access */
344 if (client->super)
345 func = &nvkm_udevice_super;
346 else
347 func = &nvkm_udevice;
348
349 if (!(udev = kzalloc(sizeof(*udev), GFP_KERNEL)))
350 return -ENOMEM;
351 nvkm_object_ctor(func, oclass, &udev->object);
352 *pobject = &udev->object;
353
354 /* find the device that matches what the client requested */
355 if (args->v0.device != ~0)
356 udev->device = nvkm_device_find(args->v0.device);
357 else
358 udev->device = nvkm_device_find(client->device);
359 if (!udev->device)
360 return -ENODEV;
361
362 return 0;
363}
364
365const struct nvkm_sclass
366nvkm_udevice_sclass = {
367 .oclass = NV_DEVICE,
368 .minver = 0,
369 .maxver = 0,
370 .ctor = nvkm_udevice_new,
371};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 16a4e2a37008..04f60452011e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -1,29 +1,93 @@
1nvkm-y += nvkm/engine/disp/base.o 1nvkm-y += nvkm/engine/disp/base.o
2nvkm-y += nvkm/engine/disp/conn.o
3nvkm-y += nvkm/engine/disp/outp.o
4nvkm-y += nvkm/engine/disp/outpdp.o
5nvkm-y += nvkm/engine/disp/nv04.o 2nvkm-y += nvkm/engine/disp/nv04.o
6nvkm-y += nvkm/engine/disp/nv50.o 3nvkm-y += nvkm/engine/disp/nv50.o
7nvkm-y += nvkm/engine/disp/g84.o 4nvkm-y += nvkm/engine/disp/g84.o
8nvkm-y += nvkm/engine/disp/g94.o 5nvkm-y += nvkm/engine/disp/g94.o
9nvkm-y += nvkm/engine/disp/gt200.o 6nvkm-y += nvkm/engine/disp/gt200.o
10nvkm-y += nvkm/engine/disp/gt215.o 7nvkm-y += nvkm/engine/disp/gt215.o
11nvkm-y += nvkm/engine/disp/gf110.o 8nvkm-y += nvkm/engine/disp/gf119.o
12nvkm-y += nvkm/engine/disp/gk104.o 9nvkm-y += nvkm/engine/disp/gk104.o
13nvkm-y += nvkm/engine/disp/gk110.o 10nvkm-y += nvkm/engine/disp/gk110.o
14nvkm-y += nvkm/engine/disp/gm107.o 11nvkm-y += nvkm/engine/disp/gm107.o
15nvkm-y += nvkm/engine/disp/gm204.o 12nvkm-y += nvkm/engine/disp/gm204.o
13
14nvkm-y += nvkm/engine/disp/outp.o
15nvkm-y += nvkm/engine/disp/outpdp.o
16nvkm-y += nvkm/engine/disp/dacnv50.o 16nvkm-y += nvkm/engine/disp/dacnv50.o
17nvkm-y += nvkm/engine/disp/piornv50.o
18nvkm-y += nvkm/engine/disp/sornv50.o
19nvkm-y += nvkm/engine/disp/sorg94.o
20nvkm-y += nvkm/engine/disp/sorgf119.o
21nvkm-y += nvkm/engine/disp/sorgm204.o
17nvkm-y += nvkm/engine/disp/dport.o 22nvkm-y += nvkm/engine/disp/dport.o
23
24nvkm-y += nvkm/engine/disp/conn.o
25
18nvkm-y += nvkm/engine/disp/hdagt215.o 26nvkm-y += nvkm/engine/disp/hdagt215.o
19nvkm-y += nvkm/engine/disp/hdagf110.o 27nvkm-y += nvkm/engine/disp/hdagf119.o
28
20nvkm-y += nvkm/engine/disp/hdmig84.o 29nvkm-y += nvkm/engine/disp/hdmig84.o
21nvkm-y += nvkm/engine/disp/hdmigt215.o 30nvkm-y += nvkm/engine/disp/hdmigt215.o
22nvkm-y += nvkm/engine/disp/hdmigf110.o 31nvkm-y += nvkm/engine/disp/hdmigf119.o
23nvkm-y += nvkm/engine/disp/hdmigk104.o 32nvkm-y += nvkm/engine/disp/hdmigk104.o
24nvkm-y += nvkm/engine/disp/piornv50.o 33
25nvkm-y += nvkm/engine/disp/sornv50.o
26nvkm-y += nvkm/engine/disp/sorg94.o
27nvkm-y += nvkm/engine/disp/sorgf110.o
28nvkm-y += nvkm/engine/disp/sorgm204.o
29nvkm-y += nvkm/engine/disp/vga.o 34nvkm-y += nvkm/engine/disp/vga.o
35
36nvkm-y += nvkm/engine/disp/rootnv04.o
37nvkm-y += nvkm/engine/disp/rootnv50.o
38nvkm-y += nvkm/engine/disp/rootg84.o
39nvkm-y += nvkm/engine/disp/rootg94.o
40nvkm-y += nvkm/engine/disp/rootgt200.o
41nvkm-y += nvkm/engine/disp/rootgt215.o
42nvkm-y += nvkm/engine/disp/rootgf119.o
43nvkm-y += nvkm/engine/disp/rootgk104.o
44nvkm-y += nvkm/engine/disp/rootgk110.o
45nvkm-y += nvkm/engine/disp/rootgm107.o
46nvkm-y += nvkm/engine/disp/rootgm204.o
47
48nvkm-y += nvkm/engine/disp/channv50.o
49nvkm-y += nvkm/engine/disp/changf119.o
50
51nvkm-y += nvkm/engine/disp/dmacnv50.o
52nvkm-y += nvkm/engine/disp/dmacgf119.o
53
54nvkm-y += nvkm/engine/disp/basenv50.o
55nvkm-y += nvkm/engine/disp/baseg84.o
56nvkm-y += nvkm/engine/disp/basegt200.o
57nvkm-y += nvkm/engine/disp/basegt215.o
58nvkm-y += nvkm/engine/disp/basegf119.o
59nvkm-y += nvkm/engine/disp/basegk104.o
60nvkm-y += nvkm/engine/disp/basegk110.o
61
62nvkm-y += nvkm/engine/disp/corenv50.o
63nvkm-y += nvkm/engine/disp/coreg84.o
64nvkm-y += nvkm/engine/disp/coreg94.o
65nvkm-y += nvkm/engine/disp/coregt200.o
66nvkm-y += nvkm/engine/disp/coregt215.o
67nvkm-y += nvkm/engine/disp/coregf119.o
68nvkm-y += nvkm/engine/disp/coregk104.o
69nvkm-y += nvkm/engine/disp/coregk110.o
70nvkm-y += nvkm/engine/disp/coregm107.o
71nvkm-y += nvkm/engine/disp/coregm204.o
72
73nvkm-y += nvkm/engine/disp/ovlynv50.o
74nvkm-y += nvkm/engine/disp/ovlyg84.o
75nvkm-y += nvkm/engine/disp/ovlygt200.o
76nvkm-y += nvkm/engine/disp/ovlygt215.o
77nvkm-y += nvkm/engine/disp/ovlygf119.o
78nvkm-y += nvkm/engine/disp/ovlygk104.o
79
80nvkm-y += nvkm/engine/disp/piocnv50.o
81nvkm-y += nvkm/engine/disp/piocgf119.o
82
83nvkm-y += nvkm/engine/disp/cursnv50.o
84nvkm-y += nvkm/engine/disp/cursg84.o
85nvkm-y += nvkm/engine/disp/cursgt215.o
86nvkm-y += nvkm/engine/disp/cursgf119.o
87nvkm-y += nvkm/engine/disp/cursgk104.o
88
89nvkm-y += nvkm/engine/disp/oimmnv50.o
90nvkm-y += nvkm/engine/disp/oimmg84.o
91nvkm-y += nvkm/engine/disp/oimmgt215.o
92nvkm-y += nvkm/engine/disp/oimmgf119.o
93nvkm-y += nvkm/engine/disp/oimmgk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 23d1b5c0dc16..44b67719f64d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -25,7 +25,9 @@
25#include "conn.h" 25#include "conn.h"
26#include "outp.h" 26#include "outp.h"
27 27
28#include <core/client.h>
28#include <core/notify.h> 29#include <core/notify.h>
30#include <core/oproxy.h>
29#include <subdev/bios.h> 31#include <subdev/bios.h>
30#include <subdev/bios/dcb.h> 32#include <subdev/bios/dcb.h>
31 33
@@ -33,7 +35,21 @@
33#include <nvif/event.h> 35#include <nvif/event.h>
34#include <nvif/unpack.h> 36#include <nvif/unpack.h>
35 37
36int 38static void
39nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int head)
40{
41 struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
42 disp->func->head.vblank_fini(disp, head);
43}
44
45static void
46nvkm_disp_vblank_init(struct nvkm_event *event, int type, int head)
47{
48 struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
49 disp->func->head.vblank_init(disp, head);
50}
51
52static int
37nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size, 53nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
38 struct nvkm_notify *notify) 54 struct nvkm_notify *notify)
39{ 55{
@@ -56,6 +72,13 @@ nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
56 return ret; 72 return ret;
57} 73}
58 74
75static const struct nvkm_event_func
76nvkm_disp_vblank_func = {
77 .ctor = nvkm_disp_vblank_ctor,
78 .init = nvkm_disp_vblank_init,
79 .fini = nvkm_disp_vblank_fini,
80};
81
59void 82void
60nvkm_disp_vblank(struct nvkm_disp *disp, int head) 83nvkm_disp_vblank(struct nvkm_disp *disp, int head)
61{ 84{
@@ -100,7 +123,7 @@ nvkm_disp_hpd_func = {
100int 123int
101nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event) 124nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event)
102{ 125{
103 struct nvkm_disp *disp = (void *)object->engine; 126 struct nvkm_disp *disp = nvkm_disp(object->engine);
104 switch (type) { 127 switch (type) {
105 case NV04_DISP_NTFY_VBLANK: 128 case NV04_DISP_NTFY_VBLANK:
106 *event = &disp->vblank; 129 *event = &disp->vblank;
@@ -114,127 +137,303 @@ nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event)
114 return -EINVAL; 137 return -EINVAL;
115} 138}
116 139
117int 140static void
118_nvkm_disp_fini(struct nvkm_object *object, bool suspend) 141nvkm_disp_class_del(struct nvkm_oproxy *oproxy)
119{ 142{
120 struct nvkm_disp *disp = (void *)object; 143 struct nvkm_disp *disp = nvkm_disp(oproxy->base.engine);
121 struct nvkm_output *outp; 144 mutex_lock(&disp->engine.subdev.mutex);
145 if (disp->client == oproxy)
146 disp->client = NULL;
147 mutex_unlock(&disp->engine.subdev.mutex);
148}
149
150static const struct nvkm_oproxy_func
151nvkm_disp_class = {
152 .dtor[1] = nvkm_disp_class_del,
153};
154
155static int
156nvkm_disp_class_new(struct nvkm_device *device,
157 const struct nvkm_oclass *oclass, void *data, u32 size,
158 struct nvkm_object **pobject)
159{
160 const struct nvkm_disp_oclass *sclass = oclass->engn;
161 struct nvkm_disp *disp = nvkm_disp(oclass->engine);
162 struct nvkm_oproxy *oproxy;
122 int ret; 163 int ret;
123 164
124 list_for_each_entry(outp, &disp->outp, head) { 165 ret = nvkm_oproxy_new_(&nvkm_disp_class, oclass, &oproxy);
125 ret = nv_ofuncs(outp)->fini(nv_object(outp), suspend); 166 if (ret)
126 if (ret && suspend) 167 return ret;
127 goto fail_outp; 168 *pobject = &oproxy->base;
169
170 mutex_lock(&disp->engine.subdev.mutex);
171 if (disp->client) {
172 mutex_unlock(&disp->engine.subdev.mutex);
173 return -EBUSY;
128 } 174 }
175 disp->client = oproxy;
176 mutex_unlock(&disp->engine.subdev.mutex);
129 177
130 return nvkm_engine_fini(&disp->base, suspend); 178 return sclass->ctor(disp, oclass, data, size, &oproxy->object);
179}
131 180
132fail_outp: 181static const struct nvkm_device_oclass
133 list_for_each_entry_continue_reverse(outp, &disp->outp, head) { 182nvkm_disp_sclass = {
134 nv_ofuncs(outp)->init(nv_object(outp)); 183 .ctor = nvkm_disp_class_new,
184};
185
186static int
187nvkm_disp_class_get(struct nvkm_oclass *oclass, int index,
188 const struct nvkm_device_oclass **class)
189{
190 struct nvkm_disp *disp = nvkm_disp(oclass->engine);
191 if (index == 0) {
192 const struct nvkm_disp_oclass *root = disp->func->root(disp);
193 oclass->base = root->base;
194 oclass->engn = root;
195 *class = &nvkm_disp_sclass;
196 return 0;
135 } 197 }
198 return 1;
199}
136 200
137 return ret; 201static void
202nvkm_disp_intr(struct nvkm_engine *engine)
203{
204 struct nvkm_disp *disp = nvkm_disp(engine);
205 disp->func->intr(disp);
138} 206}
139 207
140int 208static int
141_nvkm_disp_init(struct nvkm_object *object) 209nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
142{ 210{
143 struct nvkm_disp *disp = (void *)object; 211 struct nvkm_disp *disp = nvkm_disp(engine);
212 struct nvkm_connector *conn;
144 struct nvkm_output *outp; 213 struct nvkm_output *outp;
145 int ret;
146
147 ret = nvkm_engine_init(&disp->base);
148 if (ret)
149 return ret;
150 214
151 list_for_each_entry(outp, &disp->outp, head) { 215 list_for_each_entry(outp, &disp->outp, head) {
152 ret = nv_ofuncs(outp)->init(nv_object(outp)); 216 nvkm_output_fini(outp);
153 if (ret)
154 goto fail_outp;
155 } 217 }
156 218
157 return ret; 219 list_for_each_entry(conn, &disp->conn, head) {
220 nvkm_connector_fini(conn);
221 }
222
223 return 0;
224}
225
226static int
227nvkm_disp_init(struct nvkm_engine *engine)
228{
229 struct nvkm_disp *disp = nvkm_disp(engine);
230 struct nvkm_connector *conn;
231 struct nvkm_output *outp;
158 232
159fail_outp: 233 list_for_each_entry(conn, &disp->conn, head) {
160 list_for_each_entry_continue_reverse(outp, &disp->outp, head) { 234 nvkm_connector_init(conn);
161 nv_ofuncs(outp)->fini(nv_object(outp), false);
162 } 235 }
163 236
164 return ret; 237 list_for_each_entry(outp, &disp->outp, head) {
238 nvkm_output_init(outp);
239 }
240
241 return 0;
165} 242}
166 243
167void 244static void *
168_nvkm_disp_dtor(struct nvkm_object *object) 245nvkm_disp_dtor(struct nvkm_engine *engine)
169{ 246{
170 struct nvkm_disp *disp = (void *)object; 247 struct nvkm_disp *disp = nvkm_disp(engine);
171 struct nvkm_output *outp, *outt; 248 struct nvkm_connector *conn;
249 struct nvkm_output *outp;
250 void *data = disp;
251
252 if (disp->func->dtor)
253 data = disp->func->dtor(disp);
172 254
173 nvkm_event_fini(&disp->vblank); 255 nvkm_event_fini(&disp->vblank);
174 nvkm_event_fini(&disp->hpd); 256 nvkm_event_fini(&disp->hpd);
175 257
176 if (disp->outp.next) { 258 while (!list_empty(&disp->outp)) {
177 list_for_each_entry_safe(outp, outt, &disp->outp, head) { 259 outp = list_first_entry(&disp->outp, typeof(*outp), head);
178 nvkm_object_ref(NULL, (struct nvkm_object **)&outp); 260 list_del(&outp->head);
179 } 261 nvkm_output_del(&outp);
180 } 262 }
181 263
182 nvkm_engine_destroy(&disp->base); 264 while (!list_empty(&disp->conn)) {
265 conn = list_first_entry(&disp->conn, typeof(*conn), head);
266 list_del(&conn->head);
267 nvkm_connector_del(&conn);
268 }
269
270 return data;
183} 271}
184 272
273static const struct nvkm_engine_func
274nvkm_disp = {
275 .dtor = nvkm_disp_dtor,
276 .init = nvkm_disp_init,
277 .fini = nvkm_disp_fini,
278 .intr = nvkm_disp_intr,
279 .base.sclass = nvkm_disp_class_get,
280};
281
185int 282int
186nvkm_disp_create_(struct nvkm_object *parent, struct nvkm_object *engine, 283nvkm_disp_ctor(const struct nvkm_disp_func *func, struct nvkm_device *device,
187 struct nvkm_oclass *oclass, int heads, const char *intname, 284 int index, int heads, struct nvkm_disp *disp)
188 const char *extname, int length, void **pobject)
189{ 285{
190 struct nvkm_disp_impl *impl = (void *)oclass; 286 struct nvkm_bios *bios = device->bios;
191 struct nvkm_bios *bios = nvkm_bios(parent); 287 struct nvkm_output *outp, *outt, *pair;
192 struct nvkm_disp *disp; 288 struct nvkm_connector *conn;
193 struct nvkm_oclass **sclass; 289 struct nvbios_connE connE;
194 struct nvkm_object *object;
195 struct dcb_output dcbE; 290 struct dcb_output dcbE;
196 u8 hpd = 0, ver, hdr; 291 u8 hpd = 0, ver, hdr;
197 u32 data; 292 u32 data;
198 int ret, i; 293 int ret, i;
199 294
200 ret = nvkm_engine_create_(parent, engine, oclass, true, intname, 295 INIT_LIST_HEAD(&disp->outp);
201 extname, length, pobject); 296 INIT_LIST_HEAD(&disp->conn);
202 disp = *pobject; 297 disp->func = func;
298 disp->head.nr = heads;
299
300 ret = nvkm_engine_ctor(&nvkm_disp, device, index, 0,
301 true, &disp->engine);
203 if (ret) 302 if (ret)
204 return ret; 303 return ret;
205 304
206 INIT_LIST_HEAD(&disp->outp);
207
208 /* create output objects for each display path in the vbios */ 305 /* create output objects for each display path in the vbios */
209 i = -1; 306 i = -1;
210 while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) { 307 while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
308 const struct nvkm_disp_func_outp *outps;
309 int (*ctor)(struct nvkm_disp *, int, struct dcb_output *,
310 struct nvkm_output **);
311
211 if (dcbE.type == DCB_OUTPUT_UNUSED) 312 if (dcbE.type == DCB_OUTPUT_UNUSED)
212 continue; 313 continue;
213 if (dcbE.type == DCB_OUTPUT_EOL) 314 if (dcbE.type == DCB_OUTPUT_EOL)
214 break; 315 break;
215 data = dcbE.location << 4 | dcbE.type; 316 outp = NULL;
317
318 switch (dcbE.location) {
319 case 0: outps = &disp->func->outp.internal; break;
320 case 1: outps = &disp->func->outp.external; break;
321 default:
322 nvkm_warn(&disp->engine.subdev,
323 "dcb %d locn %d unknown\n", i, dcbE.location);
324 continue;
325 }
216 326
217 oclass = nvkm_output_oclass; 327 switch (dcbE.type) {
218 sclass = impl->outp; 328 case DCB_OUTPUT_ANALOG: ctor = outps->crt ; break;
219 while (sclass && sclass[0]) { 329 case DCB_OUTPUT_TV : ctor = outps->tv ; break;
220 if (sclass[0]->handle == data) { 330 case DCB_OUTPUT_TMDS : ctor = outps->tmds; break;
221 oclass = sclass[0]; 331 case DCB_OUTPUT_LVDS : ctor = outps->lvds; break;
222 break; 332 case DCB_OUTPUT_DP : ctor = outps->dp ; break;
333 default:
334 nvkm_warn(&disp->engine.subdev,
335 "dcb %d type %d unknown\n", i, dcbE.type);
336 continue;
337 }
338
339 if (ctor)
340 ret = ctor(disp, i, &dcbE, &outp);
341 else
342 ret = -ENODEV;
343
344 if (ret) {
345 if (ret == -ENODEV) {
346 nvkm_debug(&disp->engine.subdev,
347 "dcb %d %d/%d not supported\n",
348 i, dcbE.location, dcbE.type);
349 continue;
223 } 350 }
224 sclass++; 351 nvkm_error(&disp->engine.subdev,
352 "failed to create output %d\n", i);
353 nvkm_output_del(&outp);
354 continue;
225 } 355 }
226 356
227 nvkm_object_ctor(*pobject, NULL, oclass, &dcbE, i, &object); 357 list_add_tail(&outp->head, &disp->outp);
228 hpd = max(hpd, (u8)(dcbE.connector + 1)); 358 hpd = max(hpd, (u8)(dcbE.connector + 1));
229 } 359 }
230 360
361 /* create connector objects based on the outputs we support */
362 list_for_each_entry_safe(outp, outt, &disp->outp, head) {
363 /* bios data *should* give us the most useful information */
364 data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
365 &connE);
366
367 /* no bios connector data... */
368 if (!data) {
369 /* heuristic: anything with the same ccb index is
370 * considered to be on the same connector, any
371 * output path without an associated ccb entry will
372 * be put on its own connector
373 */
374 int ccb_index = outp->info.i2c_index;
375 if (ccb_index != 0xf) {
376 list_for_each_entry(pair, &disp->outp, head) {
377 if (pair->info.i2c_index == ccb_index) {
378 outp->conn = pair->conn;
379 break;
380 }
381 }
382 }
383
384 /* connector shared with another output path */
385 if (outp->conn)
386 continue;
387
388 memset(&connE, 0x00, sizeof(connE));
389 connE.type = DCB_CONNECTOR_NONE;
390 i = -1;
391 } else {
392 i = outp->info.connector;
393 }
394
395 /* check that we haven't already created this connector */
396 list_for_each_entry(conn, &disp->conn, head) {
397 if (conn->index == outp->info.connector) {
398 outp->conn = conn;
399 break;
400 }
401 }
402
403 if (outp->conn)
404 continue;
405
406 /* apparently we need to create a new one! */
407 ret = nvkm_connector_new(disp, i, &connE, &outp->conn);
408 if (ret) {
409 nvkm_error(&disp->engine.subdev,
410 "failed to create output %d conn: %d\n",
411 outp->index, ret);
412 nvkm_connector_del(&outp->conn);
413 list_del(&outp->head);
414 nvkm_output_del(&outp);
415 continue;
416 }
417
418 list_add_tail(&outp->conn->head, &disp->conn);
419 }
420
231 ret = nvkm_event_init(&nvkm_disp_hpd_func, 3, hpd, &disp->hpd); 421 ret = nvkm_event_init(&nvkm_disp_hpd_func, 3, hpd, &disp->hpd);
232 if (ret) 422 if (ret)
233 return ret; 423 return ret;
234 424
235 ret = nvkm_event_init(impl->vblank, 1, heads, &disp->vblank); 425 ret = nvkm_event_init(&nvkm_disp_vblank_func, 1, heads, &disp->vblank);
236 if (ret) 426 if (ret)
237 return ret; 427 return ret;
238 428
239 return 0; 429 return 0;
240} 430}
431
432int
433nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
434 int index, int heads, struct nvkm_disp **pdisp)
435{
436 if (!(*pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
437 return -ENOMEM;
438 return nvkm_disp_ctor(func, device, index, heads, *pdisp);
439}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
new file mode 100644
index 000000000000..6d17630a3dee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
@@ -0,0 +1,80 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
/* Global method list for the G84 base display channel.  Each entry pairs
 * a channel method offset with a priv register offset (relative to
 * .addr), terminated by the empty entry -- apparently consumed by the
 * nv50 method debug-dump code; verify against the channel code.
 */
29static const struct nv50_disp_mthd_list
30g84_disp_base_mthd_base = {
31 .mthd = 0x0000,
32 .addr = 0x000000,
33 .data = {
34 { 0x0080, 0x000000 },
35 { 0x0084, 0x0008c4 },
36 { 0x0088, 0x0008d0 },
37 { 0x008c, 0x0008dc },
38 { 0x0090, 0x0008e4 },
39 { 0x0094, 0x610884 },
40 { 0x00a0, 0x6108a0 },
41 { 0x00a4, 0x610878 },
42 { 0x00c0, 0x61086c },
43 { 0x00c4, 0x610800 },
44 { 0x00c8, 0x61080c },
45 { 0x00cc, 0x610818 },
46 { 0x00e0, 0x610858 },
47 { 0x00e4, 0x610860 },
48 { 0x00e8, 0x6108ac },
49 { 0x00ec, 0x6108b4 },
50 { 0x00fc, 0x610824 },
51 { 0x0100, 0x610894 },
52 { 0x0104, 0x61082c },
53 { 0x0110, 0x6108bc },
54 { 0x0114, 0x61088c },
55 {}
56 }
57};
58
/* Method groups for the G84 base channel: one "Global" instance of the
 * list above plus two "Image" instances of the shared nv50 image list
 * (shared with nv50 -- only the global list differs on G84).
 */
59const struct nv50_disp_chan_mthd
60g84_disp_base_chan_mthd = {
61 .name = "Base",
62 .addr = 0x000540,
63 .prev = 0x000004,
64 .data = {
65 { "Global", 1, &g84_disp_base_mthd_base },
66 { "Image", 2, &nv50_disp_base_mthd_image },
67 {}
68 }
69};
70
/* Class descriptor for the G84 base channel (G82_DISP_BASE_CHANNEL_DMA,
 * version 0 only): nv50 DMA-channel implementation with the G84 method
 * tables, on channel id 1.
 */
71const struct nv50_disp_dmac_oclass
72g84_disp_base_oclass = {
73 .base.oclass = G82_DISP_BASE_CHANNEL_DMA,
74 .base.minver = 0,
75 .base.maxver = 0,
76 .ctor = nv50_disp_base_new,
77 .func = &nv50_disp_dmac_func,
78 .mthd = &g84_disp_base_chan_mthd,
79 .chid = 1,
80};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
new file mode 100644
index 000000000000..ebcb925e9d90
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
@@ -0,0 +1,114 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
/* Global method list for the GF119 base display channel.  Each entry
 * pairs a channel method offset with a priv register offset, terminated
 * by the empty entry.  Note the GF119 layout is a flat 1:1 mapping into
 * the 0x661xxx range, unlike the scattered G84 map.
 */
29static const struct nv50_disp_mthd_list
30gf119_disp_base_mthd_base = {
31 .mthd = 0x0000,
32 .addr = 0x000000,
33 .data = {
34 { 0x0080, 0x661080 },
35 { 0x0084, 0x661084 },
36 { 0x0088, 0x661088 },
37 { 0x008c, 0x66108c },
38 { 0x0090, 0x661090 },
39 { 0x0094, 0x661094 },
40 { 0x00a0, 0x6610a0 },
41 { 0x00a4, 0x6610a4 },
42 { 0x00c0, 0x6610c0 },
43 { 0x00c4, 0x6610c4 },
44 { 0x00c8, 0x6610c8 },
45 { 0x00cc, 0x6610cc },
46 { 0x00e0, 0x6610e0 },
47 { 0x00e4, 0x6610e4 },
48 { 0x00e8, 0x6610e8 },
49 { 0x00ec, 0x6610ec },
50 { 0x00fc, 0x6610fc },
51 { 0x0100, 0x661100 },
52 { 0x0104, 0x661104 },
53 { 0x0108, 0x661108 },
54 { 0x010c, 0x66110c },
55 { 0x0110, 0x661110 },
56 { 0x0114, 0x661114 },
57 { 0x0118, 0x661118 },
58 { 0x011c, 0x66111c },
59 { 0x0130, 0x661130 },
60 { 0x0134, 0x661134 },
61 { 0x0138, 0x661138 },
62 { 0x013c, 0x66113c },
63 { 0x0140, 0x661140 },
64 { 0x0144, 0x661144 },
65 { 0x0148, 0x661148 },
66 { 0x014c, 0x66114c },
67 { 0x0150, 0x661150 },
68 { 0x0154, 0x661154 },
69 { 0x0158, 0x661158 },
70 { 0x015c, 0x66115c },
71 { 0x0160, 0x661160 },
72 { 0x0164, 0x661164 },
73 { 0x0168, 0x661168 },
74 { 0x016c, 0x66116c },
75 {}
76 }
77};
78
/* Per-image method list for the GF119 base channel (methods starting at
 * 0x0020); GF119 uses its own table here rather than the shared nv50
 * image list the G84 table references.
 */
79static const struct nv50_disp_mthd_list
80gf119_disp_base_mthd_image = {
81 .mthd = 0x0020,
82 .addr = 0x000020,
83 .data = {
84 { 0x0400, 0x661400 },
85 { 0x0404, 0x661404 },
86 { 0x0408, 0x661408 },
87 { 0x040c, 0x66140c },
88 { 0x0410, 0x661410 },
89 {}
90 }
91};
92
/* Method groups for the GF119 base channel: one "Global" instance and
 * two "Image" instances of the tables above.  NOTE(review): .prev is
 * negative here (-0x020000), unlike the G84 table -- presumably the
 * previous-state shadow registers sit below .addr on this generation;
 * confirm against the nv50 channel dump code.
 */
93const struct nv50_disp_chan_mthd
94gf119_disp_base_chan_mthd = {
95 .name = "Base",
96 .addr = 0x001000,
97 .prev = -0x020000,
98 .data = {
99 { "Global", 1, &gf119_disp_base_mthd_base },
100 { "Image", 2, &gf119_disp_base_mthd_image },
101 {}
102 }
103};
104
/* Class descriptor for the GF119 base channel (GF110_DISP_BASE_CHANNEL_DMA,
 * version 0 only): GF119 DMA-channel implementation with the GF119 method
 * tables, on channel id 1.
 */
105const struct nv50_disp_dmac_oclass
106gf119_disp_base_oclass = {
107 .base.oclass = GF110_DISP_BASE_CHANNEL_DMA,
108 .base.minver = 0,
109 .base.maxver = 0,
110 .ctor = nv50_disp_base_new,
111 .func = &gf119_disp_dmac_func,
112 .mthd = &gf119_disp_base_chan_mthd,
113 .chid = 1,
114};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c
new file mode 100644
index 000000000000..780a1d973634
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
/* Class descriptor for the GK104 base channel: exposes the GK104 class
 * id but reuses the GF119 channel implementation and method tables
 * unchanged, on channel id 1.
 */
29const struct nv50_disp_dmac_oclass
30gk104_disp_base_oclass = {
31 .base.oclass = GK104_DISP_BASE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_base_new,
35 .func = &gf119_disp_dmac_func,
36 .mthd = &gf119_disp_base_chan_mthd,
37 .chid = 1,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c
new file mode 100644
index 000000000000..d8bdd246c8ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
/* Class descriptor for the GK110 base channel: exposes the GK110 class
 * id but reuses the GF119 channel implementation and method tables
 * unchanged, on channel id 1.
 */
29const struct nv50_disp_dmac_oclass
30gk110_disp_base_oclass = {
31 .base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_base_new,
35 .func = &gf119_disp_dmac_func,
36 .mthd = &gf119_disp_base_chan_mthd,
37 .chid = 1,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c
new file mode 100644
index 000000000000..93451e46570c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
/* Class descriptor for the GT200 base channel: exposes the GT200 class
 * id but reuses the nv50 channel implementation with the G84 method
 * tables, on channel id 1.
 */
29const struct nv50_disp_dmac_oclass
30gt200_disp_base_oclass = {
31 .base.oclass = GT200_DISP_BASE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_base_new,
35 .func = &nv50_disp_dmac_func,
36 .mthd = &g84_disp_base_chan_mthd,
37 .chid = 1,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c
new file mode 100644
index 000000000000..08e2b1fa3806
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
/* Class descriptor for the GT215 base channel: exposes the GT214 class
 * id (the class is named after GT214) but reuses the nv50 channel
 * implementation with the G84 method tables, on channel id 1.
 */
29const struct nv50_disp_dmac_oclass
30gt215_disp_base_oclass = {
31 .base.oclass = GT214_DISP_BASE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_base_new,
35 .func = &nv50_disp_dmac_func,
36 .mthd = &g84_disp_base_chan_mthd,
37 .chid = 1,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
new file mode 100644
index 000000000000..1fd89edefc26
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
@@ -0,0 +1,123 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <core/client.h>
28
29#include <nvif/class.h>
30#include <nvif/unpack.h>
31
32int
33nv50_disp_base_new(const struct nv50_disp_dmac_func *func,
34 const struct nv50_disp_chan_mthd *mthd,
35 struct nv50_disp_root *root, int chid,
36 const struct nvkm_oclass *oclass, void *data, u32 size,
37 struct nvkm_object **pobject)
38{
39 union {
40 struct nv50_disp_base_channel_dma_v0 v0;
41 } *args = data;
42 struct nvkm_object *parent = oclass->parent;
43 struct nv50_disp *disp = root->disp;
44 int head, ret;
45 u64 push;
46
47 nvif_ioctl(parent, "create disp base channel dma size %d\n", size);
48 if (nvif_unpack(args->v0, 0, 0, false)) {
49 nvif_ioctl(parent, "create disp base channel dma vers %d "
50 "pushbuf %016llx head %d\n",
51 args->v0.version, args->v0.pushbuf, args->v0.head);
52 if (args->v0.head > disp->base.head.nr)
53 return -EINVAL;
54 push = args->v0.pushbuf;
55 head = args->v0.head;
56 } else
57 return ret;
58
59 return nv50_disp_dmac_new_(func, mthd, root, chid + head,
60 head, push, oclass, pobject);
61}
62
63static const struct nv50_disp_mthd_list
64nv50_disp_base_mthd_base = {
65 .mthd = 0x0000,
66 .addr = 0x000000,
67 .data = {
68 { 0x0080, 0x000000 },
69 { 0x0084, 0x0008c4 },
70 { 0x0088, 0x0008d0 },
71 { 0x008c, 0x0008dc },
72 { 0x0090, 0x0008e4 },
73 { 0x0094, 0x610884 },
74 { 0x00a0, 0x6108a0 },
75 { 0x00a4, 0x610878 },
76 { 0x00c0, 0x61086c },
77 { 0x00e0, 0x610858 },
78 { 0x00e4, 0x610860 },
79 { 0x00e8, 0x6108ac },
80 { 0x00ec, 0x6108b4 },
81 { 0x0100, 0x610894 },
82 { 0x0110, 0x6108bc },
83 { 0x0114, 0x61088c },
84 {}
85 }
86};
87
88const struct nv50_disp_mthd_list
89nv50_disp_base_mthd_image = {
90 .mthd = 0x0400,
91 .addr = 0x000000,
92 .data = {
93 { 0x0800, 0x6108f0 },
94 { 0x0804, 0x6108fc },
95 { 0x0808, 0x61090c },
96 { 0x080c, 0x610914 },
97 { 0x0810, 0x610904 },
98 {}
99 }
100};
101
102static const struct nv50_disp_chan_mthd
103nv50_disp_base_chan_mthd = {
104 .name = "Base",
105 .addr = 0x000540,
106 .prev = 0x000004,
107 .data = {
108 { "Global", 1, &nv50_disp_base_mthd_base },
109 { "Image", 2, &nv50_disp_base_mthd_image },
110 {}
111 }
112};
113
114const struct nv50_disp_dmac_oclass
115nv50_disp_base_oclass = {
116 .base.oclass = NV50_DISP_BASE_CHANNEL_DMA,
117 .base.minver = 0,
118 .base.maxver = 0,
119 .ctor = nv50_disp_base_new,
120 .func = &nv50_disp_dmac_func,
121 .mthd = &nv50_disp_base_chan_mthd,
122 .chid = 1,
123};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
new file mode 100644
index 000000000000..17a3d835cb42
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
@@ -0,0 +1,49 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25
26static void
27gf119_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
28{
29 struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
30 struct nvkm_device *device = disp->base.engine.subdev.device;
31 nvkm_mask(device, 0x610090, 0x00000001 << index, 0x00000000 << index);
32 nvkm_wr32(device, 0x61008c, 0x00000001 << index);
33}
34
35static void
36gf119_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
37{
38 struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
39 struct nvkm_device *device = disp->base.engine.subdev.device;
40 nvkm_wr32(device, 0x61008c, 0x00000001 << index);
41 nvkm_mask(device, 0x610090, 0x00000001 << index, 0x00000001 << index);
42}
43
44const struct nvkm_event_func
45gf119_disp_chan_uevent = {
46 .ctor = nv50_disp_chan_uevent_ctor,
47 .init = gf119_disp_chan_uevent_init,
48 .fini = gf119_disp_chan_uevent_fini,
49};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
new file mode 100644
index 000000000000..01803c0679b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -0,0 +1,301 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <core/client.h>
28#include <core/ramht.h>
29#include <engine/dma.h>
30
31#include <nvif/class.h>
32#include <nvif/event.h>
33#include <nvif/unpack.h>
34
35static void
36nv50_disp_mthd_list(struct nv50_disp *disp, int debug, u32 base, int c,
37 const struct nv50_disp_mthd_list *list, int inst)
38{
39 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
40 struct nvkm_device *device = subdev->device;
41 int i;
42
43 for (i = 0; list->data[i].mthd; i++) {
44 if (list->data[i].addr) {
45 u32 next = nvkm_rd32(device, list->data[i].addr + base + 0);
46 u32 prev = nvkm_rd32(device, list->data[i].addr + base + c);
47 u32 mthd = list->data[i].mthd + (list->mthd * inst);
48 const char *name = list->data[i].name;
49 char mods[16];
50
51 if (prev != next)
52 snprintf(mods, sizeof(mods), "-> %08x", next);
53 else
54 snprintf(mods, sizeof(mods), "%13c", ' ');
55
56 nvkm_printk_(subdev, debug, info,
57 "\t%04x: %08x %s%s%s\n",
58 mthd, prev, mods, name ? " // " : "",
59 name ? name : "");
60 }
61 }
62}
63
64void
65nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
66{
67 struct nv50_disp *disp = chan->root->disp;
68 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
69 const struct nv50_disp_chan_mthd *mthd = chan->mthd;
70 const struct nv50_disp_mthd_list *list;
71 int i, j;
72
73 if (debug > subdev->debug)
74 return;
75
76 for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
77 u32 base = chan->head * mthd->addr;
78 for (j = 0; j < mthd->data[i].nr; j++, base += list->addr) {
79 const char *cname = mthd->name;
80 const char *sname = "";
81 char cname_[16], sname_[16];
82
83 if (mthd->addr) {
84 snprintf(cname_, sizeof(cname_), "%s %d",
85 mthd->name, chan->chid);
86 cname = cname_;
87 }
88
89 if (mthd->data[i].nr > 1) {
90 snprintf(sname_, sizeof(sname_), " - %s %d",
91 mthd->data[i].name, j);
92 sname = sname_;
93 }
94
95 nvkm_printk_(subdev, debug, info, "%s%s:\n", cname, sname);
96 nv50_disp_mthd_list(disp, debug, base, mthd->prev,
97 list, j);
98 }
99 }
100}
101
102static void
103nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
104{
105 struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
106 struct nvkm_device *device = disp->base.engine.subdev.device;
107 nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000000 << index);
108 nvkm_wr32(device, 0x610020, 0x00000001 << index);
109}
110
111static void
112nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
113{
114 struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
115 struct nvkm_device *device = disp->base.engine.subdev.device;
116 nvkm_wr32(device, 0x610020, 0x00000001 << index);
117 nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index);
118}
119
120void
121nv50_disp_chan_uevent_send(struct nv50_disp *disp, int chid)
122{
123 struct nvif_notify_uevent_rep {
124 } rep;
125
126 nvkm_event_send(&disp->uevent, 1, chid, &rep, sizeof(rep));
127}
128
129int
130nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
131 struct nvkm_notify *notify)
132{
133 struct nv50_disp_chan *chan = nv50_disp_chan(object);
134 union {
135 struct nvif_notify_uevent_req none;
136 } *args = data;
137 int ret;
138
139 if (nvif_unvers(args->none)) {
140 notify->size = sizeof(struct nvif_notify_uevent_rep);
141 notify->types = 1;
142 notify->index = chan->chid;
143 return 0;
144 }
145
146 return ret;
147}
148
149const struct nvkm_event_func
150nv50_disp_chan_uevent = {
151 .ctor = nv50_disp_chan_uevent_ctor,
152 .init = nv50_disp_chan_uevent_init,
153 .fini = nv50_disp_chan_uevent_fini,
154};
155
156int
157nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
158{
159 struct nv50_disp_chan *chan = nv50_disp_chan(object);
160 struct nv50_disp *disp = chan->root->disp;
161 struct nvkm_device *device = disp->base.engine.subdev.device;
162 *data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
163 return 0;
164}
165
166int
167nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
168{
169 struct nv50_disp_chan *chan = nv50_disp_chan(object);
170 struct nv50_disp *disp = chan->root->disp;
171 struct nvkm_device *device = disp->base.engine.subdev.device;
172 nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
173 return 0;
174}
175
176int
177nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
178 struct nvkm_event **pevent)
179{
180 struct nv50_disp_chan *chan = nv50_disp_chan(object);
181 struct nv50_disp *disp = chan->root->disp;
182 switch (type) {
183 case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
184 *pevent = &disp->uevent;
185 return 0;
186 default:
187 break;
188 }
189 return -EINVAL;
190}
191
192int
193nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
194{
195 struct nv50_disp_chan *chan = nv50_disp_chan(object);
196 struct nv50_disp *disp = chan->root->disp;
197 struct nvkm_device *device = disp->base.engine.subdev.device;
198 *addr = device->func->resource_addr(device, 0) +
199 0x640000 + (chan->chid * 0x1000);
200 *size = 0x001000;
201 return 0;
202}
203
204static int
205nv50_disp_chan_child_new(const struct nvkm_oclass *oclass,
206 void *data, u32 size, struct nvkm_object **pobject)
207{
208 struct nv50_disp_chan *chan = nv50_disp_chan(oclass->parent);
209 return chan->func->child_new(chan, oclass, data, size, pobject);
210}
211
212static int
213nv50_disp_chan_child_get(struct nvkm_object *object, int index,
214 struct nvkm_oclass *oclass)
215{
216 struct nv50_disp_chan *chan = nv50_disp_chan(object);
217 if (chan->func->child_get) {
218 int ret = chan->func->child_get(chan, index, oclass);
219 if (ret == 0)
220 oclass->ctor = nv50_disp_chan_child_new;
221 return ret;
222 }
223 return -EINVAL;
224}
225
226static int
227nv50_disp_chan_fini(struct nvkm_object *object, bool suspend)
228{
229 struct nv50_disp_chan *chan = nv50_disp_chan(object);
230 chan->func->fini(chan);
231 return 0;
232}
233
234static int
235nv50_disp_chan_init(struct nvkm_object *object)
236{
237 struct nv50_disp_chan *chan = nv50_disp_chan(object);
238 return chan->func->init(chan);
239}
240
241static void *
242nv50_disp_chan_dtor(struct nvkm_object *object)
243{
244 struct nv50_disp_chan *chan = nv50_disp_chan(object);
245 struct nv50_disp *disp = chan->root->disp;
246 if (chan->chid >= 0)
247 disp->chan[chan->chid] = NULL;
248 return chan->func->dtor ? chan->func->dtor(chan) : chan;
249}
250
251static const struct nvkm_object_func
252nv50_disp_chan = {
253 .dtor = nv50_disp_chan_dtor,
254 .init = nv50_disp_chan_init,
255 .fini = nv50_disp_chan_fini,
256 .rd32 = nv50_disp_chan_rd32,
257 .wr32 = nv50_disp_chan_wr32,
258 .ntfy = nv50_disp_chan_ntfy,
259 .map = nv50_disp_chan_map,
260 .sclass = nv50_disp_chan_child_get,
261};
262
263int
264nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
265 const struct nv50_disp_chan_mthd *mthd,
266 struct nv50_disp_root *root, int chid, int head,
267 const struct nvkm_oclass *oclass,
268 struct nv50_disp_chan *chan)
269{
270 struct nv50_disp *disp = root->disp;
271
272 nvkm_object_ctor(&nv50_disp_chan, oclass, &chan->object);
273 chan->func = func;
274 chan->mthd = mthd;
275 chan->root = root;
276 chan->chid = chid;
277 chan->head = head;
278
279 if (disp->chan[chan->chid]) {
280 chan->chid = -1;
281 return -EBUSY;
282 }
283 disp->chan[chan->chid] = chan;
284 return 0;
285}
286
287int
288nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
289 const struct nv50_disp_chan_mthd *mthd,
290 struct nv50_disp_root *root, int chid, int head,
291 const struct nvkm_oclass *oclass,
292 struct nvkm_object **pobject)
293{
294 struct nv50_disp_chan *chan;
295
296 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
297 return -ENOMEM;
298 *pobject = &chan->object;
299
300 return nv50_disp_chan_ctor(func, mthd, root, chid, head, oclass, chan);
301}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
new file mode 100644
index 000000000000..aee374884c96
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -0,0 +1,127 @@
1#ifndef __NV50_DISP_CHAN_H__
2#define __NV50_DISP_CHAN_H__
3#define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object)
4#include "nv50.h"
5
6struct nv50_disp_chan {
7 const struct nv50_disp_chan_func *func;
8 const struct nv50_disp_chan_mthd *mthd;
9 struct nv50_disp_root *root;
10 int chid;
11 int head;
12
13 struct nvkm_object object;
14};
15
16struct nv50_disp_chan_func {
17 void *(*dtor)(struct nv50_disp_chan *);
18 int (*init)(struct nv50_disp_chan *);
19 void (*fini)(struct nv50_disp_chan *);
20 int (*child_get)(struct nv50_disp_chan *, int index,
21 struct nvkm_oclass *);
22 int (*child_new)(struct nv50_disp_chan *, const struct nvkm_oclass *,
23 void *data, u32 size, struct nvkm_object **);
24};
25
26int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
27 const struct nv50_disp_chan_mthd *,
28 struct nv50_disp_root *, int chid, int head,
29 const struct nvkm_oclass *, struct nv50_disp_chan *);
30int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
31 const struct nv50_disp_chan_mthd *,
32 struct nv50_disp_root *, int chid, int head,
33 const struct nvkm_oclass *, struct nvkm_object **);
34
35extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
36extern const struct nv50_disp_chan_func gf119_disp_pioc_func;
37
38extern const struct nvkm_event_func nv50_disp_chan_uevent;
39int nv50_disp_chan_uevent_ctor(struct nvkm_object *, void *, u32,
40 struct nvkm_notify *);
41void nv50_disp_chan_uevent_send(struct nv50_disp *, int);
42
43extern const struct nvkm_event_func gf119_disp_chan_uevent;
44
45struct nv50_disp_mthd_list {
46 u32 mthd;
47 u32 addr;
48 struct {
49 u32 mthd;
50 u32 addr;
51 const char *name;
52 } data[];
53};
54
55struct nv50_disp_chan_mthd {
56 const char *name;
57 u32 addr;
58 s32 prev;
59 struct {
60 const char *name;
61 int nr;
62 const struct nv50_disp_mthd_list *mthd;
63 } data[];
64};
65
66void nv50_disp_chan_mthd(struct nv50_disp_chan *, int debug);
67
68extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_base;
69extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
70extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
71extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
72
73extern const struct nv50_disp_chan_mthd g84_disp_core_chan_mthd;
74extern const struct nv50_disp_mthd_list g84_disp_core_mthd_dac;
75extern const struct nv50_disp_mthd_list g84_disp_core_mthd_head;
76extern const struct nv50_disp_chan_mthd g84_disp_base_chan_mthd;
77extern const struct nv50_disp_chan_mthd g84_disp_ovly_chan_mthd;
78
79extern const struct nv50_disp_chan_mthd g94_disp_core_chan_mthd;
80
81extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_base;
82extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_dac;
83extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_sor;
84extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior;
85extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd;
86
87extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
88
89struct nv50_disp_pioc_oclass {
90 int (*ctor)(const struct nv50_disp_chan_func *,
91 const struct nv50_disp_chan_mthd *,
92 struct nv50_disp_root *, int chid,
93 const struct nvkm_oclass *, void *data, u32 size,
94 struct nvkm_object **);
95 struct nvkm_sclass base;
96 const struct nv50_disp_chan_func *func;
97 const struct nv50_disp_chan_mthd *mthd;
98 int chid;
99};
100
101extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass;
102extern const struct nv50_disp_pioc_oclass nv50_disp_curs_oclass;
103
104extern const struct nv50_disp_pioc_oclass g84_disp_oimm_oclass;
105extern const struct nv50_disp_pioc_oclass g84_disp_curs_oclass;
106
107extern const struct nv50_disp_pioc_oclass gt215_disp_oimm_oclass;
108extern const struct nv50_disp_pioc_oclass gt215_disp_curs_oclass;
109
110extern const struct nv50_disp_pioc_oclass gf119_disp_oimm_oclass;
111extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass;
112
113extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass;
114extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
115
116
117int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
118 const struct nv50_disp_chan_mthd *,
119 struct nv50_disp_root *, int chid,
120 const struct nvkm_oclass *, void *data, u32 size,
121 struct nvkm_object **);
122int nv50_disp_oimm_new(const struct nv50_disp_chan_func *,
123 const struct nv50_disp_chan_mthd *,
124 struct nv50_disp_root *, int chid,
125 const struct nvkm_oclass *, void *data, u32 size,
126 struct nvkm_object **);
127#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
index cf03e0240ced..c6910d644a3d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
@@ -33,15 +33,15 @@ static int
33nvkm_connector_hpd(struct nvkm_notify *notify) 33nvkm_connector_hpd(struct nvkm_notify *notify)
34{ 34{
35 struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd); 35 struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
36 struct nvkm_disp *disp = nvkm_disp(conn); 36 struct nvkm_disp *disp = conn->disp;
37 struct nvkm_gpio *gpio = nvkm_gpio(conn); 37 struct nvkm_gpio *gpio = disp->engine.subdev.device->gpio;
38 const struct nvkm_gpio_ntfy_rep *line = notify->data; 38 const struct nvkm_gpio_ntfy_rep *line = notify->data;
39 struct nvif_notify_conn_rep_v0 rep; 39 struct nvif_notify_conn_rep_v0 rep;
40 int index = conn->index; 40 int index = conn->index;
41 41
42 DBG("HPD: %d\n", line->mask); 42 CONN_DBG(conn, "HPD: %d", line->mask);
43 43
44 if (!gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.index)) 44 if (!nvkm_gpio_get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.index))
45 rep.mask = NVIF_NOTIFY_CONN_V0_UNPLUG; 45 rep.mask = NVIF_NOTIFY_CONN_V0_UNPLUG;
46 else 46 else
47 rep.mask = NVIF_NOTIFY_CONN_V0_PLUG; 47 rep.mask = NVIF_NOTIFY_CONN_V0_PLUG;
@@ -51,78 +51,58 @@ nvkm_connector_hpd(struct nvkm_notify *notify)
51 return NVKM_NOTIFY_KEEP; 51 return NVKM_NOTIFY_KEEP;
52} 52}
53 53
54int 54void
55_nvkm_connector_fini(struct nvkm_object *object, bool suspend) 55nvkm_connector_fini(struct nvkm_connector *conn)
56{ 56{
57 struct nvkm_connector *conn = (void *)object;
58 nvkm_notify_put(&conn->hpd); 57 nvkm_notify_put(&conn->hpd);
59 return nvkm_object_fini(&conn->base, suspend);
60} 58}
61 59
62int 60void
63_nvkm_connector_init(struct nvkm_object *object) 61nvkm_connector_init(struct nvkm_connector *conn)
64{ 62{
65 struct nvkm_connector *conn = (void *)object; 63 nvkm_notify_get(&conn->hpd);
66 int ret = nvkm_object_init(&conn->base);
67 if (ret == 0)
68 nvkm_notify_get(&conn->hpd);
69 return ret;
70} 64}
71 65
72void 66void
73_nvkm_connector_dtor(struct nvkm_object *object) 67nvkm_connector_del(struct nvkm_connector **pconn)
74{ 68{
75 struct nvkm_connector *conn = (void *)object; 69 struct nvkm_connector *conn = *pconn;
76 nvkm_notify_fini(&conn->hpd); 70 if (conn) {
77 nvkm_object_destroy(&conn->base); 71 nvkm_notify_fini(&conn->hpd);
72 kfree(*pconn);
73 *pconn = NULL;
74 }
78} 75}
79 76
80int 77static void
81nvkm_connector_create_(struct nvkm_object *parent, 78nvkm_connector_ctor(struct nvkm_disp *disp, int index,
82 struct nvkm_object *engine, 79 struct nvbios_connE *info, struct nvkm_connector *conn)
83 struct nvkm_oclass *oclass,
84 struct nvbios_connE *info, int index,
85 int length, void **pobject)
86{ 80{
87 static const u8 hpd[] = { 0x07, 0x08, 0x51, 0x52, 0x5e, 0x5f, 0x60 }; 81 static const u8 hpd[] = { 0x07, 0x08, 0x51, 0x52, 0x5e, 0x5f, 0x60 };
88 struct nvkm_disp *disp = nvkm_disp(parent); 82 struct nvkm_gpio *gpio = disp->engine.subdev.device->gpio;
89 struct nvkm_gpio *gpio = nvkm_gpio(parent);
90 struct nvkm_connector *conn;
91 struct nvkm_output *outp;
92 struct dcb_gpio_func func; 83 struct dcb_gpio_func func;
93 int ret; 84 int ret;
94 85
95 list_for_each_entry(outp, &disp->outp, head) { 86 conn->disp = disp;
96 if (outp->conn && outp->conn->index == index) {
97 atomic_inc(&nv_object(outp->conn)->refcount);
98 *pobject = outp->conn;
99 return 1;
100 }
101 }
102
103 ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
104 conn = *pobject;
105 if (ret)
106 return ret;
107
108 conn->info = *info;
109 conn->index = index; 87 conn->index = index;
88 conn->info = *info;
110 89
111 DBG("type %02x loc %d hpd %02x dp %x di %x sr %x lcdid %x\n", 90 CONN_DBG(conn, "type %02x loc %d hpd %02x dp %x di %x sr %x lcdid %x",
112 info->type, info->location, info->hpd, info->dp, 91 info->type, info->location, info->hpd, info->dp,
113 info->di, info->sr, info->lcdid); 92 info->di, info->sr, info->lcdid);
114 93
115 if ((info->hpd = ffs(info->hpd))) { 94 if ((info->hpd = ffs(info->hpd))) {
116 if (--info->hpd >= ARRAY_SIZE(hpd)) { 95 if (--info->hpd >= ARRAY_SIZE(hpd)) {
117 ERR("hpd %02x unknown\n", info->hpd); 96 CONN_ERR(conn, "hpd %02x unknown", info->hpd);
118 return 0; 97 return;
119 } 98 }
120 info->hpd = hpd[info->hpd]; 99 info->hpd = hpd[info->hpd];
121 100
122 ret = gpio->find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func); 101 ret = nvkm_gpio_find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func);
123 if (ret) { 102 if (ret) {
124 ERR("func %02x lookup failed, %d\n", info->hpd, ret); 103 CONN_ERR(conn, "func %02x lookup failed, %d",
125 return 0; 104 info->hpd, ret);
105 return;
126 } 106 }
127 107
128 ret = nvkm_notify_init(NULL, &gpio->event, nvkm_connector_hpd, 108 ret = nvkm_notify_init(NULL, &gpio->event, nvkm_connector_hpd,
@@ -134,41 +114,19 @@ nvkm_connector_create_(struct nvkm_object *parent,
134 sizeof(struct nvkm_gpio_ntfy_rep), 114 sizeof(struct nvkm_gpio_ntfy_rep),
135 &conn->hpd); 115 &conn->hpd);
136 if (ret) { 116 if (ret) {
137 ERR("func %02x failed, %d\n", info->hpd, ret); 117 CONN_ERR(conn, "func %02x failed, %d", info->hpd, ret);
138 } else { 118 } else {
139 DBG("func %02x (HPD)\n", info->hpd); 119 CONN_DBG(conn, "func %02x (HPD)", info->hpd);
140 } 120 }
141 } 121 }
142
143 return 0;
144} 122}
145 123
146int 124int
147_nvkm_connector_ctor(struct nvkm_object *parent, 125nvkm_connector_new(struct nvkm_disp *disp, int index,
148 struct nvkm_object *engine, 126 struct nvbios_connE *info, struct nvkm_connector **pconn)
149 struct nvkm_oclass *oclass, void *info, u32 index,
150 struct nvkm_object **pobject)
151{ 127{
152 struct nvkm_connector *conn; 128 if (!(*pconn = kzalloc(sizeof(**pconn), GFP_KERNEL)))
153 int ret; 129 return -ENOMEM;
154 130 nvkm_connector_ctor(disp, index, info, *pconn);
155 ret = nvkm_connector_create(parent, engine, oclass, info, index, &conn);
156 *pobject = nv_object(conn);
157 if (ret)
158 return ret;
159
160 return 0; 131 return 0;
161} 132}
162
163struct nvkm_oclass *
164nvkm_connector_oclass = &(struct nvkm_connector_impl) {
165 .base = {
166 .handle = 0,
167 .ofuncs = &(struct nvkm_ofuncs) {
168 .ctor = _nvkm_connector_ctor,
169 .dtor = _nvkm_connector_dtor,
170 .init = _nvkm_connector_init,
171 .fini = _nvkm_connector_fini,
172 },
173 },
174}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
index c87a061f7f7d..ed32fe7f1864 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
@@ -1,58 +1,33 @@
1#ifndef __NVKM_DISP_CONN_H__ 1#ifndef __NVKM_DISP_CONN_H__
2#define __NVKM_DISP_CONN_H__ 2#define __NVKM_DISP_CONN_H__
3#include <core/object.h> 3#include <engine/disp.h>
4#include <core/notify.h>
5 4
5#include <core/notify.h>
6#include <subdev/bios.h> 6#include <subdev/bios.h>
7#include <subdev/bios/conn.h> 7#include <subdev/bios/conn.h>
8 8
9struct nvkm_connector { 9struct nvkm_connector {
10 struct nvkm_object base; 10 struct nvkm_disp *disp;
11 struct list_head head;
12
13 struct nvbios_connE info;
14 int index; 11 int index;
12 struct nvbios_connE info;
15 13
16 struct nvkm_notify hpd; 14 struct nvkm_notify hpd;
17};
18 15
19#define nvkm_connector_create(p,e,c,b,i,d) \ 16 struct list_head head;
20 nvkm_connector_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
21#define nvkm_connector_destroy(d) ({ \
22 struct nvkm_connector *disp = (d); \
23 _nvkm_connector_dtor(nv_object(disp)); \
24})
25#define nvkm_connector_init(d) ({ \
26 struct nvkm_connector *disp = (d); \
27 _nvkm_connector_init(nv_object(disp)); \
28})
29#define nvkm_connector_fini(d,s) ({ \
30 struct nvkm_connector *disp = (d); \
31 _nvkm_connector_fini(nv_object(disp), (s)); \
32})
33
34int nvkm_connector_create_(struct nvkm_object *, struct nvkm_object *,
35 struct nvkm_oclass *, struct nvbios_connE *,
36 int, int, void **);
37
38int _nvkm_connector_ctor(struct nvkm_object *, struct nvkm_object *,
39 struct nvkm_oclass *, void *, u32,
40 struct nvkm_object **);
41void _nvkm_connector_dtor(struct nvkm_object *);
42int _nvkm_connector_init(struct nvkm_object *);
43int _nvkm_connector_fini(struct nvkm_object *, bool);
44
45struct nvkm_connector_impl {
46 struct nvkm_oclass base;
47}; 17};
48 18
49#ifndef MSG 19int nvkm_connector_new(struct nvkm_disp *, int index, struct nvbios_connE *,
50#define MSG(l,f,a...) do { \ 20 struct nvkm_connector **);
51 struct nvkm_connector *_conn = (void *)conn; \ 21void nvkm_connector_del(struct nvkm_connector **);
52 nv_##l(_conn, "%02x:%02x%02x: "f, _conn->index, \ 22void nvkm_connector_init(struct nvkm_connector *);
53 _conn->info.location, _conn->info.type, ##a); \ 23void nvkm_connector_fini(struct nvkm_connector *);
24
25#define CONN_MSG(c,l,f,a...) do { \
26 struct nvkm_connector *_conn = (c); \
27 nvkm_##l(&_conn->disp->engine.subdev, "conn %02x:%02x%02x: "f"\n", \
28 _conn->index, _conn->info.location, _conn->info.type, ##a); \
54} while(0) 29} while(0)
55#define DBG(f,a...) MSG(debug, f, ##a) 30#define CONN_ERR(c,f,a...) CONN_MSG((c), error, f, ##a)
56#define ERR(f,a...) MSG(error, f, ##a) 31#define CONN_DBG(c,f,a...) CONN_MSG((c), debug, f, ##a)
57#endif 32#define CONN_TRACE(c,f,a...) CONN_MSG((c), trace, f, ##a)
58#endif 33#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
new file mode 100644
index 000000000000..1baa5c34b327
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
@@ -0,0 +1,117 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_mthd_list
30g84_disp_core_mthd_dac = {
31 .mthd = 0x0080,
32 .addr = 0x000008,
33 .data = {
34 { 0x0400, 0x610b58 },
35 { 0x0404, 0x610bdc },
36 { 0x0420, 0x610bc4 },
37 {}
38 }
39};
40
41const struct nv50_disp_mthd_list
42g84_disp_core_mthd_head = {
43 .mthd = 0x0400,
44 .addr = 0x000540,
45 .data = {
46 { 0x0800, 0x610ad8 },
47 { 0x0804, 0x610ad0 },
48 { 0x0808, 0x610a48 },
49 { 0x080c, 0x610a78 },
50 { 0x0810, 0x610ac0 },
51 { 0x0814, 0x610af8 },
52 { 0x0818, 0x610b00 },
53 { 0x081c, 0x610ae8 },
54 { 0x0820, 0x610af0 },
55 { 0x0824, 0x610b08 },
56 { 0x0828, 0x610b10 },
57 { 0x082c, 0x610a68 },
58 { 0x0830, 0x610a60 },
59 { 0x0834, 0x000000 },
60 { 0x0838, 0x610a40 },
61 { 0x0840, 0x610a24 },
62 { 0x0844, 0x610a2c },
63 { 0x0848, 0x610aa8 },
64 { 0x084c, 0x610ab0 },
65 { 0x085c, 0x610c5c },
66 { 0x0860, 0x610a84 },
67 { 0x0864, 0x610a90 },
68 { 0x0868, 0x610b18 },
69 { 0x086c, 0x610b20 },
70 { 0x0870, 0x610ac8 },
71 { 0x0874, 0x610a38 },
72 { 0x0878, 0x610c50 },
73 { 0x0880, 0x610a58 },
74 { 0x0884, 0x610a9c },
75 { 0x089c, 0x610c68 },
76 { 0x08a0, 0x610a70 },
77 { 0x08a4, 0x610a50 },
78 { 0x08a8, 0x610ae0 },
79 { 0x08c0, 0x610b28 },
80 { 0x08c4, 0x610b30 },
81 { 0x08c8, 0x610b40 },
82 { 0x08d4, 0x610b38 },
83 { 0x08d8, 0x610b48 },
84 { 0x08dc, 0x610b50 },
85 { 0x0900, 0x610a18 },
86 { 0x0904, 0x610ab8 },
87 { 0x0910, 0x610c70 },
88 { 0x0914, 0x610c78 },
89 {}
90 }
91};
92
93const struct nv50_disp_chan_mthd
94g84_disp_core_chan_mthd = {
95 .name = "Core",
96 .addr = 0x000000,
97 .prev = 0x000004,
98 .data = {
99 { "Global", 1, &nv50_disp_core_mthd_base },
100 { "DAC", 3, &g84_disp_core_mthd_dac },
101 { "SOR", 2, &nv50_disp_core_mthd_sor },
102 { "PIOR", 3, &nv50_disp_core_mthd_pior },
103 { "HEAD", 2, &g84_disp_core_mthd_head },
104 {}
105 }
106};
107
108const struct nv50_disp_dmac_oclass
109g84_disp_core_oclass = {
110 .base.oclass = G82_DISP_CORE_CHANNEL_DMA,
111 .base.minver = 0,
112 .base.maxver = 0,
113 .ctor = nv50_disp_core_new,
114 .func = &nv50_disp_core_func,
115 .mthd = &g84_disp_core_chan_mthd,
116 .chid = 0,
117};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
new file mode 100644
index 000000000000..019379a3a01c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
@@ -0,0 +1,63 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_mthd_list
30g94_disp_core_mthd_sor = {
31 .mthd = 0x0040,
32 .addr = 0x000008,
33 .data = {
34 { 0x0600, 0x610794 },
35 {}
36 }
37};
38
39const struct nv50_disp_chan_mthd
40g94_disp_core_chan_mthd = {
41 .name = "Core",
42 .addr = 0x000000,
43 .prev = 0x000004,
44 .data = {
45 { "Global", 1, &nv50_disp_core_mthd_base },
46 { "DAC", 3, &g84_disp_core_mthd_dac },
47 { "SOR", 4, &g94_disp_core_mthd_sor },
48 { "PIOR", 3, &nv50_disp_core_mthd_pior },
49 { "HEAD", 2, &g84_disp_core_mthd_head },
50 {}
51 }
52};
53
54const struct nv50_disp_dmac_oclass
55g94_disp_core_oclass = {
56 .base.oclass = GT206_DISP_CORE_CHANNEL_DMA,
57 .base.minver = 0,
58 .base.maxver = 0,
59 .ctor = nv50_disp_core_new,
60 .func = &nv50_disp_core_func,
61 .mthd = &g94_disp_core_chan_mthd,
62 .chid = 0,
63};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
new file mode 100644
index 000000000000..6b1dc703dac7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
@@ -0,0 +1,244 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <core/client.h>
28#include <subdev/timer.h>
29
30#include <nvif/class.h>
31#include <nvif/unpack.h>
32
33const struct nv50_disp_mthd_list
34gf119_disp_core_mthd_base = {
35 .mthd = 0x0000,
36 .addr = 0x000000,
37 .data = {
38 { 0x0080, 0x660080 },
39 { 0x0084, 0x660084 },
40 { 0x0088, 0x660088 },
41 { 0x008c, 0x000000 },
42 {}
43 }
44};
45
46const struct nv50_disp_mthd_list
47gf119_disp_core_mthd_dac = {
48 .mthd = 0x0020,
49 .addr = 0x000020,
50 .data = {
51 { 0x0180, 0x660180 },
52 { 0x0184, 0x660184 },
53 { 0x0188, 0x660188 },
54 { 0x0190, 0x660190 },
55 {}
56 }
57};
58
59const struct nv50_disp_mthd_list
60gf119_disp_core_mthd_sor = {
61 .mthd = 0x0020,
62 .addr = 0x000020,
63 .data = {
64 { 0x0200, 0x660200 },
65 { 0x0204, 0x660204 },
66 { 0x0208, 0x660208 },
67 { 0x0210, 0x660210 },
68 {}
69 }
70};
71
72const struct nv50_disp_mthd_list
73gf119_disp_core_mthd_pior = {
74 .mthd = 0x0020,
75 .addr = 0x000020,
76 .data = {
77 { 0x0300, 0x660300 },
78 { 0x0304, 0x660304 },
79 { 0x0308, 0x660308 },
80 { 0x0310, 0x660310 },
81 {}
82 }
83};
84
85static const struct nv50_disp_mthd_list
86gf119_disp_core_mthd_head = {
87 .mthd = 0x0300,
88 .addr = 0x000300,
89 .data = {
90 { 0x0400, 0x660400 },
91 { 0x0404, 0x660404 },
92 { 0x0408, 0x660408 },
93 { 0x040c, 0x66040c },
94 { 0x0410, 0x660410 },
95 { 0x0414, 0x660414 },
96 { 0x0418, 0x660418 },
97 { 0x041c, 0x66041c },
98 { 0x0420, 0x660420 },
99 { 0x0424, 0x660424 },
100 { 0x0428, 0x660428 },
101 { 0x042c, 0x66042c },
102 { 0x0430, 0x660430 },
103 { 0x0434, 0x660434 },
104 { 0x0438, 0x660438 },
105 { 0x0440, 0x660440 },
106 { 0x0444, 0x660444 },
107 { 0x0448, 0x660448 },
108 { 0x044c, 0x66044c },
109 { 0x0450, 0x660450 },
110 { 0x0454, 0x660454 },
111 { 0x0458, 0x660458 },
112 { 0x045c, 0x66045c },
113 { 0x0460, 0x660460 },
114 { 0x0468, 0x660468 },
115 { 0x046c, 0x66046c },
116 { 0x0470, 0x660470 },
117 { 0x0474, 0x660474 },
118 { 0x0480, 0x660480 },
119 { 0x0484, 0x660484 },
120 { 0x048c, 0x66048c },
121 { 0x0490, 0x660490 },
122 { 0x0494, 0x660494 },
123 { 0x0498, 0x660498 },
124 { 0x04b0, 0x6604b0 },
125 { 0x04b8, 0x6604b8 },
126 { 0x04bc, 0x6604bc },
127 { 0x04c0, 0x6604c0 },
128 { 0x04c4, 0x6604c4 },
129 { 0x04c8, 0x6604c8 },
130 { 0x04d0, 0x6604d0 },
131 { 0x04d4, 0x6604d4 },
132 { 0x04e0, 0x6604e0 },
133 { 0x04e4, 0x6604e4 },
134 { 0x04e8, 0x6604e8 },
135 { 0x04ec, 0x6604ec },
136 { 0x04f0, 0x6604f0 },
137 { 0x04f4, 0x6604f4 },
138 { 0x04f8, 0x6604f8 },
139 { 0x04fc, 0x6604fc },
140 { 0x0500, 0x660500 },
141 { 0x0504, 0x660504 },
142 { 0x0508, 0x660508 },
143 { 0x050c, 0x66050c },
144 { 0x0510, 0x660510 },
145 { 0x0514, 0x660514 },
146 { 0x0518, 0x660518 },
147 { 0x051c, 0x66051c },
148 { 0x052c, 0x66052c },
149 { 0x0530, 0x660530 },
150 { 0x054c, 0x66054c },
151 { 0x0550, 0x660550 },
152 { 0x0554, 0x660554 },
153 { 0x0558, 0x660558 },
154 { 0x055c, 0x66055c },
155 {}
156 }
157};
158
159static const struct nv50_disp_chan_mthd
160gf119_disp_core_chan_mthd = {
161 .name = "Core",
162 .addr = 0x000000,
163 .prev = -0x020000,
164 .data = {
165 { "Global", 1, &gf119_disp_core_mthd_base },
166 { "DAC", 3, &gf119_disp_core_mthd_dac },
167 { "SOR", 8, &gf119_disp_core_mthd_sor },
168 { "PIOR", 4, &gf119_disp_core_mthd_pior },
169 { "HEAD", 4, &gf119_disp_core_mthd_head },
170 {}
171 }
172};
173
174static void
175gf119_disp_core_fini(struct nv50_disp_dmac *chan)
176{
177 struct nv50_disp *disp = chan->base.root->disp;
178 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
179 struct nvkm_device *device = subdev->device;
180
181 /* deactivate channel */
182 nvkm_mask(device, 0x610490, 0x00000010, 0x00000000);
183 nvkm_mask(device, 0x610490, 0x00000003, 0x00000000);
184 if (nvkm_msec(device, 2000,
185 if (!(nvkm_rd32(device, 0x610490) & 0x001e0000))
186 break;
187 ) < 0) {
188 nvkm_error(subdev, "core fini: %08x\n",
189 nvkm_rd32(device, 0x610490));
190 }
191
192 /* disable error reporting and completion notification */
193 nvkm_mask(device, 0x610090, 0x00000001, 0x00000000);
194 nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000000);
195}
196
197static int
198gf119_disp_core_init(struct nv50_disp_dmac *chan)
199{
200 struct nv50_disp *disp = chan->base.root->disp;
201 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
202 struct nvkm_device *device = subdev->device;
203
204 /* enable error reporting */
205 nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001);
206
207 /* initialise channel for dma command submission */
208 nvkm_wr32(device, 0x610494, chan->push);
209 nvkm_wr32(device, 0x610498, 0x00010000);
210 nvkm_wr32(device, 0x61049c, 0x00000001);
211 nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
212 nvkm_wr32(device, 0x640000, 0x00000000);
213 nvkm_wr32(device, 0x610490, 0x01000013);
214
215 /* wait for it to go inactive */
216 if (nvkm_msec(device, 2000,
217 if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
218 break;
219 ) < 0) {
220 nvkm_error(subdev, "core init: %08x\n",
221 nvkm_rd32(device, 0x610490));
222 return -EBUSY;
223 }
224
225 return 0;
226}
227
228const struct nv50_disp_dmac_func
229gf119_disp_core_func = {
230 .init = gf119_disp_core_init,
231 .fini = gf119_disp_core_fini,
232 .bind = gf119_disp_dmac_bind,
233};
234
235const struct nv50_disp_dmac_oclass
236gf119_disp_core_oclass = {
237 .base.oclass = GF110_DISP_CORE_CHANNEL_DMA,
238 .base.minver = 0,
239 .base.maxver = 0,
240 .ctor = nv50_disp_core_new,
241 .func = &gf119_disp_core_func,
242 .mthd = &gf119_disp_core_chan_mthd,
243 .chid = 0,
244};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
new file mode 100644
index 000000000000..088ab222e823
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
@@ -0,0 +1,132 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29static const struct nv50_disp_mthd_list
30gk104_disp_core_mthd_head = {
31 .mthd = 0x0300,
32 .addr = 0x000300,
33 .data = {
34 { 0x0400, 0x660400 },
35 { 0x0404, 0x660404 },
36 { 0x0408, 0x660408 },
37 { 0x040c, 0x66040c },
38 { 0x0410, 0x660410 },
39 { 0x0414, 0x660414 },
40 { 0x0418, 0x660418 },
41 { 0x041c, 0x66041c },
42 { 0x0420, 0x660420 },
43 { 0x0424, 0x660424 },
44 { 0x0428, 0x660428 },
45 { 0x042c, 0x66042c },
46 { 0x0430, 0x660430 },
47 { 0x0434, 0x660434 },
48 { 0x0438, 0x660438 },
49 { 0x0440, 0x660440 },
50 { 0x0444, 0x660444 },
51 { 0x0448, 0x660448 },
52 { 0x044c, 0x66044c },
53 { 0x0450, 0x660450 },
54 { 0x0454, 0x660454 },
55 { 0x0458, 0x660458 },
56 { 0x045c, 0x66045c },
57 { 0x0460, 0x660460 },
58 { 0x0468, 0x660468 },
59 { 0x046c, 0x66046c },
60 { 0x0470, 0x660470 },
61 { 0x0474, 0x660474 },
62 { 0x047c, 0x66047c },
63 { 0x0480, 0x660480 },
64 { 0x0484, 0x660484 },
65 { 0x0488, 0x660488 },
66 { 0x048c, 0x66048c },
67 { 0x0490, 0x660490 },
68 { 0x0494, 0x660494 },
69 { 0x0498, 0x660498 },
70 { 0x04a0, 0x6604a0 },
71 { 0x04b0, 0x6604b0 },
72 { 0x04b8, 0x6604b8 },
73 { 0x04bc, 0x6604bc },
74 { 0x04c0, 0x6604c0 },
75 { 0x04c4, 0x6604c4 },
76 { 0x04c8, 0x6604c8 },
77 { 0x04d0, 0x6604d0 },
78 { 0x04d4, 0x6604d4 },
79 { 0x04e0, 0x6604e0 },
80 { 0x04e4, 0x6604e4 },
81 { 0x04e8, 0x6604e8 },
82 { 0x04ec, 0x6604ec },
83 { 0x04f0, 0x6604f0 },
84 { 0x04f4, 0x6604f4 },
85 { 0x04f8, 0x6604f8 },
86 { 0x04fc, 0x6604fc },
87 { 0x0500, 0x660500 },
88 { 0x0504, 0x660504 },
89 { 0x0508, 0x660508 },
90 { 0x050c, 0x66050c },
91 { 0x0510, 0x660510 },
92 { 0x0514, 0x660514 },
93 { 0x0518, 0x660518 },
94 { 0x051c, 0x66051c },
95 { 0x0520, 0x660520 },
96 { 0x0524, 0x660524 },
97 { 0x052c, 0x66052c },
98 { 0x0530, 0x660530 },
99 { 0x054c, 0x66054c },
100 { 0x0550, 0x660550 },
101 { 0x0554, 0x660554 },
102 { 0x0558, 0x660558 },
103 { 0x055c, 0x66055c },
104 {}
105 }
106};
107
108const struct nv50_disp_chan_mthd
109gk104_disp_core_chan_mthd = {
110 .name = "Core",
111 .addr = 0x000000,
112 .prev = -0x020000,
113 .data = {
114 { "Global", 1, &gf119_disp_core_mthd_base },
115 { "DAC", 3, &gf119_disp_core_mthd_dac },
116 { "SOR", 8, &gf119_disp_core_mthd_sor },
117 { "PIOR", 4, &gf119_disp_core_mthd_pior },
118 { "HEAD", 4, &gk104_disp_core_mthd_head },
119 {}
120 }
121};
122
123const struct nv50_disp_dmac_oclass
124gk104_disp_core_oclass = {
125 .base.oclass = GK104_DISP_CORE_CHANNEL_DMA,
126 .base.minver = 0,
127 .base.maxver = 0,
128 .ctor = nv50_disp_core_new,
129 .func = &gf119_disp_core_func,
130 .mthd = &gk104_disp_core_chan_mthd,
131 .chid = 0,
132};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c
new file mode 100644
index 000000000000..df0f45c20108
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_dmac_oclass
30gk110_disp_core_oclass = {
31 .base.oclass = GK110_DISP_CORE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_core_new,
35 .func = &gf119_disp_core_func,
36 .mthd = &gk104_disp_core_chan_mthd,
37 .chid = 0,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c
new file mode 100644
index 000000000000..9e27f8fd98b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_dmac_oclass
30gm107_disp_core_oclass = {
31 .base.oclass = GM107_DISP_CORE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_core_new,
35 .func = &gf119_disp_core_func,
36 .mthd = &gk104_disp_core_chan_mthd,
37 .chid = 0,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c
new file mode 100644
index 000000000000..222f4a822f4d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_dmac_oclass
30gm204_disp_core_oclass = {
31 .base.oclass = GM204_DISP_CORE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_core_new,
35 .func = &gf119_disp_core_func,
36 .mthd = &gk104_disp_core_chan_mthd,
37 .chid = 0,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c
new file mode 100644
index 000000000000..b234547708fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_dmac_oclass
30gt200_disp_core_oclass = {
31 .base.oclass = GT200_DISP_CORE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_core_new,
35 .func = &nv50_disp_core_func,
36 .mthd = &g84_disp_core_chan_mthd,
37 .chid = 0,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c
new file mode 100644
index 000000000000..8f5ba2018975
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_dmac_oclass
30gt215_disp_core_oclass = {
31 .base.oclass = GT214_DISP_CORE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_core_new,
35 .func = &nv50_disp_core_func,
36 .mthd = &g94_disp_core_chan_mthd,
37 .chid = 0,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
new file mode 100644
index 000000000000..db4a9b3e0e09
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
@@ -0,0 +1,242 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <core/client.h>
28#include <subdev/timer.h>
29
30#include <nvif/class.h>
31#include <nvif/unpack.h>
32
33int
34nv50_disp_core_new(const struct nv50_disp_dmac_func *func,
35 const struct nv50_disp_chan_mthd *mthd,
36 struct nv50_disp_root *root, int chid,
37 const struct nvkm_oclass *oclass, void *data, u32 size,
38 struct nvkm_object **pobject)
39{
40 union {
41 struct nv50_disp_core_channel_dma_v0 v0;
42 } *args = data;
43 struct nvkm_object *parent = oclass->parent;
44 u64 push;
45 int ret;
46
47 nvif_ioctl(parent, "create disp core channel dma size %d\n", size);
48 if (nvif_unpack(args->v0, 0, 0, false)) {
49 nvif_ioctl(parent, "create disp core channel dma vers %d "
50 "pushbuf %016llx\n",
51 args->v0.version, args->v0.pushbuf);
52 push = args->v0.pushbuf;
53 } else
54 return ret;
55
56 return nv50_disp_dmac_new_(func, mthd, root, chid, 0,
57 push, oclass, pobject);
58}
59
60const struct nv50_disp_mthd_list
61nv50_disp_core_mthd_base = {
62 .mthd = 0x0000,
63 .addr = 0x000000,
64 .data = {
65 { 0x0080, 0x000000 },
66 { 0x0084, 0x610bb8 },
67 { 0x0088, 0x610b9c },
68 { 0x008c, 0x000000 },
69 {}
70 }
71};
72
73static const struct nv50_disp_mthd_list
74nv50_disp_core_mthd_dac = {
75 .mthd = 0x0080,
76 .addr = 0x000008,
77 .data = {
78 { 0x0400, 0x610b58 },
79 { 0x0404, 0x610bdc },
80 { 0x0420, 0x610828 },
81 {}
82 }
83};
84
85const struct nv50_disp_mthd_list
86nv50_disp_core_mthd_sor = {
87 .mthd = 0x0040,
88 .addr = 0x000008,
89 .data = {
90 { 0x0600, 0x610b70 },
91 {}
92 }
93};
94
95const struct nv50_disp_mthd_list
96nv50_disp_core_mthd_pior = {
97 .mthd = 0x0040,
98 .addr = 0x000008,
99 .data = {
100 { 0x0700, 0x610b80 },
101 {}
102 }
103};
104
105static const struct nv50_disp_mthd_list
106nv50_disp_core_mthd_head = {
107 .mthd = 0x0400,
108 .addr = 0x000540,
109 .data = {
110 { 0x0800, 0x610ad8 },
111 { 0x0804, 0x610ad0 },
112 { 0x0808, 0x610a48 },
113 { 0x080c, 0x610a78 },
114 { 0x0810, 0x610ac0 },
115 { 0x0814, 0x610af8 },
116 { 0x0818, 0x610b00 },
117 { 0x081c, 0x610ae8 },
118 { 0x0820, 0x610af0 },
119 { 0x0824, 0x610b08 },
120 { 0x0828, 0x610b10 },
121 { 0x082c, 0x610a68 },
122 { 0x0830, 0x610a60 },
123 { 0x0834, 0x000000 },
124 { 0x0838, 0x610a40 },
125 { 0x0840, 0x610a24 },
126 { 0x0844, 0x610a2c },
127 { 0x0848, 0x610aa8 },
128 { 0x084c, 0x610ab0 },
129 { 0x0860, 0x610a84 },
130 { 0x0864, 0x610a90 },
131 { 0x0868, 0x610b18 },
132 { 0x086c, 0x610b20 },
133 { 0x0870, 0x610ac8 },
134 { 0x0874, 0x610a38 },
135 { 0x0880, 0x610a58 },
136 { 0x0884, 0x610a9c },
137 { 0x08a0, 0x610a70 },
138 { 0x08a4, 0x610a50 },
139 { 0x08a8, 0x610ae0 },
140 { 0x08c0, 0x610b28 },
141 { 0x08c4, 0x610b30 },
142 { 0x08c8, 0x610b40 },
143 { 0x08d4, 0x610b38 },
144 { 0x08d8, 0x610b48 },
145 { 0x08dc, 0x610b50 },
146 { 0x0900, 0x610a18 },
147 { 0x0904, 0x610ab8 },
148 {}
149 }
150};
151
152static const struct nv50_disp_chan_mthd
153nv50_disp_core_chan_mthd = {
154 .name = "Core",
155 .addr = 0x000000,
156 .prev = 0x000004,
157 .data = {
158 { "Global", 1, &nv50_disp_core_mthd_base },
159 { "DAC", 3, &nv50_disp_core_mthd_dac },
160 { "SOR", 2, &nv50_disp_core_mthd_sor },
161 { "PIOR", 3, &nv50_disp_core_mthd_pior },
162 { "HEAD", 2, &nv50_disp_core_mthd_head },
163 {}
164 }
165};
166
167static void
168nv50_disp_core_fini(struct nv50_disp_dmac *chan)
169{
170 struct nv50_disp *disp = chan->base.root->disp;
171 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
172 struct nvkm_device *device = subdev->device;
173
174 /* deactivate channel */
175 nvkm_mask(device, 0x610200, 0x00000010, 0x00000000);
176 nvkm_mask(device, 0x610200, 0x00000003, 0x00000000);
177 if (nvkm_msec(device, 2000,
178 if (!(nvkm_rd32(device, 0x610200) & 0x001e0000))
179 break;
180 ) < 0) {
181 nvkm_error(subdev, "core fini: %08x\n",
182 nvkm_rd32(device, 0x610200));
183 }
184
185 /* disable error reporting and completion notifications */
186 nvkm_mask(device, 0x610028, 0x00010001, 0x00000000);
187}
188
189static int
190nv50_disp_core_init(struct nv50_disp_dmac *chan)
191{
192 struct nv50_disp *disp = chan->base.root->disp;
193 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
194 struct nvkm_device *device = subdev->device;
195
196 /* enable error reporting */
197 nvkm_mask(device, 0x610028, 0x00010000, 0x00010000);
198
199 /* attempt to unstick channel from some unknown state */
200 if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000)
201 nvkm_mask(device, 0x610200, 0x00800000, 0x00800000);
202 if ((nvkm_rd32(device, 0x610200) & 0x003f0000) == 0x00030000)
203 nvkm_mask(device, 0x610200, 0x00600000, 0x00600000);
204
205 /* initialise channel for dma command submission */
206 nvkm_wr32(device, 0x610204, chan->push);
207 nvkm_wr32(device, 0x610208, 0x00010000);
208 nvkm_wr32(device, 0x61020c, 0x00000000);
209 nvkm_mask(device, 0x610200, 0x00000010, 0x00000010);
210 nvkm_wr32(device, 0x640000, 0x00000000);
211 nvkm_wr32(device, 0x610200, 0x01000013);
212
213 /* wait for it to go inactive */
214 if (nvkm_msec(device, 2000,
215 if (!(nvkm_rd32(device, 0x610200) & 0x80000000))
216 break;
217 ) < 0) {
218 nvkm_error(subdev, "core init: %08x\n",
219 nvkm_rd32(device, 0x610200));
220 return -EBUSY;
221 }
222
223 return 0;
224}
225
226const struct nv50_disp_dmac_func
227nv50_disp_core_func = {
228 .init = nv50_disp_core_init,
229 .fini = nv50_disp_core_fini,
230 .bind = nv50_disp_dmac_bind,
231};
232
233const struct nv50_disp_dmac_oclass
234nv50_disp_core_oclass = {
235 .base.oclass = NV50_DISP_CORE_CHANNEL_DMA,
236 .base.minver = 0,
237 .base.maxver = 0,
238 .ctor = nv50_disp_core_new,
239 .func = &nv50_disp_core_func,
240 .mthd = &nv50_disp_core_chan_mthd,
241 .chid = 0,
242};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
new file mode 100644
index 000000000000..dd99fc7060b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
@@ -0,0 +1,37 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_pioc_oclass
30g84_disp_curs_oclass = {
31 .base.oclass = G82_DISP_CURSOR,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_curs_new,
35 .func = &nv50_disp_pioc_func,
36 .chid = 7,
37};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
index f042e7d8321d..2a1574e06ad6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
@@ -21,17 +21,17 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "channv50.h"
25#include "rootnv50.h"
25 26
26struct nvkm_oclass * 27#include <nvif/class.h>
27g94_mc_oclass = &(struct nvkm_mc_oclass) { 28
28 .base.handle = NV_SUBDEV(MC, 0x94), 29const struct nv50_disp_pioc_oclass
29 .base.ofuncs = &(struct nvkm_ofuncs) { 30gf119_disp_curs_oclass = {
30 .ctor = nv04_mc_ctor, 31 .base.oclass = GF110_DISP_CURSOR,
31 .dtor = _nvkm_mc_dtor, 32 .base.minver = 0,
32 .init = nv50_mc_init, 33 .base.maxver = 0,
33 .fini = _nvkm_mc_fini, 34 .ctor = nv50_disp_curs_new,
34 }, 35 .func = &gf119_disp_pioc_func,
35 .intr = nv50_mc_intr, 36 .chid = 13,
36 .msi_rearm = nv40_mc_msi_rearm, 37};
37}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
index 8d2a8f457778..28e8f06c9472 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
@@ -21,18 +21,17 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "channv50.h"
25#include "rootnv50.h"
25 26
26struct nvkm_oclass * 27#include <nvif/class.h>
27gf106_mc_oclass = &(struct nvkm_mc_oclass) { 28
28 .base.handle = NV_SUBDEV(MC, 0xc3), 29const struct nv50_disp_pioc_oclass
29 .base.ofuncs = &(struct nvkm_ofuncs) { 30gk104_disp_curs_oclass = {
30 .ctor = nv04_mc_ctor, 31 .base.oclass = GK104_DISP_CURSOR,
31 .dtor = _nvkm_mc_dtor, 32 .base.minver = 0,
32 .init = nv50_mc_init, 33 .base.maxver = 0,
33 .fini = _nvkm_mc_fini, 34 .ctor = nv50_disp_curs_new,
34 }, 35 .func = &gf119_disp_pioc_func,
35 .intr = gf100_mc_intr, 36 .chid = 13,
36 .msi_rearm = nv40_mc_msi_rearm, 37};
37 .unk260 = gf100_mc_unk260,
38}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
new file mode 100644
index 000000000000..d8a4b9ca139c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
@@ -0,0 +1,37 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_pioc_oclass
30gt215_disp_curs_oclass = {
31 .base.oclass = GT214_DISP_CURSOR,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_curs_new,
35 .func = &nv50_disp_pioc_func,
36 .chid = 7,
37};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
new file mode 100644
index 000000000000..225858e62cf6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
@@ -0,0 +1,68 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <core/client.h>
28
29#include <nvif/class.h>
30#include <nvif/unpack.h>
31
32int
33nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
34 const struct nv50_disp_chan_mthd *mthd,
35 struct nv50_disp_root *root, int chid,
36 const struct nvkm_oclass *oclass, void *data, u32 size,
37 struct nvkm_object **pobject)
38{
39 union {
40 struct nv50_disp_cursor_v0 v0;
41 } *args = data;
42 struct nvkm_object *parent = oclass->parent;
43 struct nv50_disp *disp = root->disp;
44 int head, ret;
45
46 nvif_ioctl(parent, "create disp cursor size %d\n", size);
47 if (nvif_unpack(args->v0, 0, 0, false)) {
48 nvif_ioctl(parent, "create disp cursor vers %d head %d\n",
49 args->v0.version, args->v0.head);
50 if (args->v0.head > disp->base.head.nr)
51 return -EINVAL;
52 head = args->v0.head;
53 } else
54 return ret;
55
56 return nv50_disp_chan_new_(func, mthd, root, chid + head,
57 head, oclass, pobject);
58}
59
60const struct nv50_disp_pioc_oclass
61nv50_disp_curs_oclass = {
62 .base.oclass = NV50_DISP_CURSOR,
63 .base.minver = 0,
64 .base.maxver = 0,
65 .ctor = nv50_disp_curs_new,
66 .func = &nv50_disp_pioc_func,
67 .chid = 7,
68};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
index 0f7d1ec4d37e..9bfa9e7dc161 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
@@ -33,6 +33,7 @@
33int 33int
34nv50_dac_power(NV50_DISP_MTHD_V1) 34nv50_dac_power(NV50_DISP_MTHD_V1)
35{ 35{
36 struct nvkm_device *device = disp->base.engine.subdev.device;
36 const u32 doff = outp->or * 0x800; 37 const u32 doff = outp->or * 0x800;
37 union { 38 union {
38 struct nv50_disp_dac_pwr_v0 v0; 39 struct nv50_disp_dac_pwr_v0 v0;
@@ -40,12 +41,12 @@ nv50_dac_power(NV50_DISP_MTHD_V1)
40 u32 stat; 41 u32 stat;
41 int ret; 42 int ret;
42 43
43 nv_ioctl(object, "disp dac pwr size %d\n", size); 44 nvif_ioctl(object, "disp dac pwr size %d\n", size);
44 if (nvif_unpack(args->v0, 0, 0, false)) { 45 if (nvif_unpack(args->v0, 0, 0, false)) {
45 nv_ioctl(object, "disp dac pwr vers %d state %d data %d " 46 nvif_ioctl(object, "disp dac pwr vers %d state %d data %d "
46 "vsync %d hsync %d\n", 47 "vsync %d hsync %d\n",
47 args->v0.version, args->v0.state, args->v0.data, 48 args->v0.version, args->v0.state, args->v0.data,
48 args->v0.vsync, args->v0.hsync); 49 args->v0.vsync, args->v0.hsync);
49 stat = 0x00000040 * !args->v0.state; 50 stat = 0x00000040 * !args->v0.state;
50 stat |= 0x00000010 * !args->v0.data; 51 stat |= 0x00000010 * !args->v0.data;
51 stat |= 0x00000004 * !args->v0.vsync; 52 stat |= 0x00000004 * !args->v0.vsync;
@@ -53,15 +54,23 @@ nv50_dac_power(NV50_DISP_MTHD_V1)
53 } else 54 } else
54 return ret; 55 return ret;
55 56
56 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); 57 nvkm_msec(device, 2000,
57 nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat); 58 if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
58 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); 59 break;
60 );
61 nvkm_mask(device, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
62 nvkm_msec(device, 2000,
63 if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
64 break;
65 );
59 return 0; 66 return 0;
60} 67}
61 68
62int 69int
63nv50_dac_sense(NV50_DISP_MTHD_V1) 70nv50_dac_sense(NV50_DISP_MTHD_V1)
64{ 71{
72 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
73 struct nvkm_device *device = subdev->device;
65 union { 74 union {
66 struct nv50_disp_dac_load_v0 v0; 75 struct nv50_disp_dac_load_v0 v0;
67 } *args = data; 76 } *args = data;
@@ -69,31 +78,49 @@ nv50_dac_sense(NV50_DISP_MTHD_V1)
69 u32 loadval; 78 u32 loadval;
70 int ret; 79 int ret;
71 80
72 nv_ioctl(object, "disp dac load size %d\n", size); 81 nvif_ioctl(object, "disp dac load size %d\n", size);
73 if (nvif_unpack(args->v0, 0, 0, false)) { 82 if (nvif_unpack(args->v0, 0, 0, false)) {
74 nv_ioctl(object, "disp dac load vers %d data %08x\n", 83 nvif_ioctl(object, "disp dac load vers %d data %08x\n",
75 args->v0.version, args->v0.data); 84 args->v0.version, args->v0.data);
76 if (args->v0.data & 0xfff00000) 85 if (args->v0.data & 0xfff00000)
77 return -EINVAL; 86 return -EINVAL;
78 loadval = args->v0.data; 87 loadval = args->v0.data;
79 } else 88 } else
80 return ret; 89 return ret;
81 90
82 nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000); 91 nvkm_mask(device, 0x61a004 + doff, 0x807f0000, 0x80150000);
83 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); 92 nvkm_msec(device, 2000,
93 if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
94 break;
95 );
84 96
85 nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval); 97 nvkm_wr32(device, 0x61a00c + doff, 0x00100000 | loadval);
86 mdelay(9); 98 mdelay(9);
87 udelay(500); 99 udelay(500);
88 loadval = nv_mask(priv, 0x61a00c + doff, 0xffffffff, 0x00000000); 100 loadval = nvkm_mask(device, 0x61a00c + doff, 0xffffffff, 0x00000000);
89 101
90 nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000); 102 nvkm_mask(device, 0x61a004 + doff, 0x807f0000, 0x80550000);
91 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); 103 nvkm_msec(device, 2000,
104 if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
105 break;
106 );
92 107
93 nv_debug(priv, "DAC%d sense: 0x%08x\n", outp->or, loadval); 108 nvkm_debug(subdev, "DAC%d sense: %08x\n", outp->or, loadval);
94 if (!(loadval & 0x80000000)) 109 if (!(loadval & 0x80000000))
95 return -ETIMEDOUT; 110 return -ETIMEDOUT;
96 111
97 args->v0.load = (loadval & 0x38000000) >> 27; 112 args->v0.load = (loadval & 0x38000000) >> 27;
98 return 0; 113 return 0;
99} 114}
115
116static const struct nvkm_output_func
117nv50_dac_output_func = {
118};
119
120int
121nv50_dac_output_new(struct nvkm_disp *disp, int index,
122 struct dcb_output *dcbE, struct nvkm_output **poutp)
123{
124 return nvkm_output_new_(&nv50_dac_output_func, disp,
125 index, dcbE, poutp);
126}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
new file mode 100644
index 000000000000..876b14549a58
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -0,0 +1,100 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <core/ramht.h>
28#include <subdev/timer.h>
29
30int
31gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
32 struct nvkm_object *object, u32 handle)
33{
34 return nvkm_ramht_insert(chan->base.root->ramht, object,
35 chan->base.chid, -9, handle,
36 chan->base.chid << 27 | 0x00000001);
37}
38
39static void
40gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
41{
42 struct nv50_disp *disp = chan->base.root->disp;
43 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
44 struct nvkm_device *device = subdev->device;
45 int chid = chan->base.chid;
46
47 /* deactivate channel */
48 nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
49 nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
50 if (nvkm_msec(device, 2000,
51 if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000))
52 break;
53 ) < 0) {
54 nvkm_error(subdev, "ch %d fini: %08x\n", chid,
55 nvkm_rd32(device, 0x610490 + (chid * 0x10)));
56 }
57
58 /* disable error reporting and completion notification */
59 nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
60 nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
61}
62
63static int
64gf119_disp_dmac_init(struct nv50_disp_dmac *chan)
65{
66 struct nv50_disp *disp = chan->base.root->disp;
67 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
68 struct nvkm_device *device = subdev->device;
69 int chid = chan->base.chid;
70
71 /* enable error reporting */
72 nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
73
74 /* initialise channel for dma command submission */
75 nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push);
76 nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000);
77 nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001);
78 nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
79 nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
80 nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
81
82 /* wait for it to go inactive */
83 if (nvkm_msec(device, 2000,
84 if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
85 break;
86 ) < 0) {
87 nvkm_error(subdev, "ch %d init: %08x\n", chid,
88 nvkm_rd32(device, 0x610490 + (chid * 0x10)));
89 return -EBUSY;
90 }
91
92 return 0;
93}
94
95const struct nv50_disp_dmac_func
96gf119_disp_dmac_func = {
97 .init = gf119_disp_dmac_init,
98 .fini = gf119_disp_dmac_fini,
99 .bind = gf119_disp_dmac_bind,
100};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
new file mode 100644
index 000000000000..9c6645a357b9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -0,0 +1,247 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <core/client.h>
28#include <core/oproxy.h>
29#include <core/ramht.h>
30#include <subdev/fb.h>
31#include <subdev/timer.h>
32#include <engine/dma.h>
33
34struct nv50_disp_dmac_object {
35 struct nvkm_oproxy oproxy;
36 struct nv50_disp_root *root;
37 int hash;
38};
39
40static void
41nv50_disp_dmac_child_del_(struct nvkm_oproxy *base)
42{
43 struct nv50_disp_dmac_object *object =
44 container_of(base, typeof(*object), oproxy);
45 nvkm_ramht_remove(object->root->ramht, object->hash);
46}
47
48static const struct nvkm_oproxy_func
49nv50_disp_dmac_child_func_ = {
50 .dtor[0] = nv50_disp_dmac_child_del_,
51};
52
53static int
54nv50_disp_dmac_child_new_(struct nv50_disp_chan *base,
55 const struct nvkm_oclass *oclass,
56 void *data, u32 size, struct nvkm_object **pobject)
57{
58 struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
59 struct nv50_disp_root *root = chan->base.root;
60 struct nvkm_device *device = root->disp->base.engine.subdev.device;
61 const struct nvkm_device_oclass *sclass = oclass->priv;
62 struct nv50_disp_dmac_object *object;
63 int ret;
64
65 if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
66 return -ENOMEM;
67 nvkm_oproxy_ctor(&nv50_disp_dmac_child_func_, oclass, &object->oproxy);
68 object->root = root;
69 *pobject = &object->oproxy.base;
70
71 ret = sclass->ctor(device, oclass, data, size, &object->oproxy.object);
72 if (ret)
73 return ret;
74
75 object->hash = chan->func->bind(chan, object->oproxy.object,
76 oclass->handle);
77 if (object->hash < 0)
78 return object->hash;
79
80 return 0;
81}
82
83static int
84nv50_disp_dmac_child_get_(struct nv50_disp_chan *base, int index,
85 struct nvkm_oclass *sclass)
86{
87 struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
88 struct nv50_disp *disp = chan->base.root->disp;
89 struct nvkm_device *device = disp->base.engine.subdev.device;
90 const struct nvkm_device_oclass *oclass = NULL;
91
92 sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ);
93 if (sclass->engine && sclass->engine->func->base.sclass) {
94 sclass->engine->func->base.sclass(sclass, index, &oclass);
95 if (oclass) {
96 sclass->priv = oclass;
97 return 0;
98 }
99 }
100
101 return -EINVAL;
102}
103
104static void
105nv50_disp_dmac_fini_(struct nv50_disp_chan *base)
106{
107 struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
108 chan->func->fini(chan);
109}
110
111static int
112nv50_disp_dmac_init_(struct nv50_disp_chan *base)
113{
114 struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
115 return chan->func->init(chan);
116}
117
118static void *
119nv50_disp_dmac_dtor_(struct nv50_disp_chan *base)
120{
121 return nv50_disp_dmac(base);
122}
123
124static const struct nv50_disp_chan_func
125nv50_disp_dmac_func_ = {
126 .dtor = nv50_disp_dmac_dtor_,
127 .init = nv50_disp_dmac_init_,
128 .fini = nv50_disp_dmac_fini_,
129 .child_get = nv50_disp_dmac_child_get_,
130 .child_new = nv50_disp_dmac_child_new_,
131};
132
133int
134nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
135 const struct nv50_disp_chan_mthd *mthd,
136 struct nv50_disp_root *root, int chid, int head, u64 push,
137 const struct nvkm_oclass *oclass,
138 struct nvkm_object **pobject)
139{
140 struct nvkm_device *device = root->disp->base.engine.subdev.device;
141 struct nvkm_client *client = oclass->client;
142 struct nvkm_dmaobj *dmaobj;
143 struct nv50_disp_dmac *chan;
144 int ret;
145
146 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
147 return -ENOMEM;
148 *pobject = &chan->base.object;
149 chan->func = func;
150
151 ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
152 chid, head, oclass, &chan->base);
153 if (ret)
154 return ret;
155
156 dmaobj = nvkm_dma_search(device->dma, client, push);
157 if (!dmaobj)
158 return -ENOENT;
159
160 if (dmaobj->limit - dmaobj->start != 0xfff)
161 return -EINVAL;
162
163 switch (dmaobj->target) {
164 case NV_MEM_TARGET_VRAM:
165 chan->push = 0x00000001 | dmaobj->start >> 8;
166 break;
167 case NV_MEM_TARGET_PCI_NOSNOOP:
168 chan->push = 0x00000003 | dmaobj->start >> 8;
169 break;
170 default:
171 return -EINVAL;
172 }
173
174 return 0;
175}
176
177int
178nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
179 struct nvkm_object *object, u32 handle)
180{
181 return nvkm_ramht_insert(chan->base.root->ramht, object,
182 chan->base.chid, -10, handle,
183 chan->base.chid << 28 |
184 chan->base.chid);
185}
186
187static void
188nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
189{
190 struct nv50_disp *disp = chan->base.root->disp;
191 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
192 struct nvkm_device *device = subdev->device;
193 int chid = chan->base.chid;
194
195 /* deactivate channel */
196 nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
197 nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
198 if (nvkm_msec(device, 2000,
199 if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
200 break;
201 ) < 0) {
202 nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
203 nvkm_rd32(device, 0x610200 + (chid * 0x10)));
204 }
205
206 /* disable error reporting and completion notifications */
207 nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
208}
209
210static int
211nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
212{
213 struct nv50_disp *disp = chan->base.root->disp;
214 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
215 struct nvkm_device *device = subdev->device;
216 int chid = chan->base.chid;
217
218 /* enable error reporting */
219 nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
220
221 /* initialise channel for dma command submission */
222 nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push);
223 nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
224 nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
225 nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
226 nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
227 nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);
228
229 /* wait for it to go inactive */
230 if (nvkm_msec(device, 2000,
231 if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
232 break;
233 ) < 0) {
234 nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
235 nvkm_rd32(device, 0x610200 + (chid * 0x10)));
236 return -EBUSY;
237 }
238
239 return 0;
240}
241
242const struct nv50_disp_dmac_func
243nv50_disp_dmac_func = {
244 .init = nv50_disp_dmac_init,
245 .fini = nv50_disp_dmac_fini,
246 .bind = nv50_disp_dmac_bind,
247};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
new file mode 100644
index 000000000000..c748ca23ab70
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -0,0 +1,91 @@
1#ifndef __NV50_DISP_DMAC_H__
2#define __NV50_DISP_DMAC_H__
3#define nv50_disp_dmac(p) container_of((p), struct nv50_disp_dmac, base)
4#include "channv50.h"
5
6struct nv50_disp_dmac {
7 const struct nv50_disp_dmac_func *func;
8 struct nv50_disp_chan base;
9 u32 push;
10};
11
12struct nv50_disp_dmac_func {
13 int (*init)(struct nv50_disp_dmac *);
14 void (*fini)(struct nv50_disp_dmac *);
15 int (*bind)(struct nv50_disp_dmac *, struct nvkm_object *, u32 handle);
16};
17
18int nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *,
19 const struct nv50_disp_chan_mthd *,
20 struct nv50_disp_root *, int chid, int head, u64 push,
21 const struct nvkm_oclass *, struct nvkm_object **);
22
23extern const struct nv50_disp_dmac_func nv50_disp_dmac_func;
24int nv50_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
25extern const struct nv50_disp_dmac_func nv50_disp_core_func;
26
27extern const struct nv50_disp_dmac_func gf119_disp_dmac_func;
28int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
29extern const struct nv50_disp_dmac_func gf119_disp_core_func;
30
31struct nv50_disp_dmac_oclass {
32 int (*ctor)(const struct nv50_disp_dmac_func *,
33 const struct nv50_disp_chan_mthd *,
34 struct nv50_disp_root *, int chid,
35 const struct nvkm_oclass *, void *data, u32 size,
36 struct nvkm_object **);
37 struct nvkm_sclass base;
38 const struct nv50_disp_dmac_func *func;
39 const struct nv50_disp_chan_mthd *mthd;
40 int chid;
41};
42
43int nv50_disp_core_new(const struct nv50_disp_dmac_func *,
44 const struct nv50_disp_chan_mthd *,
45 struct nv50_disp_root *, int chid,
46 const struct nvkm_oclass *oclass, void *data, u32 size,
47 struct nvkm_object **);
48int nv50_disp_base_new(const struct nv50_disp_dmac_func *,
49 const struct nv50_disp_chan_mthd *,
50 struct nv50_disp_root *, int chid,
51 const struct nvkm_oclass *oclass, void *data, u32 size,
52 struct nvkm_object **);
53int nv50_disp_ovly_new(const struct nv50_disp_dmac_func *,
54 const struct nv50_disp_chan_mthd *,
55 struct nv50_disp_root *, int chid,
56 const struct nvkm_oclass *oclass, void *data, u32 size,
57 struct nvkm_object **);
58
59extern const struct nv50_disp_dmac_oclass nv50_disp_core_oclass;
60extern const struct nv50_disp_dmac_oclass nv50_disp_base_oclass;
61extern const struct nv50_disp_dmac_oclass nv50_disp_ovly_oclass;
62
63extern const struct nv50_disp_dmac_oclass g84_disp_core_oclass;
64extern const struct nv50_disp_dmac_oclass g84_disp_base_oclass;
65extern const struct nv50_disp_dmac_oclass g84_disp_ovly_oclass;
66
67extern const struct nv50_disp_dmac_oclass g94_disp_core_oclass;
68
69extern const struct nv50_disp_dmac_oclass gt200_disp_core_oclass;
70extern const struct nv50_disp_dmac_oclass gt200_disp_base_oclass;
71extern const struct nv50_disp_dmac_oclass gt200_disp_ovly_oclass;
72
73extern const struct nv50_disp_dmac_oclass gt215_disp_core_oclass;
74extern const struct nv50_disp_dmac_oclass gt215_disp_base_oclass;
75extern const struct nv50_disp_dmac_oclass gt215_disp_ovly_oclass;
76
77extern const struct nv50_disp_dmac_oclass gf119_disp_core_oclass;
78extern const struct nv50_disp_dmac_oclass gf119_disp_base_oclass;
79extern const struct nv50_disp_dmac_oclass gf119_disp_ovly_oclass;
80
81extern const struct nv50_disp_dmac_oclass gk104_disp_core_oclass;
82extern const struct nv50_disp_dmac_oclass gk104_disp_base_oclass;
83extern const struct nv50_disp_dmac_oclass gk104_disp_ovly_oclass;
84
85extern const struct nv50_disp_dmac_oclass gk110_disp_core_oclass;
86extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass;
87
88extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;
89
90extern const struct nv50_disp_dmac_oclass gm204_disp_core_oclass;
91#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 68347661adca..74e2f7c6c07e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -48,12 +48,12 @@ struct dp_state {
48static int 48static int
49dp_set_link_config(struct dp_state *dp) 49dp_set_link_config(struct dp_state *dp)
50{ 50{
51 struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
52 struct nvkm_output_dp *outp = dp->outp; 51 struct nvkm_output_dp *outp = dp->outp;
53 struct nvkm_disp *disp = nvkm_disp(outp); 52 struct nvkm_disp *disp = outp->base.disp;
54 struct nvkm_bios *bios = nvkm_bios(disp); 53 struct nvkm_subdev *subdev = &disp->engine.subdev;
54 struct nvkm_bios *bios = subdev->device->bios;
55 struct nvbios_init init = { 55 struct nvbios_init init = {
56 .subdev = nv_subdev(disp), 56 .subdev = subdev,
57 .bios = bios, 57 .bios = bios,
58 .offset = 0x0000, 58 .offset = 0x0000,
59 .outp = &outp->base.info, 59 .outp = &outp->base.info,
@@ -64,33 +64,33 @@ dp_set_link_config(struct dp_state *dp)
64 u8 sink[2]; 64 u8 sink[2];
65 int ret; 65 int ret;
66 66
67 DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw); 67 OUTP_DBG(&outp->base, "%d lanes at %d KB/s", dp->link_nr, dp->link_bw);
68 68
69 /* set desired link configuration on the source */ 69 /* set desired link configuration on the source */
70 if ((lnkcmp = dp->outp->info.lnkcmp)) { 70 if ((lnkcmp = dp->outp->info.lnkcmp)) {
71 if (outp->version < 0x30) { 71 if (outp->version < 0x30) {
72 while ((dp->link_bw / 10) < nv_ro16(bios, lnkcmp)) 72 while ((dp->link_bw / 10) < nvbios_rd16(bios, lnkcmp))
73 lnkcmp += 4; 73 lnkcmp += 4;
74 init.offset = nv_ro16(bios, lnkcmp + 2); 74 init.offset = nvbios_rd16(bios, lnkcmp + 2);
75 } else { 75 } else {
76 while ((dp->link_bw / 27000) < nv_ro08(bios, lnkcmp)) 76 while ((dp->link_bw / 27000) < nvbios_rd08(bios, lnkcmp))
77 lnkcmp += 3; 77 lnkcmp += 3;
78 init.offset = nv_ro16(bios, lnkcmp + 1); 78 init.offset = nvbios_rd16(bios, lnkcmp + 1);
79 } 79 }
80 80
81 nvbios_exec(&init); 81 nvbios_exec(&init);
82 } 82 }
83 83
84 ret = impl->lnk_ctl(outp, dp->link_nr, dp->link_bw / 27000, 84 ret = outp->func->lnk_ctl(outp, dp->link_nr, dp->link_bw / 27000,
85 outp->dpcd[DPCD_RC02] & 85 outp->dpcd[DPCD_RC02] &
86 DPCD_RC02_ENHANCED_FRAME_CAP); 86 DPCD_RC02_ENHANCED_FRAME_CAP);
87 if (ret) { 87 if (ret) {
88 if (ret < 0) 88 if (ret < 0)
89 ERR("lnk_ctl failed with %d\n", ret); 89 OUTP_ERR(&outp->base, "lnk_ctl failed with %d", ret);
90 return ret; 90 return ret;
91 } 91 }
92 92
93 impl->lnk_pwr(outp, dp->link_nr); 93 outp->func->lnk_pwr(outp, dp->link_nr);
94 94
95 /* set desired link configuration on the sink */ 95 /* set desired link configuration on the sink */
96 sink[0] = dp->link_bw / 27000; 96 sink[0] = dp->link_bw / 27000;
@@ -98,29 +98,27 @@ dp_set_link_config(struct dp_state *dp)
98 if (outp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP) 98 if (outp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
99 sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN; 99 sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
100 100
101 return nv_wraux(outp->base.edid, DPCD_LC00_LINK_BW_SET, sink, 2); 101 return nvkm_wraux(outp->aux, DPCD_LC00_LINK_BW_SET, sink, 2);
102} 102}
103 103
104static void 104static void
105dp_set_training_pattern(struct dp_state *dp, u8 pattern) 105dp_set_training_pattern(struct dp_state *dp, u8 pattern)
106{ 106{
107 struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
108 struct nvkm_output_dp *outp = dp->outp; 107 struct nvkm_output_dp *outp = dp->outp;
109 u8 sink_tp; 108 u8 sink_tp;
110 109
111 DBG("training pattern %d\n", pattern); 110 OUTP_DBG(&outp->base, "training pattern %d", pattern);
112 impl->pattern(outp, pattern); 111 outp->func->pattern(outp, pattern);
113 112
114 nv_rdaux(outp->base.edid, DPCD_LC02, &sink_tp, 1); 113 nvkm_rdaux(outp->aux, DPCD_LC02, &sink_tp, 1);
115 sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET; 114 sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
116 sink_tp |= pattern; 115 sink_tp |= pattern;
117 nv_wraux(outp->base.edid, DPCD_LC02, &sink_tp, 1); 116 nvkm_wraux(outp->aux, DPCD_LC02, &sink_tp, 1);
118} 117}
119 118
120static int 119static int
121dp_link_train_commit(struct dp_state *dp, bool pc) 120dp_link_train_commit(struct dp_state *dp, bool pc)
122{ 121{
123 struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
124 struct nvkm_output_dp *outp = dp->outp; 122 struct nvkm_output_dp *outp = dp->outp;
125 int ret, i; 123 int ret, i;
126 124
@@ -146,16 +144,17 @@ dp_link_train_commit(struct dp_state *dp, bool pc)
146 dp->conf[i] = (lpre << 3) | lvsw; 144 dp->conf[i] = (lpre << 3) | lvsw;
147 dp->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4); 145 dp->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4);
148 146
149 DBG("config lane %d %02x %02x\n", i, dp->conf[i], lpc2); 147 OUTP_DBG(&outp->base, "config lane %d %02x %02x",
150 impl->drv_ctl(outp, i, lvsw & 3, lpre & 3, lpc2 & 3); 148 i, dp->conf[i], lpc2);
149 outp->func->drv_ctl(outp, i, lvsw & 3, lpre & 3, lpc2 & 3);
151 } 150 }
152 151
153 ret = nv_wraux(outp->base.edid, DPCD_LC03(0), dp->conf, 4); 152 ret = nvkm_wraux(outp->aux, DPCD_LC03(0), dp->conf, 4);
154 if (ret) 153 if (ret)
155 return ret; 154 return ret;
156 155
157 if (pc) { 156 if (pc) {
158 ret = nv_wraux(outp->base.edid, DPCD_LC0F, dp->pc2conf, 2); 157 ret = nvkm_wraux(outp->aux, DPCD_LC0F, dp->pc2conf, 2);
159 if (ret) 158 if (ret)
160 return ret; 159 return ret;
161 } 160 }
@@ -174,17 +173,18 @@ dp_link_train_update(struct dp_state *dp, bool pc, u32 delay)
174 else 173 else
175 udelay(delay); 174 udelay(delay);
176 175
177 ret = nv_rdaux(outp->base.edid, DPCD_LS02, dp->stat, 6); 176 ret = nvkm_rdaux(outp->aux, DPCD_LS02, dp->stat, 6);
178 if (ret) 177 if (ret)
179 return ret; 178 return ret;
180 179
181 if (pc) { 180 if (pc) {
182 ret = nv_rdaux(outp->base.edid, DPCD_LS0C, &dp->pc2stat, 1); 181 ret = nvkm_rdaux(outp->aux, DPCD_LS0C, &dp->pc2stat, 1);
183 if (ret) 182 if (ret)
184 dp->pc2stat = 0x00; 183 dp->pc2stat = 0x00;
185 DBG("status %6ph pc2 %02x\n", dp->stat, dp->pc2stat); 184 OUTP_DBG(&outp->base, "status %6ph pc2 %02x",
185 dp->stat, dp->pc2stat);
186 } else { 186 } else {
187 DBG("status %6ph\n", dp->stat); 187 OUTP_DBG(&outp->base, "status %6ph", dp->stat);
188 } 188 }
189 189
190 return 0; 190 return 0;
@@ -260,11 +260,11 @@ static void
260dp_link_train_init(struct dp_state *dp, bool spread) 260dp_link_train_init(struct dp_state *dp, bool spread)
261{ 261{
262 struct nvkm_output_dp *outp = dp->outp; 262 struct nvkm_output_dp *outp = dp->outp;
263 struct nvkm_disp *disp = nvkm_disp(outp); 263 struct nvkm_disp *disp = outp->base.disp;
264 struct nvkm_bios *bios = nvkm_bios(disp); 264 struct nvkm_subdev *subdev = &disp->engine.subdev;
265 struct nvbios_init init = { 265 struct nvbios_init init = {
266 .subdev = nv_subdev(disp), 266 .subdev = subdev,
267 .bios = bios, 267 .bios = subdev->device->bios,
268 .outp = &outp->base.info, 268 .outp = &outp->base.info,
269 .crtc = -1, 269 .crtc = -1,
270 .execute = 1, 270 .execute = 1,
@@ -286,11 +286,11 @@ static void
286dp_link_train_fini(struct dp_state *dp) 286dp_link_train_fini(struct dp_state *dp)
287{ 287{
288 struct nvkm_output_dp *outp = dp->outp; 288 struct nvkm_output_dp *outp = dp->outp;
289 struct nvkm_disp *disp = nvkm_disp(outp); 289 struct nvkm_disp *disp = outp->base.disp;
290 struct nvkm_bios *bios = nvkm_bios(disp); 290 struct nvkm_subdev *subdev = &disp->engine.subdev;
291 struct nvbios_init init = { 291 struct nvbios_init init = {
292 .subdev = nv_subdev(disp), 292 .subdev = subdev,
293 .bios = bios, 293 .bios = subdev->device->bios,
294 .outp = &outp->base.info, 294 .outp = &outp->base.info,
295 .crtc = -1, 295 .crtc = -1,
296 .execute = 1, 296 .execute = 1,
@@ -322,7 +322,7 @@ void
322nvkm_dp_train(struct work_struct *w) 322nvkm_dp_train(struct work_struct *w)
323{ 323{
324 struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work); 324 struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
325 struct nv50_disp_priv *priv = (void *)nvkm_disp(outp); 325 struct nv50_disp *disp = nv50_disp(outp->base.disp);
326 const struct dp_rates *cfg = nvkm_dp_rates; 326 const struct dp_rates *cfg = nvkm_dp_rates;
327 struct dp_state _dp = { 327 struct dp_state _dp = {
328 .outp = outp, 328 .outp = outp,
@@ -330,11 +330,11 @@ nvkm_dp_train(struct work_struct *w)
330 u32 datarate = 0; 330 u32 datarate = 0;
331 int ret; 331 int ret;
332 332
333 if (!outp->base.info.location && priv->sor.magic) 333 if (!outp->base.info.location && disp->func->sor.magic)
334 priv->sor.magic(&outp->base); 334 disp->func->sor.magic(&outp->base);
335 335
336 /* bring capabilities within encoder limits */ 336 /* bring capabilities within encoder limits */
337 if (nv_mclass(priv) < GF110_DISP) 337 if (disp->base.engine.subdev.device->chipset < 0xd0)
338 outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED; 338 outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED;
339 if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) { 339 if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) {
340 outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT; 340 outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT;
@@ -386,12 +386,12 @@ nvkm_dp_train(struct work_struct *w)
386 /* finish link training and execute post-train script from vbios */ 386 /* finish link training and execute post-train script from vbios */
387 dp_set_training_pattern(dp, 0); 387 dp_set_training_pattern(dp, 0);
388 if (ret < 0) 388 if (ret < 0)
389 ERR("link training failed\n"); 389 OUTP_ERR(&outp->base, "link training failed");
390 390
391 dp_link_train_fini(dp); 391 dp_link_train_fini(dp);
392 392
393 /* signal completion and enable link interrupt handling */ 393 /* signal completion and enable link interrupt handling */
394 DBG("training complete\n"); 394 OUTP_DBG(&outp->base, "training complete");
395 atomic_set(&outp->lt.done, 1); 395 atomic_set(&outp->lt.done, 1);
396 wake_up(&outp->lt.wait); 396 wake_up(&outp->lt.wait);
397 nvkm_notify_get(&outp->irq); 397 nvkm_notify_get(&outp->irq);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index a0dcf534cb20..3e3e592cd09f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -22,251 +22,34 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25 25#include "rootnv50.h"
26#include <nvif/class.h> 26
27 27static const struct nv50_disp_func
28/******************************************************************************* 28g84_disp = {
29 * EVO master channel object 29 .intr = nv50_disp_intr,
30 ******************************************************************************/ 30 .uevent = &nv50_disp_chan_uevent,
31 31 .super = nv50_disp_intr_supervisor,
32const struct nv50_disp_mthd_list 32 .root = &g84_disp_root_oclass,
33g84_disp_core_mthd_dac = { 33 .head.vblank_init = nv50_disp_vblank_init,
34 .mthd = 0x0080, 34 .head.vblank_fini = nv50_disp_vblank_fini,
35 .addr = 0x000008, 35 .head.scanoutpos = nv50_disp_root_scanoutpos,
36 .data = { 36 .outp.internal.crt = nv50_dac_output_new,
37 { 0x0400, 0x610b58 }, 37 .outp.internal.tmds = nv50_sor_output_new,
38 { 0x0404, 0x610bdc }, 38 .outp.internal.lvds = nv50_sor_output_new,
39 { 0x0420, 0x610bc4 }, 39 .outp.external.tmds = nv50_pior_output_new,
40 {} 40 .outp.external.dp = nv50_pior_dp_new,
41 } 41 .dac.nr = 3,
42}; 42 .dac.power = nv50_dac_power,
43 43 .dac.sense = nv50_dac_sense,
44const struct nv50_disp_mthd_list 44 .sor.nr = 2,
45g84_disp_core_mthd_head = { 45 .sor.power = nv50_sor_power,
46 .mthd = 0x0400, 46 .sor.hdmi = g84_hdmi_ctrl,
47 .addr = 0x000540, 47 .pior.nr = 3,
48 .data = { 48 .pior.power = nv50_pior_power,
49 { 0x0800, 0x610ad8 }, 49};
50 { 0x0804, 0x610ad0 }, 50
51 { 0x0808, 0x610a48 }, 51int
52 { 0x080c, 0x610a78 }, 52g84_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
53 { 0x0810, 0x610ac0 },
54 { 0x0814, 0x610af8 },
55 { 0x0818, 0x610b00 },
56 { 0x081c, 0x610ae8 },
57 { 0x0820, 0x610af0 },
58 { 0x0824, 0x610b08 },
59 { 0x0828, 0x610b10 },
60 { 0x082c, 0x610a68 },
61 { 0x0830, 0x610a60 },
62 { 0x0834, 0x000000 },
63 { 0x0838, 0x610a40 },
64 { 0x0840, 0x610a24 },
65 { 0x0844, 0x610a2c },
66 { 0x0848, 0x610aa8 },
67 { 0x084c, 0x610ab0 },
68 { 0x085c, 0x610c5c },
69 { 0x0860, 0x610a84 },
70 { 0x0864, 0x610a90 },
71 { 0x0868, 0x610b18 },
72 { 0x086c, 0x610b20 },
73 { 0x0870, 0x610ac8 },
74 { 0x0874, 0x610a38 },
75 { 0x0878, 0x610c50 },
76 { 0x0880, 0x610a58 },
77 { 0x0884, 0x610a9c },
78 { 0x089c, 0x610c68 },
79 { 0x08a0, 0x610a70 },
80 { 0x08a4, 0x610a50 },
81 { 0x08a8, 0x610ae0 },
82 { 0x08c0, 0x610b28 },
83 { 0x08c4, 0x610b30 },
84 { 0x08c8, 0x610b40 },
85 { 0x08d4, 0x610b38 },
86 { 0x08d8, 0x610b48 },
87 { 0x08dc, 0x610b50 },
88 { 0x0900, 0x610a18 },
89 { 0x0904, 0x610ab8 },
90 { 0x0910, 0x610c70 },
91 { 0x0914, 0x610c78 },
92 {}
93 }
94};
95
96const struct nv50_disp_mthd_chan
97g84_disp_core_mthd_chan = {
98 .name = "Core",
99 .addr = 0x000000,
100 .data = {
101 { "Global", 1, &nv50_disp_core_mthd_base },
102 { "DAC", 3, &g84_disp_core_mthd_dac },
103 { "SOR", 2, &nv50_disp_core_mthd_sor },
104 { "PIOR", 3, &nv50_disp_core_mthd_pior },
105 { "HEAD", 2, &g84_disp_core_mthd_head },
106 {}
107 }
108};
109
110/*******************************************************************************
111 * EVO sync channel objects
112 ******************************************************************************/
113
114static const struct nv50_disp_mthd_list
115g84_disp_base_mthd_base = {
116 .mthd = 0x0000,
117 .addr = 0x000000,
118 .data = {
119 { 0x0080, 0x000000 },
120 { 0x0084, 0x0008c4 },
121 { 0x0088, 0x0008d0 },
122 { 0x008c, 0x0008dc },
123 { 0x0090, 0x0008e4 },
124 { 0x0094, 0x610884 },
125 { 0x00a0, 0x6108a0 },
126 { 0x00a4, 0x610878 },
127 { 0x00c0, 0x61086c },
128 { 0x00c4, 0x610800 },
129 { 0x00c8, 0x61080c },
130 { 0x00cc, 0x610818 },
131 { 0x00e0, 0x610858 },
132 { 0x00e4, 0x610860 },
133 { 0x00e8, 0x6108ac },
134 { 0x00ec, 0x6108b4 },
135 { 0x00fc, 0x610824 },
136 { 0x0100, 0x610894 },
137 { 0x0104, 0x61082c },
138 { 0x0110, 0x6108bc },
139 { 0x0114, 0x61088c },
140 {}
141 }
142};
143
144const struct nv50_disp_mthd_chan
145g84_disp_base_mthd_chan = {
146 .name = "Base",
147 .addr = 0x000540,
148 .data = {
149 { "Global", 1, &g84_disp_base_mthd_base },
150 { "Image", 2, &nv50_disp_base_mthd_image },
151 {}
152 }
153};
154
155/*******************************************************************************
156 * EVO overlay channel objects
157 ******************************************************************************/
158
159static const struct nv50_disp_mthd_list
160g84_disp_ovly_mthd_base = {
161 .mthd = 0x0000,
162 .addr = 0x000000,
163 .data = {
164 { 0x0080, 0x000000 },
165 { 0x0084, 0x6109a0 },
166 { 0x0088, 0x6109c0 },
167 { 0x008c, 0x6109c8 },
168 { 0x0090, 0x6109b4 },
169 { 0x0094, 0x610970 },
170 { 0x00a0, 0x610998 },
171 { 0x00a4, 0x610964 },
172 { 0x00c0, 0x610958 },
173 { 0x00e0, 0x6109a8 },
174 { 0x00e4, 0x6109d0 },
175 { 0x00e8, 0x6109d8 },
176 { 0x0100, 0x61094c },
177 { 0x0104, 0x610984 },
178 { 0x0108, 0x61098c },
179 { 0x0800, 0x6109f8 },
180 { 0x0808, 0x610a08 },
181 { 0x080c, 0x610a10 },
182 { 0x0810, 0x610a00 },
183 {}
184 }
185};
186
187const struct nv50_disp_mthd_chan
188g84_disp_ovly_mthd_chan = {
189 .name = "Overlay",
190 .addr = 0x000540,
191 .data = {
192 { "Global", 1, &g84_disp_ovly_mthd_base },
193 {}
194 }
195};
196
197/*******************************************************************************
198 * Base display object
199 ******************************************************************************/
200
201static struct nvkm_oclass
202g84_disp_sclass[] = {
203 { G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
204 { G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
205 { G82_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
206 { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
207 { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
208 {}
209};
210
211static struct nvkm_oclass
212g84_disp_main_oclass[] = {
213 { G82_DISP, &nv50_disp_main_ofuncs },
214 {}
215};
216
217/*******************************************************************************
218 * Display engine implementation
219 ******************************************************************************/
220
221static int
222g84_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
223 struct nvkm_oclass *oclass, void *data, u32 size,
224 struct nvkm_object **pobject)
225{ 53{
226 struct nv50_disp_priv *priv; 54 return nv50_disp_new_(&g84_disp, device, index, 2, pdisp);
227 int ret;
228
229 ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
230 "display", &priv);
231 *pobject = nv_object(priv);
232 if (ret)
233 return ret;
234
235 ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
236 if (ret)
237 return ret;
238
239 nv_engine(priv)->sclass = g84_disp_main_oclass;
240 nv_engine(priv)->cclass = &nv50_disp_cclass;
241 nv_subdev(priv)->intr = nv50_disp_intr;
242 INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
243 priv->sclass = g84_disp_sclass;
244 priv->head.nr = 2;
245 priv->dac.nr = 3;
246 priv->sor.nr = 2;
247 priv->pior.nr = 3;
248 priv->dac.power = nv50_dac_power;
249 priv->dac.sense = nv50_dac_sense;
250 priv->sor.power = nv50_sor_power;
251 priv->sor.hdmi = g84_hdmi_ctrl;
252 priv->pior.power = nv50_pior_power;
253 return 0;
254} 55}
255
256struct nvkm_oclass *
257g84_disp_oclass = &(struct nv50_disp_impl) {
258 .base.base.handle = NV_ENGINE(DISP, 0x82),
259 .base.base.ofuncs = &(struct nvkm_ofuncs) {
260 .ctor = g84_disp_ctor,
261 .dtor = _nvkm_disp_dtor,
262 .init = _nvkm_disp_init,
263 .fini = _nvkm_disp_fini,
264 },
265 .base.vblank = &nv50_disp_vblank_func,
266 .base.outp = nv50_disp_outp_sclass,
267 .mthd.core = &g84_disp_core_mthd_chan,
268 .mthd.base = &g84_disp_base_mthd_chan,
269 .mthd.ovly = &g84_disp_ovly_mthd_chan,
270 .mthd.prev = 0x000004,
271 .head.scanoutpos = nv50_disp_main_scanoutpos,
272}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index 1ab0d0ae3cc8..7a7af3b478f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -22,118 +22,35 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25#include "outpdp.h" 25#include "rootnv50.h"
26 26
27#include <nvif/class.h> 27static const struct nv50_disp_func
28 28g94_disp = {
29/******************************************************************************* 29 .intr = nv50_disp_intr,
30 * EVO master channel object 30 .uevent = &nv50_disp_chan_uevent,
31 ******************************************************************************/ 31 .super = nv50_disp_intr_supervisor,
32 32 .root = &g94_disp_root_oclass,
33const struct nv50_disp_mthd_list 33 .head.vblank_init = nv50_disp_vblank_init,
34g94_disp_core_mthd_sor = { 34 .head.vblank_fini = nv50_disp_vblank_fini,
35 .mthd = 0x0040, 35 .head.scanoutpos = nv50_disp_root_scanoutpos,
36 .addr = 0x000008, 36 .outp.internal.crt = nv50_dac_output_new,
37 .data = { 37 .outp.internal.tmds = nv50_sor_output_new,
38 { 0x0600, 0x610794 }, 38 .outp.internal.lvds = nv50_sor_output_new,
39 {} 39 .outp.internal.dp = g94_sor_dp_new,
40 } 40 .outp.external.tmds = nv50_pior_output_new,
41}; 41 .outp.external.dp = nv50_pior_dp_new,
42 42 .dac.nr = 3,
43const struct nv50_disp_mthd_chan 43 .dac.power = nv50_dac_power,
44g94_disp_core_mthd_chan = { 44 .dac.sense = nv50_dac_sense,
45 .name = "Core", 45 .sor.nr = 4,
46 .addr = 0x000000, 46 .sor.power = nv50_sor_power,
47 .data = { 47 .sor.hdmi = g84_hdmi_ctrl,
48 { "Global", 1, &nv50_disp_core_mthd_base }, 48 .pior.nr = 3,
49 { "DAC", 3, &g84_disp_core_mthd_dac }, 49 .pior.power = nv50_pior_power,
50 { "SOR", 4, &g94_disp_core_mthd_sor },
51 { "PIOR", 3, &nv50_disp_core_mthd_pior },
52 { "HEAD", 2, &g84_disp_core_mthd_head },
53 {}
54 }
55}; 50};
56 51
57/******************************************************************************* 52int
58 * Base display object 53g94_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
59 ******************************************************************************/
60
61static struct nvkm_oclass
62g94_disp_sclass[] = {
63 { GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
64 { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
65 { GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
66 { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
67 { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
68 {}
69};
70
71static struct nvkm_oclass
72g94_disp_main_oclass[] = {
73 { GT206_DISP, &nv50_disp_main_ofuncs },
74 {}
75};
76
77/*******************************************************************************
78 * Display engine implementation
79 ******************************************************************************/
80
81static int
82g94_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
83 struct nvkm_oclass *oclass, void *data, u32 size,
84 struct nvkm_object **pobject)
85{ 54{
86 struct nv50_disp_priv *priv; 55 return nv50_disp_new_(&g94_disp, device, index, 2, pdisp);
87 int ret;
88
89 ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
90 "display", &priv);
91 *pobject = nv_object(priv);
92 if (ret)
93 return ret;
94
95 ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
96 if (ret)
97 return ret;
98
99 nv_engine(priv)->sclass = g94_disp_main_oclass;
100 nv_engine(priv)->cclass = &nv50_disp_cclass;
101 nv_subdev(priv)->intr = nv50_disp_intr;
102 INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
103 priv->sclass = g94_disp_sclass;
104 priv->head.nr = 2;
105 priv->dac.nr = 3;
106 priv->sor.nr = 4;
107 priv->pior.nr = 3;
108 priv->dac.power = nv50_dac_power;
109 priv->dac.sense = nv50_dac_sense;
110 priv->sor.power = nv50_sor_power;
111 priv->sor.hdmi = g84_hdmi_ctrl;
112 priv->pior.power = nv50_pior_power;
113 return 0;
114} 56}
115
116struct nvkm_oclass *
117g94_disp_outp_sclass[] = {
118 &nv50_pior_dp_impl.base.base,
119 &g94_sor_dp_impl.base.base,
120 NULL
121};
122
123struct nvkm_oclass *
124g94_disp_oclass = &(struct nv50_disp_impl) {
125 .base.base.handle = NV_ENGINE(DISP, 0x88),
126 .base.base.ofuncs = &(struct nvkm_ofuncs) {
127 .ctor = g94_disp_ctor,
128 .dtor = _nvkm_disp_dtor,
129 .init = _nvkm_disp_init,
130 .fini = _nvkm_disp_fini,
131 },
132 .base.vblank = &nv50_disp_vblank_func,
133 .base.outp = g94_disp_outp_sclass,
134 .mthd.core = &g94_disp_core_mthd_chan,
135 .mthd.base = &g84_disp_base_mthd_chan,
136 .mthd.ovly = &g84_disp_ovly_mthd_chan,
137 .mthd.prev = 0x000004,
138 .head.scanoutpos = nv50_disp_main_scanoutpos,
139}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
deleted file mode 100644
index 7f2f05f78cc8..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
+++ /dev/null
@@ -1,1310 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "nv50.h"
25#include "outp.h"
26#include "outpdp.h"
27
28#include <core/client.h>
29#include <core/gpuobj.h>
30#include <core/ramht.h>
31#include <subdev/bios.h>
32#include <subdev/bios/dcb.h>
33#include <subdev/bios/disp.h>
34#include <subdev/bios/init.h>
35#include <subdev/bios/pll.h>
36#include <subdev/devinit.h>
37#include <subdev/timer.h>
38
39#include <nvif/class.h>
40#include <nvif/unpack.h>
41
42/*******************************************************************************
43 * EVO channel base class
44 ******************************************************************************/
45
46static void
47gf110_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
48{
49 struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
50 nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000000 << index);
51 nv_wr32(priv, 0x61008c, 0x00000001 << index);
52}
53
54static void
55gf110_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
56{
57 struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
58 nv_wr32(priv, 0x61008c, 0x00000001 << index);
59 nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000001 << index);
60}
61
62const struct nvkm_event_func
63gf110_disp_chan_uevent = {
64 .ctor = nv50_disp_chan_uevent_ctor,
65 .init = gf110_disp_chan_uevent_init,
66 .fini = gf110_disp_chan_uevent_fini,
67};
68
69/*******************************************************************************
70 * EVO DMA channel base class
71 ******************************************************************************/
72
73static int
74gf110_disp_dmac_object_attach(struct nvkm_object *parent,
75 struct nvkm_object *object, u32 name)
76{
77 struct nv50_disp_base *base = (void *)parent->parent;
78 struct nv50_disp_chan *chan = (void *)parent;
79 u32 addr = nv_gpuobj(object)->node->offset;
80 u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
81 return nvkm_ramht_insert(base->ramht, chan->chid, name, data);
82}
83
84static void
85gf110_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
86{
87 struct nv50_disp_base *base = (void *)parent->parent;
88 nvkm_ramht_remove(base->ramht, cookie);
89}
90
91static int
92gf110_disp_dmac_init(struct nvkm_object *object)
93{
94 struct nv50_disp_priv *priv = (void *)object->engine;
95 struct nv50_disp_dmac *dmac = (void *)object;
96 int chid = dmac->base.chid;
97 int ret;
98
99 ret = nv50_disp_chan_init(&dmac->base);
100 if (ret)
101 return ret;
102
103 /* enable error reporting */
104 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
105
106 /* initialise channel for dma command submission */
107 nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
108 nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
109 nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
110 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
111 nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
112 nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
113
114 /* wait for it to go inactive */
115 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
116 nv_error(dmac, "init: 0x%08x\n",
117 nv_rd32(priv, 0x610490 + (chid * 0x10)));
118 return -EBUSY;
119 }
120
121 return 0;
122}
123
124static int
125gf110_disp_dmac_fini(struct nvkm_object *object, bool suspend)
126{
127 struct nv50_disp_priv *priv = (void *)object->engine;
128 struct nv50_disp_dmac *dmac = (void *)object;
129 int chid = dmac->base.chid;
130
131 /* deactivate channel */
132 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
133 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
134 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
135 nv_error(dmac, "fini: 0x%08x\n",
136 nv_rd32(priv, 0x610490 + (chid * 0x10)));
137 if (suspend)
138 return -EBUSY;
139 }
140
141 /* disable error reporting and completion notification */
142 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
143 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
144
145 return nv50_disp_chan_fini(&dmac->base, suspend);
146}
147
148/*******************************************************************************
149 * EVO master channel object
150 ******************************************************************************/
151
152const struct nv50_disp_mthd_list
153gf110_disp_core_mthd_base = {
154 .mthd = 0x0000,
155 .addr = 0x000000,
156 .data = {
157 { 0x0080, 0x660080 },
158 { 0x0084, 0x660084 },
159 { 0x0088, 0x660088 },
160 { 0x008c, 0x000000 },
161 {}
162 }
163};
164
165const struct nv50_disp_mthd_list
166gf110_disp_core_mthd_dac = {
167 .mthd = 0x0020,
168 .addr = 0x000020,
169 .data = {
170 { 0x0180, 0x660180 },
171 { 0x0184, 0x660184 },
172 { 0x0188, 0x660188 },
173 { 0x0190, 0x660190 },
174 {}
175 }
176};
177
178const struct nv50_disp_mthd_list
179gf110_disp_core_mthd_sor = {
180 .mthd = 0x0020,
181 .addr = 0x000020,
182 .data = {
183 { 0x0200, 0x660200 },
184 { 0x0204, 0x660204 },
185 { 0x0208, 0x660208 },
186 { 0x0210, 0x660210 },
187 {}
188 }
189};
190
191const struct nv50_disp_mthd_list
192gf110_disp_core_mthd_pior = {
193 .mthd = 0x0020,
194 .addr = 0x000020,
195 .data = {
196 { 0x0300, 0x660300 },
197 { 0x0304, 0x660304 },
198 { 0x0308, 0x660308 },
199 { 0x0310, 0x660310 },
200 {}
201 }
202};
203
204static const struct nv50_disp_mthd_list
205gf110_disp_core_mthd_head = {
206 .mthd = 0x0300,
207 .addr = 0x000300,
208 .data = {
209 { 0x0400, 0x660400 },
210 { 0x0404, 0x660404 },
211 { 0x0408, 0x660408 },
212 { 0x040c, 0x66040c },
213 { 0x0410, 0x660410 },
214 { 0x0414, 0x660414 },
215 { 0x0418, 0x660418 },
216 { 0x041c, 0x66041c },
217 { 0x0420, 0x660420 },
218 { 0x0424, 0x660424 },
219 { 0x0428, 0x660428 },
220 { 0x042c, 0x66042c },
221 { 0x0430, 0x660430 },
222 { 0x0434, 0x660434 },
223 { 0x0438, 0x660438 },
224 { 0x0440, 0x660440 },
225 { 0x0444, 0x660444 },
226 { 0x0448, 0x660448 },
227 { 0x044c, 0x66044c },
228 { 0x0450, 0x660450 },
229 { 0x0454, 0x660454 },
230 { 0x0458, 0x660458 },
231 { 0x045c, 0x66045c },
232 { 0x0460, 0x660460 },
233 { 0x0468, 0x660468 },
234 { 0x046c, 0x66046c },
235 { 0x0470, 0x660470 },
236 { 0x0474, 0x660474 },
237 { 0x0480, 0x660480 },
238 { 0x0484, 0x660484 },
239 { 0x048c, 0x66048c },
240 { 0x0490, 0x660490 },
241 { 0x0494, 0x660494 },
242 { 0x0498, 0x660498 },
243 { 0x04b0, 0x6604b0 },
244 { 0x04b8, 0x6604b8 },
245 { 0x04bc, 0x6604bc },
246 { 0x04c0, 0x6604c0 },
247 { 0x04c4, 0x6604c4 },
248 { 0x04c8, 0x6604c8 },
249 { 0x04d0, 0x6604d0 },
250 { 0x04d4, 0x6604d4 },
251 { 0x04e0, 0x6604e0 },
252 { 0x04e4, 0x6604e4 },
253 { 0x04e8, 0x6604e8 },
254 { 0x04ec, 0x6604ec },
255 { 0x04f0, 0x6604f0 },
256 { 0x04f4, 0x6604f4 },
257 { 0x04f8, 0x6604f8 },
258 { 0x04fc, 0x6604fc },
259 { 0x0500, 0x660500 },
260 { 0x0504, 0x660504 },
261 { 0x0508, 0x660508 },
262 { 0x050c, 0x66050c },
263 { 0x0510, 0x660510 },
264 { 0x0514, 0x660514 },
265 { 0x0518, 0x660518 },
266 { 0x051c, 0x66051c },
267 { 0x052c, 0x66052c },
268 { 0x0530, 0x660530 },
269 { 0x054c, 0x66054c },
270 { 0x0550, 0x660550 },
271 { 0x0554, 0x660554 },
272 { 0x0558, 0x660558 },
273 { 0x055c, 0x66055c },
274 {}
275 }
276};
277
278static const struct nv50_disp_mthd_chan
279gf110_disp_core_mthd_chan = {
280 .name = "Core",
281 .addr = 0x000000,
282 .data = {
283 { "Global", 1, &gf110_disp_core_mthd_base },
284 { "DAC", 3, &gf110_disp_core_mthd_dac },
285 { "SOR", 8, &gf110_disp_core_mthd_sor },
286 { "PIOR", 4, &gf110_disp_core_mthd_pior },
287 { "HEAD", 4, &gf110_disp_core_mthd_head },
288 {}
289 }
290};
291
292static int
293gf110_disp_core_init(struct nvkm_object *object)
294{
295 struct nv50_disp_priv *priv = (void *)object->engine;
296 struct nv50_disp_dmac *mast = (void *)object;
297 int ret;
298
299 ret = nv50_disp_chan_init(&mast->base);
300 if (ret)
301 return ret;
302
303 /* enable error reporting */
304 nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
305
306 /* initialise channel for dma command submission */
307 nv_wr32(priv, 0x610494, mast->push);
308 nv_wr32(priv, 0x610498, 0x00010000);
309 nv_wr32(priv, 0x61049c, 0x00000001);
310 nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
311 nv_wr32(priv, 0x640000, 0x00000000);
312 nv_wr32(priv, 0x610490, 0x01000013);
313
314 /* wait for it to go inactive */
315 if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
316 nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
317 return -EBUSY;
318 }
319
320 return 0;
321}
322
323static int
324gf110_disp_core_fini(struct nvkm_object *object, bool suspend)
325{
326 struct nv50_disp_priv *priv = (void *)object->engine;
327 struct nv50_disp_dmac *mast = (void *)object;
328
329 /* deactivate channel */
330 nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
331 nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
332 if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
333 nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
334 if (suspend)
335 return -EBUSY;
336 }
337
338 /* disable error reporting and completion notification */
339 nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
340 nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
341
342 return nv50_disp_chan_fini(&mast->base, suspend);
343}
344
345struct nv50_disp_chan_impl
346gf110_disp_core_ofuncs = {
347 .base.ctor = nv50_disp_core_ctor,
348 .base.dtor = nv50_disp_dmac_dtor,
349 .base.init = gf110_disp_core_init,
350 .base.fini = gf110_disp_core_fini,
351 .base.ntfy = nv50_disp_chan_ntfy,
352 .base.map = nv50_disp_chan_map,
353 .base.rd32 = nv50_disp_chan_rd32,
354 .base.wr32 = nv50_disp_chan_wr32,
355 .chid = 0,
356 .attach = gf110_disp_dmac_object_attach,
357 .detach = gf110_disp_dmac_object_detach,
358};
359
360/*******************************************************************************
361 * EVO sync channel objects
362 ******************************************************************************/
363
364static const struct nv50_disp_mthd_list
365gf110_disp_base_mthd_base = {
366 .mthd = 0x0000,
367 .addr = 0x000000,
368 .data = {
369 { 0x0080, 0x661080 },
370 { 0x0084, 0x661084 },
371 { 0x0088, 0x661088 },
372 { 0x008c, 0x66108c },
373 { 0x0090, 0x661090 },
374 { 0x0094, 0x661094 },
375 { 0x00a0, 0x6610a0 },
376 { 0x00a4, 0x6610a4 },
377 { 0x00c0, 0x6610c0 },
378 { 0x00c4, 0x6610c4 },
379 { 0x00c8, 0x6610c8 },
380 { 0x00cc, 0x6610cc },
381 { 0x00e0, 0x6610e0 },
382 { 0x00e4, 0x6610e4 },
383 { 0x00e8, 0x6610e8 },
384 { 0x00ec, 0x6610ec },
385 { 0x00fc, 0x6610fc },
386 { 0x0100, 0x661100 },
387 { 0x0104, 0x661104 },
388 { 0x0108, 0x661108 },
389 { 0x010c, 0x66110c },
390 { 0x0110, 0x661110 },
391 { 0x0114, 0x661114 },
392 { 0x0118, 0x661118 },
393 { 0x011c, 0x66111c },
394 { 0x0130, 0x661130 },
395 { 0x0134, 0x661134 },
396 { 0x0138, 0x661138 },
397 { 0x013c, 0x66113c },
398 { 0x0140, 0x661140 },
399 { 0x0144, 0x661144 },
400 { 0x0148, 0x661148 },
401 { 0x014c, 0x66114c },
402 { 0x0150, 0x661150 },
403 { 0x0154, 0x661154 },
404 { 0x0158, 0x661158 },
405 { 0x015c, 0x66115c },
406 { 0x0160, 0x661160 },
407 { 0x0164, 0x661164 },
408 { 0x0168, 0x661168 },
409 { 0x016c, 0x66116c },
410 {}
411 }
412};
413
414static const struct nv50_disp_mthd_list
415gf110_disp_base_mthd_image = {
416 .mthd = 0x0020,
417 .addr = 0x000020,
418 .data = {
419 { 0x0400, 0x661400 },
420 { 0x0404, 0x661404 },
421 { 0x0408, 0x661408 },
422 { 0x040c, 0x66140c },
423 { 0x0410, 0x661410 },
424 {}
425 }
426};
427
428const struct nv50_disp_mthd_chan
429gf110_disp_base_mthd_chan = {
430 .name = "Base",
431 .addr = 0x001000,
432 .data = {
433 { "Global", 1, &gf110_disp_base_mthd_base },
434 { "Image", 2, &gf110_disp_base_mthd_image },
435 {}
436 }
437};
438
439struct nv50_disp_chan_impl
440gf110_disp_base_ofuncs = {
441 .base.ctor = nv50_disp_base_ctor,
442 .base.dtor = nv50_disp_dmac_dtor,
443 .base.init = gf110_disp_dmac_init,
444 .base.fini = gf110_disp_dmac_fini,
445 .base.ntfy = nv50_disp_chan_ntfy,
446 .base.map = nv50_disp_chan_map,
447 .base.rd32 = nv50_disp_chan_rd32,
448 .base.wr32 = nv50_disp_chan_wr32,
449 .chid = 1,
450 .attach = gf110_disp_dmac_object_attach,
451 .detach = gf110_disp_dmac_object_detach,
452};
453
454/*******************************************************************************
455 * EVO overlay channel objects
456 ******************************************************************************/
457
458static const struct nv50_disp_mthd_list
459gf110_disp_ovly_mthd_base = {
460 .mthd = 0x0000,
461 .data = {
462 { 0x0080, 0x665080 },
463 { 0x0084, 0x665084 },
464 { 0x0088, 0x665088 },
465 { 0x008c, 0x66508c },
466 { 0x0090, 0x665090 },
467 { 0x0094, 0x665094 },
468 { 0x00a0, 0x6650a0 },
469 { 0x00a4, 0x6650a4 },
470 { 0x00b0, 0x6650b0 },
471 { 0x00b4, 0x6650b4 },
472 { 0x00b8, 0x6650b8 },
473 { 0x00c0, 0x6650c0 },
474 { 0x00e0, 0x6650e0 },
475 { 0x00e4, 0x6650e4 },
476 { 0x00e8, 0x6650e8 },
477 { 0x0100, 0x665100 },
478 { 0x0104, 0x665104 },
479 { 0x0108, 0x665108 },
480 { 0x010c, 0x66510c },
481 { 0x0110, 0x665110 },
482 { 0x0118, 0x665118 },
483 { 0x011c, 0x66511c },
484 { 0x0120, 0x665120 },
485 { 0x0124, 0x665124 },
486 { 0x0130, 0x665130 },
487 { 0x0134, 0x665134 },
488 { 0x0138, 0x665138 },
489 { 0x013c, 0x66513c },
490 { 0x0140, 0x665140 },
491 { 0x0144, 0x665144 },
492 { 0x0148, 0x665148 },
493 { 0x014c, 0x66514c },
494 { 0x0150, 0x665150 },
495 { 0x0154, 0x665154 },
496 { 0x0158, 0x665158 },
497 { 0x015c, 0x66515c },
498 { 0x0160, 0x665160 },
499 { 0x0164, 0x665164 },
500 { 0x0168, 0x665168 },
501 { 0x016c, 0x66516c },
502 { 0x0400, 0x665400 },
503 { 0x0408, 0x665408 },
504 { 0x040c, 0x66540c },
505 { 0x0410, 0x665410 },
506 {}
507 }
508};
509
510static const struct nv50_disp_mthd_chan
511gf110_disp_ovly_mthd_chan = {
512 .name = "Overlay",
513 .addr = 0x001000,
514 .data = {
515 { "Global", 1, &gf110_disp_ovly_mthd_base },
516 {}
517 }
518};
519
/* Object functions for the GF110 overlay DMA channel (chid 5); reuses the
 * generic nv50/gf110 DMA-channel helpers for everything but construction.
 */
520struct nv50_disp_chan_impl
521gf110_disp_ovly_ofuncs = {
522 .base.ctor = nv50_disp_ovly_ctor,
523 .base.dtor = nv50_disp_dmac_dtor,
524 .base.init = gf110_disp_dmac_init,
525 .base.fini = gf110_disp_dmac_fini,
526 .base.ntfy = nv50_disp_chan_ntfy,
527 .base.map = nv50_disp_chan_map,
528 .base.rd32 = nv50_disp_chan_rd32,
529 .base.wr32 = nv50_disp_chan_wr32,
530 .chid = 5,
531 .attach = gf110_disp_dmac_object_attach,
532 .detach = gf110_disp_dmac_object_detach,
533};
534
535/*******************************************************************************
536 * EVO PIO channel base class
537 ******************************************************************************/
538
/* Bring a PIO display channel online: run the common channel init, unmask
 * its error interrupt, then activate the channel and poll its control
 * register until the hardware signals ready.  Returns -EBUSY on timeout.
 */
539static int
540gf110_disp_pioc_init(struct nvkm_object *object)
541{
542 struct nv50_disp_priv *priv = (void *)object->engine;
543 struct nv50_disp_pioc *pioc = (void *)object;
544 int chid = pioc->base.chid;
545 int ret;
546
547 ret = nv50_disp_chan_init(&pioc->base);
548 if (ret)
549 return ret;
550
551 /* enable error reporting */
552 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
553
554 /* activate channel */
555 nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
556 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
557 nv_error(pioc, "init: 0x%08x\n",
558 nv_rd32(priv, 0x610490 + (chid * 0x10)));
559 return -EBUSY;
560 }
561
562 return 0;
563}
564
/* Tear down a PIO display channel: deactivate it, wait for the hardware to
 * quiesce (failure is only fatal on suspend), mask its interrupts, then run
 * the common channel fini.
 */
565static int
566gf110_disp_pioc_fini(struct nvkm_object *object, bool suspend)
567{
568 struct nv50_disp_priv *priv = (void *)object->engine;
569 struct nv50_disp_pioc *pioc = (void *)object;
570 int chid = pioc->base.chid;
571
572 nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
573 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
574 nv_error(pioc, "timeout: 0x%08x\n",
575 nv_rd32(priv, 0x610490 + (chid * 0x10)));
576 if (suspend)
577 return -EBUSY;
578 }
579
580 /* disable error reporting and completion notification */
581 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
582 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
583
584 return nv50_disp_chan_fini(&pioc->base, suspend);
585}
586
587/*******************************************************************************
588 * EVO immediate overlay channel objects
589 ******************************************************************************/
590
/* Object functions for the immediate-overlay PIO channel (chid 9). */
591struct nv50_disp_chan_impl
592gf110_disp_oimm_ofuncs = {
593 .base.ctor = nv50_disp_oimm_ctor,
594 .base.dtor = nv50_disp_pioc_dtor,
595 .base.init = gf110_disp_pioc_init,
596 .base.fini = gf110_disp_pioc_fini,
597 .base.ntfy = nv50_disp_chan_ntfy,
598 .base.map = nv50_disp_chan_map,
599 .base.rd32 = nv50_disp_chan_rd32,
600 .base.wr32 = nv50_disp_chan_wr32,
601 .chid = 9,
602};
603
604/*******************************************************************************
605 * EVO cursor channel objects
606 ******************************************************************************/
607
/* Object functions for the cursor PIO channel (chid 13). */
608struct nv50_disp_chan_impl
609gf110_disp_curs_ofuncs = {
610 .base.ctor = nv50_disp_curs_ctor,
611 .base.dtor = nv50_disp_pioc_dtor,
612 .base.init = gf110_disp_pioc_init,
613 .base.fini = gf110_disp_pioc_fini,
614 .base.ntfy = nv50_disp_chan_ntfy,
615 .base.map = nv50_disp_chan_map,
616 .base.rd32 = nv50_disp_chan_rd32,
617 .base.wr32 = nv50_disp_chan_wr32,
618 .chid = 13,
619};
620
621/*******************************************************************************
622 * Base display object
623 ******************************************************************************/
624
/* SCANOUTPOS method: read the head's raster timings (totals, blanking
 * start/end) and the current scanout line/pixel position, bracketed by
 * two timestamps, into the caller's nv04_disp_scanoutpos_v0 args.
 * NOTE(review): on the failure path 'ret' is expected to have been set by
 * the nvif_unpack() macro — confirm the macro contract.
 */
625int
626gf110_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
627{
628 const u32 total = nv_rd32(priv, 0x640414 + (head * 0x300));
629 const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
630 const u32 blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
631 union {
632 struct nv04_disp_scanoutpos_v0 v0;
633 } *args = data;
634 int ret;
635
636 nv_ioctl(object, "disp scanoutpos size %d\n", size);
637 if (nvif_unpack(args->v0, 0, 0, false)) {
638 nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
639 args->v0.vblanke = (blanke & 0xffff0000) >> 16;
640 args->v0.hblanke = (blanke & 0x0000ffff);
641 args->v0.vblanks = (blanks & 0xffff0000) >> 16;
642 args->v0.hblanks = (blanks & 0x0000ffff);
643 args->v0.vtotal = ( total & 0xffff0000) >> 16;
644 args->v0.htotal = ( total & 0x0000ffff);
645 args->v0.time[0] = ktime_to_ns(ktime_get());
646 args->v0.vline = /* vline read locks hline */
647 nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
648 args->v0.time[1] = ktime_to_ns(ktime_get());
649 args->v0.hline =
650 nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
651 } else
652 return ret;
653
654 return 0;
655}
656
/* Initialise the display engine's main object: mirror per-head/DAC/SOR
 * capability registers into the EVO-visible area, take ownership of the
 * display from the VBIOS, point the hardware at the hash-table/object
 * memory, and configure interrupt masks.
 */
657static int
658gf110_disp_main_init(struct nvkm_object *object)
659{
660 struct nv50_disp_priv *priv = (void *)object->engine;
661 struct nv50_disp_base *base = (void *)object;
662 int ret, i;
663 u32 tmp;
664
665 ret = nvkm_parent_init(&base->base);
666 if (ret)
667 return ret;
668
669 /* The below segments of code copying values from one register to
670 * another appear to inform EVO of the display capabilities or
671 * something similar.
672 */
673
674 /* ... CRTC caps */
675 for (i = 0; i < priv->head.nr; i++) {
676 tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
677 nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
678 tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
679 nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
680 tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
681 nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
682 }
683
684 /* ... DAC caps */
685 for (i = 0; i < priv->dac.nr; i++) {
686 tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
687 nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
688 }
689
690 /* ... SOR caps */
691 for (i = 0; i < priv->sor.nr; i++) {
692 tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
693 nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
694 }
695
696 /* steal display away from vbios, or something like that */
697 if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
698 nv_wr32(priv, 0x6100ac, 0x00000100);
699 nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
700 if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
701 nv_error(priv, "timeout acquiring display\n");
702 return -EBUSY;
703 }
704 }
705
706 /* point at display engine memory area (hash table, objects) */
707 nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
708
709 /* enable supervisor interrupts, disable everything else */
710 nv_wr32(priv, 0x610090, 0x00000000);
711 nv_wr32(priv, 0x6100a0, 0x00000000);
712 nv_wr32(priv, 0x6100b0, 0x00000307);
713
714 /* disable underflow reporting, preventing an intermittent issue
715 * on some gk104 boards where the production vbios left this
716 * setting enabled by default.
717 *
718 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
719 */
720 for (i = 0; i < priv->head.nr; i++)
721 nv_mask(priv, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
722
723 return 0;
724}
725
/* Shut down the main display object: mask all display interrupts and run
 * the generic parent fini.
 */
726static int
727gf110_disp_main_fini(struct nvkm_object *object, bool suspend)
728{
729 struct nv50_disp_priv *priv = (void *)object->engine;
730 struct nv50_disp_base *base = (void *)object;
731
732 /* disable all interrupts */
733 nv_wr32(priv, 0x6100b0, 0x00000000);
734
735 return nvkm_parent_fini(&base->base, suspend);
736}
737
/* Object functions for the GF110 base display object. */
738struct nvkm_ofuncs
739gf110_disp_main_ofuncs = {
740 .ctor = nv50_disp_main_ctor,
741 .dtor = nv50_disp_main_dtor,
742 .init = gf110_disp_main_init,
743 .fini = gf110_disp_main_fini,
744 .mthd = nv50_disp_main_mthd,
745 .ntfy = nvkm_disp_ntfy,
746};
747
/* Class list exposing the GF110_DISP main object to userspace. */
748static struct nvkm_oclass
749gf110_disp_main_oclass[] = {
750 { GF110_DISP, &gf110_disp_main_ofuncs },
751 {}
752};
753
/* Child classes of the display object: core/base/overlay DMA channels plus
 * the overlay-immediate and cursor PIO channels.
 */
754static struct nvkm_oclass
755gf110_disp_sclass[] = {
756 { GF110_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
757 { GF110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
758 { GF110_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
759 { GF110_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
760 { GF110_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
761 {}
762};
763
764/*******************************************************************************
765 * Display engine implementation
766 ******************************************************************************/
767
/* Enable the vblank interrupt for 'head' (bit 0 of 0x6100c0 + head*0x800). */
768static void
769gf110_disp_vblank_init(struct nvkm_event *event, int type, int head)
770{
771 struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
772 nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
773}
774
/* Disable the vblank interrupt for 'head'. */
775static void
776gf110_disp_vblank_fini(struct nvkm_event *event, int type, int head)
777{
778 struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
779 nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
780}
781
/* Event hooks wiring the generic vblank notifier to the masks above. */
782const struct nvkm_event_func
783gf110_disp_vblank_func = {
784 .ctor = nvkm_disp_vblank_ctor,
785 .init = gf110_disp_vblank_init,
786 .fini = gf110_disp_vblank_fini,
787};
788
/* Decode an OR's method-control value into a DCB output type + link mask,
 * then find the matching output on the disp's output list and look up its
 * VBIOS output table entry.  Returns the output (with *data/ver/hdr/cnt/
 * len/info filled from the VBIOS) or NULL if nothing matches.
 */
789static struct nvkm_output *
790exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
791 u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
792 struct nvbios_outp *info)
793{
794 struct nvkm_bios *bios = nvkm_bios(priv);
795 struct nvkm_output *outp;
796 u16 mask, type;
797
798 if (or < 4) {
799 type = DCB_OUTPUT_ANALOG;
800 mask = 0;
801 } else {
802 or -= 4;
803 switch (ctrl & 0x00000f00) {
804 case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
805 case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
806 case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
807 case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
808 case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
809 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
810 default:
811 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
812 return NULL;
813 }
814 }
815
816 /* build DCB hash mask: link bits, OR index, head index */
817 mask = 0x00c0 & (mask << 6);
818 mask |= 0x0001 << or;
819 mask |= 0x0100 << head;
820
821 list_for_each_entry(outp, &priv->base.outp, head) {
822 if ((outp->info.hasht & 0xff) == type &&
823 (outp->info.hashm & mask) == mask) {
824 *data = nvbios_outp_match(bios, outp->info.hasht,
825 outp->info.hashm,
826 ver, hdr, cnt, len, info);
827 if (!*data)
828 return NULL;
829 return outp;
830 }
831 }
832
833 return NULL;
834}
834
/* Find which OR is driving 'head' (scanning 0x640180 + or*0x20), look the
 * output up, and execute VBIOS init script 'id' for it.  Returns the
 * output, or NULL if no OR drives this head.
 */
835static struct nvkm_output *
836exec_script(struct nv50_disp_priv *priv, int head, int id)
837{
838 struct nvkm_bios *bios = nvkm_bios(priv);
839 struct nvkm_output *outp;
840 struct nvbios_outp info;
841 u8 ver, hdr, cnt, len;
842 u32 data, ctrl = 0;
843 int or;
844
845 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
846 ctrl = nv_rd32(priv, 0x640180 + (or * 0x20));
847 if (ctrl & (1 << head))
848 break;
849 }
850
851 if (or == 8)
852 return NULL;
853
854 outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
855 if (outp) {
856 struct nvbios_init init = {
857 .subdev = nv_subdev(priv),
858 .bios = bios,
859 .offset = info.script[id],
860 .outp = &outp->info,
861 .crtc = head,
862 .execute = 1,
863 };
864
865 nvbios_exec(&init);
866 }
867
868 return outp;
869}
870
/* Find the output driving 'head' (via the armed state at 0x660180), derive
 * its output-config value (*conf) from type/ctrl, and — unless id==0xff,
 * which only queries — execute the matching VBIOS clock-comparison script
 * for 'pclk'.  Returns the output, or NULL.
 */
871static struct nvkm_output *
872exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
873{
874 struct nvkm_bios *bios = nvkm_bios(priv);
875 struct nvkm_output *outp;
876 struct nvbios_outp info1;
877 struct nvbios_ocfg info2;
878 u8 ver, hdr, cnt, len;
879 u32 data, ctrl = 0;
880 int or;
881
882 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
883 ctrl = nv_rd32(priv, 0x660180 + (or * 0x20));
884 if (ctrl & (1 << head))
885 break;
886 }
887
888 if (or == 8)
889 return NULL;
890
891 outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
892 if (!outp)
893 return NULL;
894
895 switch (outp->info.type) {
896 case DCB_OUTPUT_TMDS:
897 *conf = (ctrl & 0x00000f00) >> 8;
898 if (pclk >= 165000)
899 *conf |= 0x0100;
900 break;
901 case DCB_OUTPUT_LVDS:
902 *conf = priv->sor.lvdsconf;
903 break;
904 case DCB_OUTPUT_DP:
905 *conf = (ctrl & 0x00000f00) >> 8;
906 break;
907 case DCB_OUTPUT_ANALOG:
908 default:
909 *conf = 0x00ff;
910 break;
911 }
912
913 data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
914 if (data && id < 0xff) {
915 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
916 if (data) {
917 struct nvbios_init init = {
918 .subdev = nv_subdev(priv),
919 .bios = bios,
920 .offset = data,
921 .outp = &outp->info,
922 .crtc = head,
923 .execute = 1,
924 };
925
926 nvbios_exec(&init);
927 }
928 }
929
930 return outp;
931}
932
/* Supervisor 1.0 handler: run VBIOS script 1 for the head's output. */
933static void
934gf110_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
935{
936 exec_script(priv, head, 1);
937}
938
/* Supervisor 2.0 handler: run VBIOS script 2 for the head's output; for DP
 * outputs additionally execute the DP info-table script[4] and mark link
 * training as not done.
 */
939static void
940gf110_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
941{
942 struct nvkm_output *outp = exec_script(priv, head, 2);
943
944 /* see note in nv50_disp_intr_unk20_0() */
945 if (outp && outp->info.type == DCB_OUTPUT_DP) {
946 struct nvkm_output_dp *outpdp = (void *)outp;
947 struct nvbios_init init = {
948 .subdev = nv_subdev(priv),
949 .bios = nvkm_bios(priv),
950 .outp = &outp->info,
951 .crtc = head,
952 .offset = outpdp->info.script[4],
953 .execute = 1,
954 };
955
956 nvbios_exec(&init);
957 atomic_set(&outpdp->lt.done, 0);
958 }
959}
960
/* Supervisor 2.1 handler: program the head's VPLL to the armed pixel clock
 * (read from 0x660450, in kHz) and clear 0x612200 for the head.
 */
961static void
962gf110_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
963{
964 struct nvkm_devinit *devinit = nvkm_devinit(priv);
965 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
966 if (pclk)
967 devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
968 nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
969}
970
/* Compute and program DP transfer-unit parameters for 'head'/'outp':
 * symbols available per hblank/vblank and the TU watermark, derived from
 * link count/bandwidth, pixel clock and bits-per-pixel.  Algorithm per the
 * in-code references to the tegra driver comments.
 */
971static void
972gf110_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
973 struct dcb_output *outp)
974{
975 const int or = ffs(outp->or) - 1;
976 const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
977 const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
978 const s32 vactive = nv_rd32(priv, 0x660414 + (head * 0x300)) & 0xffff;
979 const s32 vblanke = nv_rd32(priv, 0x66041c + (head * 0x300)) & 0xffff;
980 const s32 vblanks = nv_rd32(priv, 0x660420 + (head * 0x300)) & 0xffff;
981 const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
982 const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
983 const u32 hoff = (head * 0x800);
984 const u32 soff = ( or * 0x800);
985 const u32 loff = (link * 0x080) + soff;
986 const u32 symbol = 100000;
987 const u32 TU = 64;
988 u32 dpctrl = nv_rd32(priv, 0x61c10c + loff);
989 u32 clksor = nv_rd32(priv, 0x612300 + soff);
990 u32 datarate, link_nr, link_bw, bits;
991 u64 ratio, value;
992
993 /* lane count from DP control; link rate in kHz (27MHz units) */
994 link_nr = hweight32(dpctrl & 0x000f0000);
995 link_bw = (clksor & 0x007c0000) >> 18;
996 link_bw *= 27000;
997
998 /* symbols/hblank - algorithm taken from comments in tegra driver */
999 value = vblanke + vactive - vblanks - 7;
1000 value = value * link_bw;
1001 do_div(value, pclk);
1002 value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
1003 nv_mask(priv, 0x616620 + hoff, 0x0000ffff, value);
1004
1005 /* symbols/vblank - algorithm taken from comments in tegra driver */
1006 value = vblanks - vblanke - 25;
1007 value = value * link_bw;
1008 do_div(value, pclk);
1009 value = value - ((36 / link_nr) + 3) - 1;
1010 nv_mask(priv, 0x616624 + hoff, 0x00ffffff, value);
1011
1012 /* watermark */
1013 if ((conf & 0x3c0) == 0x180) bits = 30;
1014 else if ((conf & 0x3c0) == 0x140) bits = 24;
1015 else bits = 18;
1016 datarate = (pclk * bits) / 8;
1017
1018 ratio = datarate;
1019 ratio *= symbol;
1020 do_div(ratio, link_nr * link_bw);
1021
1022 value = (symbol - ratio) * TU;
1023 value *= ratio;
1024 do_div(value, symbol);
1025 do_div(value, symbol);
1026
1027 value += 5;
1028 value |= 0x08000000;
1029
1030 nv_wr32(priv, 0x616610 + hoff, value);
1031}
1031
/* Supervisor 2.2 handler: query the output config, (re)train DP links at
 * the effective symbol rate, execute clock script 0, then program the
 * OR's clock-control register according to output type.
 */
1032static void
1033gf110_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
1034{
1035 struct nvkm_output *outp;
1036 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
1037 u32 conf, addr, data;
1038
1039 outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
1040 if (!outp)
1041 return;
1042
1043 /* see note in nv50_disp_intr_unk20_2() */
1044 if (outp->info.type == DCB_OUTPUT_DP) {
1045 u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
1046 /* scale pclk by bits-per-pixel from the sync config */
1047 switch ((sync & 0x000003c0) >> 6) {
1048 case 6: pclk = pclk * 30; break;
1049 case 5: pclk = pclk * 24; break;
1050 case 2:
1051 default:
1052 pclk = pclk * 18;
1053 break;
1054 }
1055
1056 if (nvkm_output_dp_train(outp, pclk, true))
1057 ERR("link not trained before attach\n");
1058 } else {
1059 if (priv->sor.magic)
1060 priv->sor.magic(outp);
1061 }
1062
1063 exec_clkcmp(priv, head, 0, pclk, &conf);
1064
1065 if (outp->info.type == DCB_OUTPUT_ANALOG) {
1066 addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
1067 data = 0x00000000;
1068 } else {
1069 addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
1070 data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
1071 switch (outp->info.type) {
1072 case DCB_OUTPUT_TMDS:
1073 nv_mask(priv, addr, 0x007c0000, 0x00280000);
1074 break;
1075 case DCB_OUTPUT_DP:
1076 gf110_disp_intr_unk2_2_tu(priv, head, &outp->info);
1077 break;
1078 default:
1079 break;
1080 }
1081 }
1082
1083 nv_mask(priv, addr, 0x00000707, data);
1084}
1084
/* Supervisor 3.0 handler: execute clock-comparison script 1 for the head. */
1085static void
1086gf110_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
1087{
1088 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
1089 u32 conf;
1090
1091 exec_clkcmp(priv, head, 1, pclk, &conf);
1092}
1093
/* Deferred supervisor-interrupt worker: read each head's status mask from
 * 0x6101d4, dispatch to the stage handlers for whichever supervisor phase
 * (1/2/4 in priv->super) is pending, then clear the masks and ack via
 * 0x6101d0.
 */
1094void
1095gf110_disp_intr_supervisor(struct work_struct *work)
1096{
1097 struct nv50_disp_priv *priv =
1098 container_of(work, struct nv50_disp_priv, supervisor);
1099 struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
1100 u32 mask[4];
1101 int head;
1102
1103 nv_debug(priv, "supervisor %d\n", ffs(priv->super));
1104 for (head = 0; head < priv->head.nr; head++) {
1105 mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
1106 nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
1107 }
1108
1109 if (priv->super & 0x00000001) {
1110 nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
1111 for (head = 0; head < priv->head.nr; head++) {
1112 if (!(mask[head] & 0x00001000))
1113 continue;
1114 nv_debug(priv, "supervisor 1.0 - head %d\n", head);
1115 gf110_disp_intr_unk1_0(priv, head);
1116 }
1117 } else
1118 if (priv->super & 0x00000002) {
1119 for (head = 0; head < priv->head.nr; head++) {
1120 if (!(mask[head] & 0x00001000))
1121 continue;
1122 nv_debug(priv, "supervisor 2.0 - head %d\n", head);
1123 gf110_disp_intr_unk2_0(priv, head);
1124 }
1125 for (head = 0; head < priv->head.nr; head++) {
1126 if (!(mask[head] & 0x00010000))
1127 continue;
1128 nv_debug(priv, "supervisor 2.1 - head %d\n", head);
1129 gf110_disp_intr_unk2_1(priv, head);
1130 }
1131 for (head = 0; head < priv->head.nr; head++) {
1132 if (!(mask[head] & 0x00001000))
1133 continue;
1134 nv_debug(priv, "supervisor 2.2 - head %d\n", head);
1135 gf110_disp_intr_unk2_2(priv, head);
1136 }
1137 } else
1138 if (priv->super & 0x00000004) {
1139 for (head = 0; head < priv->head.nr; head++) {
1140 if (!(mask[head] & 0x00001000))
1141 continue;
1142 nv_debug(priv, "supervisor 3.0 - head %d\n", head);
1143 gf110_disp_intr_unk4_0(priv, head);
1144 }
1145 }
1146
1147 for (head = 0; head < priv->head.nr; head++)
1148 nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000);
1149 nv_wr32(priv, 0x6101d0, 0x80000000);
1150}
1151
/* Channel-error interrupt handler: log the faulting method/data from the
 * per-channel error regs at 0x6101f0, dump the relevant method state for
 * core (chid 0), base (1-4) or overlay (5-8) channels, then ack the error.
 */
1152static void
1153gf110_disp_intr_error(struct nv50_disp_priv *priv, int chid)
1154{
1155 const struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
1156 u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
1157 u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
1158 u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
1159
1160 nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
1161 "0x%08x 0x%08x\n",
1162 chid, (mthd & 0x0000ffc), data, mthd, unkn);
1163
1164 if (chid == 0) {
1165 switch (mthd & 0xffc) {
1166 case 0x0080:
1167 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
1168 impl->mthd.core);
1169 break;
1170 default:
1171 break;
1172 }
1173 } else
1174 if (chid <= 4) {
1175 switch (mthd & 0xffc) {
1176 case 0x0080:
1177 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
1178 impl->mthd.base);
1179 break;
1180 default:
1181 break;
1182 }
1183 } else
1184 if (chid <= 8) {
1185 switch (mthd & 0xffc) {
1186 case 0x0080:
1187 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 5,
1188 impl->mthd.ovly);
1189 break;
1190 default:
1191 break;
1192 }
1193 }
1194
1195 nv_wr32(priv, 0x61009c, (1 << chid));
1196 nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
1197}
1198
/* Top-level display interrupt handler: dispatch channel completion events
 * (bit 0), channel errors (bit 1), supervisor requests (bit 20, deferred
 * to the workqueue), and per-head vblank interrupts (bits 24+).
 */
1199void
1200gf110_disp_intr(struct nvkm_subdev *subdev)
1201{
1202 struct nv50_disp_priv *priv = (void *)subdev;
1203 u32 intr = nv_rd32(priv, 0x610088);
1204 int i;
1205
1206 if (intr & 0x00000001) {
1207 u32 stat = nv_rd32(priv, 0x61008c);
1208 while (stat) {
1209 int chid = __ffs(stat); stat &= ~(1 << chid);
1210 nv50_disp_chan_uevent_send(priv, chid);
1211 nv_wr32(priv, 0x61008c, 1 << chid);
1212 }
1213 intr &= ~0x00000001;
1214 }
1215
1216 if (intr & 0x00000002) {
1217 u32 stat = nv_rd32(priv, 0x61009c);
1218 int chid = ffs(stat) - 1;
1219 if (chid >= 0)
1220 gf110_disp_intr_error(priv, chid);
1221 intr &= ~0x00000002;
1222 }
1223
1224 if (intr & 0x00100000) {
1225 u32 stat = nv_rd32(priv, 0x6100ac);
1226 if (stat & 0x00000007) {
1227 priv->super = (stat & 0x00000007);
1228 schedule_work(&priv->supervisor);
1229 nv_wr32(priv, 0x6100ac, priv->super);
1230 stat &= ~0x00000007;
1231 }
1232
1233 if (stat) {
1234 nv_info(priv, "unknown intr24 0x%08x\n", stat);
1235 nv_wr32(priv, 0x6100ac, stat);
1236 }
1237
1238 intr &= ~0x00100000;
1239 }
1240
1241 for (i = 0; i < priv->head.nr; i++) {
1242 u32 mask = 0x01000000 << i;
1243 if (mask & intr) {
1244 u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
1245 if (stat & 0x00000001)
1246 nvkm_disp_vblank(&priv->base, i);
1247 nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
1248 nv_rd32(priv, 0x6100c0 + (i * 0x800));
1249 }
1250 }
1251}
1252
/* Construct the GF110 display engine: head count comes from 0x022448,
 * with fixed 3 DACs / 4 SORs; wires up classes, interrupt handler,
 * supervisor work and the per-output-type function pointers.
 */
1253static int
1254gf110_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
1255 struct nvkm_oclass *oclass, void *data, u32 size,
1256 struct nvkm_object **pobject)
1257{
1258 struct nv50_disp_priv *priv;
1259 int heads = nv_rd32(parent, 0x022448);
1260 int ret;
1261
1262 ret = nvkm_disp_create(parent, engine, oclass, heads,
1263 "PDISP", "display", &priv);
1264 *pobject = nv_object(priv);
1265 if (ret)
1266 return ret;
1267
1268 ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
1269 if (ret)
1270 return ret;
1271
1272 nv_engine(priv)->sclass = gf110_disp_main_oclass;
1273 nv_engine(priv)->cclass = &nv50_disp_cclass;
1274 nv_subdev(priv)->intr = gf110_disp_intr;
1275 INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
1276 priv->sclass = gf110_disp_sclass;
1277 priv->head.nr = heads;
1278 priv->dac.nr = 3;
1279 priv->sor.nr = 4;
1280 priv->dac.power = nv50_dac_power;
1281 priv->dac.sense = nv50_dac_sense;
1282 priv->sor.power = nv50_sor_power;
1283 priv->sor.hda_eld = gf110_hda_eld;
1284 priv->sor.hdmi = gf110_hdmi_ctrl;
1285 return 0;
1286}
1287
/* Output (OR) classes supported by GF110: DP SORs only need a
 * chipset-specific implementation here.
 */
1288struct nvkm_oclass *
1289gf110_disp_outp_sclass[] = {
1290 &gf110_sor_dp_impl.base.base,
1291 NULL
1292};
1293
/* Top-level GF110 display implementation descriptor: ties together the
 * constructor, vblank hooks, method tables and scanoutpos handler.
 */
1294struct nvkm_oclass *
1295gf110_disp_oclass = &(struct nv50_disp_impl) {
1296 .base.base.handle = NV_ENGINE(DISP, 0x90),
1297 .base.base.ofuncs = &(struct nvkm_ofuncs) {
1298 .ctor = gf110_disp_ctor,
1299 .dtor = _nvkm_disp_dtor,
1300 .init = _nvkm_disp_init,
1301 .fini = _nvkm_disp_fini,
1302 },
1303 .base.vblank = &gf110_disp_vblank_func,
1304 .base.outp = gf110_disp_outp_sclass,
1305 .mthd.core = &gf110_disp_core_mthd_chan,
1306 .mthd.base = &gf110_disp_base_mthd_chan,
1307 .mthd.ovly = &gf110_disp_ovly_mthd_chan,
1308 .mthd.prev = -0x020000,
1309 .head.scanoutpos = gf110_disp_main_scanoutpos,
1310}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
new file mode 100644
index 000000000000..186fd3ac78f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -0,0 +1,536 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "nv50.h"
25#include "rootnv50.h"
26
27#include <subdev/bios.h>
28#include <subdev/bios/disp.h>
29#include <subdev/bios/init.h>
30#include <subdev/bios/pll.h>
31#include <subdev/devinit.h>
32
/* Enable the vblank interrupt for 'head' (new-style nvkm_device accessors). */
33void
34gf119_disp_vblank_init(struct nv50_disp *disp, int head)
35{
36 struct nvkm_device *device = disp->base.engine.subdev.device;
37 nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
38}
39
/* Disable the vblank interrupt for 'head'. */
40void
41gf119_disp_vblank_fini(struct nv50_disp *disp, int head)
42{
43 struct nvkm_device *device = disp->base.engine.subdev.device;
44 nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
45}
46
/* Decode an OR's method-control value into a DCB output type + link mask,
 * then find the matching output on the disp's output list and look up its
 * VBIOS output table entry; new-style (nvkm_subdev/device) port of the
 * gf110 helper.
 */
47static struct nvkm_output *
48exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
49 u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
50 struct nvbios_outp *info)
51{
52 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
53 struct nvkm_bios *bios = subdev->device->bios;
54 struct nvkm_output *outp;
55 u16 mask, type;
56
57 if (or < 4) {
58 type = DCB_OUTPUT_ANALOG;
59 mask = 0;
60 } else {
61 or -= 4;
62 switch (ctrl & 0x00000f00) {
63 case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
64 case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
65 case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
66 case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
67 case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
68 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
69 default:
70 nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
71 return NULL;
72 }
73 }
74
75 /* build DCB hash mask: link bits, OR index, head index */
76 mask = 0x00c0 & (mask << 6);
77 mask |= 0x0001 << or;
78 mask |= 0x0100 << head;
79
80 list_for_each_entry(outp, &disp->base.outp, head) {
81 if ((outp->info.hasht & 0xff) == type &&
82 (outp->info.hashm & mask) == mask) {
83 *data = nvbios_outp_match(bios, outp->info.hasht,
84 outp->info.hashm,
85 ver, hdr, cnt, len, info);
86 if (!*data)
87 return NULL;
88 return outp;
89 }
90 }
91
92 return NULL;
93}
93
/* Find the OR driving 'head' and execute VBIOS init script 'id' for its
 * output; new-style port of the gf110 helper.
 */
94static struct nvkm_output *
95exec_script(struct nv50_disp *disp, int head, int id)
96{
97 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
98 struct nvkm_device *device = subdev->device;
99 struct nvkm_bios *bios = device->bios;
100 struct nvkm_output *outp;
101 struct nvbios_outp info;
102 u8 ver, hdr, cnt, len;
103 u32 data, ctrl = 0;
104 int or;
105
106 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
107 ctrl = nvkm_rd32(device, 0x640180 + (or * 0x20));
108 if (ctrl & (1 << head))
109 break;
110 }
111
112 if (or == 8)
113 return NULL;
114
115 outp = exec_lookup(disp, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
116 if (outp) {
117 struct nvbios_init init = {
118 .subdev = subdev,
119 .bios = bios,
120 .offset = info.script[id],
121 .outp = &outp->info,
122 .crtc = head,
123 .execute = 1,
124 };
125
126 nvbios_exec(&init);
127 }
128
129 return outp;
130}
131
/* Find the output driving 'head', derive its config value (*conf), and —
 * unless id==0xff (query only) — execute the matching VBIOS clock script
 * for 'pclk'; new-style port of the gf110 helper.
 */
132static struct nvkm_output *
133exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
134{
135 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
136 struct nvkm_device *device = subdev->device;
137 struct nvkm_bios *bios = device->bios;
138 struct nvkm_output *outp;
139 struct nvbios_outp info1;
140 struct nvbios_ocfg info2;
141 u8 ver, hdr, cnt, len;
142 u32 data, ctrl = 0;
143 int or;
144
145 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
146 ctrl = nvkm_rd32(device, 0x660180 + (or * 0x20));
147 if (ctrl & (1 << head))
148 break;
149 }
150
151 if (or == 8)
152 return NULL;
153
154 outp = exec_lookup(disp, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
155 if (!outp)
156 return NULL;
157
158 switch (outp->info.type) {
159 case DCB_OUTPUT_TMDS:
160 *conf = (ctrl & 0x00000f00) >> 8;
161 if (pclk >= 165000)
162 *conf |= 0x0100;
163 break;
164 case DCB_OUTPUT_LVDS:
165 *conf = disp->sor.lvdsconf;
166 break;
167 case DCB_OUTPUT_DP:
168 *conf = (ctrl & 0x00000f00) >> 8;
169 break;
170 case DCB_OUTPUT_ANALOG:
171 default:
172 *conf = 0x00ff;
173 break;
174 }
175
176 data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
177 if (data && id < 0xff) {
178 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
179 if (data) {
180 struct nvbios_init init = {
181 .subdev = subdev,
182 .bios = bios,
183 .offset = data,
184 .outp = &outp->info,
185 .crtc = head,
186 .execute = 1,
187 };
188
189 nvbios_exec(&init);
190 }
191 }
192
193 return outp;
194}
195
/* Supervisor 1.0 handler: run VBIOS script 1 for the head's output. */
196static void
197gf119_disp_intr_unk1_0(struct nv50_disp *disp, int head)
198{
199 exec_script(disp, head, 1);
200}
201
/* Supervisor 2.0 handler: run VBIOS script 2 for the head's output; for DP
 * outputs additionally execute the DP info-table script[4] and mark link
 * training as not done.
 */
202static void
203gf119_disp_intr_unk2_0(struct nv50_disp *disp, int head)
204{
205 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
206 struct nvkm_output *outp = exec_script(disp, head, 2);
207
208 /* see note in nv50_disp_intr_unk20_0() */
209 if (outp && outp->info.type == DCB_OUTPUT_DP) {
210 struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
211 struct nvbios_init init = {
212 .subdev = subdev,
213 .bios = subdev->device->bios,
214 .outp = &outp->info,
215 .crtc = head,
216 .offset = outpdp->info.script[4],
217 .execute = 1,
218 };
219
220 nvbios_exec(&init);
221 atomic_set(&outpdp->lt.done, 0);
222 }
223}
224
/* Supervisor 2.1 handler: program the head's VPLL to the armed pixel clock
 * and clear 0x612200 for the head.
 */
225static void
226gf119_disp_intr_unk2_1(struct nv50_disp *disp, int head)
227{
228 struct nvkm_device *device = disp->base.engine.subdev.device;
229 struct nvkm_devinit *devinit = device->devinit;
230 u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
231 if (pclk)
232 nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head, pclk);
233 nvkm_wr32(device, 0x612200 + (head * 0x800), 0x00000000);
234}
235
/* Compute and program DP transfer-unit parameters for 'head'/'outp':
 * symbols per hblank/vblank and the TU watermark, derived from link
 * count/bandwidth, pixel clock and bits-per-pixel; new-style port of the
 * gf110 routine.
 */
236static void
237gf119_disp_intr_unk2_2_tu(struct nv50_disp *disp, int head,
238 struct dcb_output *outp)
239{
240 struct nvkm_device *device = disp->base.engine.subdev.device;
241 const int or = ffs(outp->or) - 1;
242 const u32 ctrl = nvkm_rd32(device, 0x660200 + (or * 0x020));
243 const u32 conf = nvkm_rd32(device, 0x660404 + (head * 0x300));
244 const s32 vactive = nvkm_rd32(device, 0x660414 + (head * 0x300)) & 0xffff;
245 const s32 vblanke = nvkm_rd32(device, 0x66041c + (head * 0x300)) & 0xffff;
246 const s32 vblanks = nvkm_rd32(device, 0x660420 + (head * 0x300)) & 0xffff;
247 const u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
248 const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
249 const u32 hoff = (head * 0x800);
250 const u32 soff = ( or * 0x800);
251 const u32 loff = (link * 0x080) + soff;
252 const u32 symbol = 100000;
253 const u32 TU = 64;
254 u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
255 u32 clksor = nvkm_rd32(device, 0x612300 + soff);
256 u32 datarate, link_nr, link_bw, bits;
257 u64 ratio, value;
258
259 /* lane count from DP control; link rate in kHz (27MHz units) */
260 link_nr = hweight32(dpctrl & 0x000f0000);
261 link_bw = (clksor & 0x007c0000) >> 18;
262 link_bw *= 27000;
263
264 /* symbols/hblank - algorithm taken from comments in tegra driver */
265 value = vblanke + vactive - vblanks - 7;
266 value = value * link_bw;
267 do_div(value, pclk);
268 value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
269 nvkm_mask(device, 0x616620 + hoff, 0x0000ffff, value);
270
271 /* symbols/vblank - algorithm taken from comments in tegra driver */
272 value = vblanks - vblanke - 25;
273 value = value * link_bw;
274 do_div(value, pclk);
275 value = value - ((36 / link_nr) + 3) - 1;
276 nvkm_mask(device, 0x616624 + hoff, 0x00ffffff, value);
277
278 /* watermark */
279 if ((conf & 0x3c0) == 0x180) bits = 30;
280 else if ((conf & 0x3c0) == 0x140) bits = 24;
281 else bits = 18;
282 datarate = (pclk * bits) / 8;
283
284 ratio = datarate;
285 ratio *= symbol;
286 do_div(ratio, link_nr * link_bw);
287
288 value = (symbol - ratio) * TU;
289 value *= ratio;
290 do_div(value, symbol);
291 do_div(value, symbol);
292
293 value += 5;
294 value |= 0x08000000;
295
296 nvkm_wr32(device, 0x616610 + hoff, value);
297}
297
/* Supervisor 2.2: bring up the output path (OR) routed to this head —
 * execute the clock-comparison VBIOS scripts, (re)train DP links, and
 * program OR clock control for the new mode.
 */
static void
gf119_disp_intr_unk2_2(struct nv50_disp *disp, int head)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_output *outp;
	u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
	u32 conf, addr, data;

	/* Look up which output is routed to this head (0xff = probe only). */
	outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
	if (!outp)
		return;

	/* see note in nv50_disp_intr_unk20_2() */
	if (outp->info.type == DCB_OUTPUT_DP) {
		/* Convert pixel clock to a link-rate figure by multiplying
		 * with the per-pixel bit count taken from the head config
		 * (NOTE(review): 30/24/18 presumably = bpp — confirm).
		 */
		u32 sync = nvkm_rd32(device, 0x660404 + (head * 0x300));
		switch ((sync & 0x000003c0) >> 6) {
		case 6: pclk = pclk * 30; break;
		case 5: pclk = pclk * 24; break;
		case 2:
		default:
			pclk = pclk * 18;
			break;
		}

		/* The link must already be trained by the time we attach. */
		if (nvkm_output_dp_train(outp, pclk, true))
			OUTP_ERR(outp, "link not trained before attach");
	} else {
		/* Some chipsets need extra (unnamed) SOR setup here. */
		if (disp->func->sor.magic)
			disp->func->sor.magic(outp);
	}

	/* Run the real clock-comparison script for this output. */
	exec_clkcmp(disp, head, 0, pclk, &conf);

	if (outp->info.type == DCB_OUTPUT_ANALOG) {
		/* DAC clock control. */
		addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
		data = 0x00000000;
	} else {
		/* SOR clock control. */
		addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
		data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
		switch (outp->info.type) {
		case DCB_OUTPUT_TMDS:
			nvkm_mask(device, addr, 0x007c0000, 0x00280000);
			break;
		case DCB_OUTPUT_DP:
			/* Program DP transfer-unit/watermark settings. */
			gf119_disp_intr_unk2_2_tu(disp, head, &outp->info);
			break;
		default:
			break;
		}
	}

	nvkm_mask(device, addr, 0x00000707, data);
}
351
352static void
353gf119_disp_intr_unk4_0(struct nv50_disp *disp, int head)
354{
355 struct nvkm_device *device = disp->base.engine.subdev.device;
356 u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
357 u32 conf;
358
359 exec_clkcmp(disp, head, 1, pclk, &conf);
360}
361
/* Deferred (workqueue) handler for display supervisor interrupts.
 *
 * The supervisor fires in three stages during a modeset; disp->super
 * (set by gf119_disp_intr()) records which stage we're in.  Each stage
 * consults the per-head status latched in 0x6101d4 to decide which
 * heads need servicing, then the latches and the supervisor pending
 * bit are cleared to let the next stage proceed.
 */
void
gf119_disp_intr_supervisor(struct work_struct *work)
{
	struct nv50_disp *disp =
		container_of(work, struct nv50_disp, supervisor);
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	/* NOTE(review): fixed at 4 entries while head.nr comes from hw
	 * (0x022448) — assumes gf119-class parts never report >4 heads;
	 * confirm.
	 */
	u32 mask[4];
	int head;

	nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super));
	/* Latch each head's supervisor status before acting on any. */
	for (head = 0; head < disp->base.head.nr; head++) {
		mask[head] = nvkm_rd32(device, 0x6101d4 + (head * 0x800));
		nvkm_debug(subdev, "head %d: %08x\n", head, mask[head]);
	}

	if (disp->super & 0x00000001) {
		/* Stage 1: dump core channel methods, prep active heads. */
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		for (head = 0; head < disp->base.head.nr; head++) {
			if (!(mask[head] & 0x00001000))
				continue;
			nvkm_debug(subdev, "supervisor 1.0 - head %d\n", head);
			gf119_disp_intr_unk1_0(disp, head);
		}
	} else
	if (disp->super & 0x00000002) {
		/* Stage 2: tear down (2.0), set VPLLs (2.1), then bring up
		 * outputs (2.2) — strictly in that order across all heads.
		 */
		for (head = 0; head < disp->base.head.nr; head++) {
			if (!(mask[head] & 0x00001000))
				continue;
			nvkm_debug(subdev, "supervisor 2.0 - head %d\n", head);
			gf119_disp_intr_unk2_0(disp, head);
		}
		for (head = 0; head < disp->base.head.nr; head++) {
			if (!(mask[head] & 0x00010000))
				continue;
			nvkm_debug(subdev, "supervisor 2.1 - head %d\n", head);
			gf119_disp_intr_unk2_1(disp, head);
		}
		for (head = 0; head < disp->base.head.nr; head++) {
			if (!(mask[head] & 0x00001000))
				continue;
			nvkm_debug(subdev, "supervisor 2.2 - head %d\n", head);
			gf119_disp_intr_unk2_2(disp, head);
		}
	} else
	if (disp->super & 0x00000004) {
		/* Stage 3: post-modeset scripts. */
		for (head = 0; head < disp->base.head.nr; head++) {
			if (!(mask[head] & 0x00001000))
				continue;
			nvkm_debug(subdev, "supervisor 3.0 - head %d\n", head);
			gf119_disp_intr_unk4_0(disp, head);
		}
	}

	/* Clear per-head latches, then signal supervisor completion. */
	for (head = 0; head < disp->base.head.nr; head++)
		nvkm_wr32(device, 0x6101d4 + (head * 0x800), 0x00000000);
	nvkm_wr32(device, 0x6101d0, 0x80000000);
}
420
421static void
422gf119_disp_intr_error(struct nv50_disp *disp, int chid)
423{
424 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
425 struct nvkm_device *device = subdev->device;
426 u32 mthd = nvkm_rd32(device, 0x6101f0 + (chid * 12));
427 u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
428 u32 unkn = nvkm_rd32(device, 0x6101f8 + (chid * 12));
429
430 nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
431 chid, (mthd & 0x0000ffc), data, mthd, unkn);
432
433 if (chid < ARRAY_SIZE(disp->chan)) {
434 switch (mthd & 0xffc) {
435 case 0x0080:
436 nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
437 break;
438 default:
439 break;
440 }
441 }
442
443 nvkm_wr32(device, 0x61009c, (1 << chid));
444 nvkm_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
445}
446
/* Top-level GF119 display interrupt handler.  Dispatches channel
 * completion events, channel exceptions, supervisor interrupts and
 * per-head vblank, acking each source as it is handled.
 */
void
gf119_disp_intr(struct nv50_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x610088);
	int i;

	/* Channel completion notifications (uevents). */
	if (intr & 0x00000001) {
		u32 stat = nvkm_rd32(device, 0x61008c);
		while (stat) {
			int chid = __ffs(stat); stat &= ~(1 << chid);
			nv50_disp_chan_uevent_send(disp, chid);
			nvkm_wr32(device, 0x61008c, 1 << chid);
		}
		intr &= ~0x00000001;
	}

	/* Channel exceptions — only the lowest pending channel is handled
	 * per invocation; gf119_disp_intr_error() acks it.
	 */
	if (intr & 0x00000002) {
		u32 stat = nvkm_rd32(device, 0x61009c);
		int chid = ffs(stat) - 1;
		if (chid >= 0)
			gf119_disp_intr_error(disp, chid);
		intr &= ~0x00000002;
	}

	/* Supervisor interrupt: record the stage and defer the heavy
	 * lifting to the workqueue (it may sleep / train DP links).
	 */
	if (intr & 0x00100000) {
		u32 stat = nvkm_rd32(device, 0x6100ac);
		if (stat & 0x00000007) {
			disp->super = (stat & 0x00000007);
			schedule_work(&disp->supervisor);
			nvkm_wr32(device, 0x6100ac, disp->super);
			stat &= ~0x00000007;
		}

		/* Unknown intr24 bits: log and ack so we don't storm. */
		if (stat) {
			nvkm_warn(subdev, "intr24 %08x\n", stat);
			nvkm_wr32(device, 0x6100ac, stat);
		}

		intr &= ~0x00100000;
	}

	/* Per-head interrupts (bit 24 + head index): vblank. */
	for (i = 0; i < disp->base.head.nr; i++) {
		u32 mask = 0x01000000 << i;
		if (mask & intr) {
			u32 stat = nvkm_rd32(device, 0x6100bc + (i * 0x800));
			if (stat & 0x00000001)
				nvkm_disp_vblank(&disp->base, i);
			nvkm_mask(device, 0x6100bc + (i * 0x800), 0, 0);
			/* Read back to flush the ack before returning. */
			nvkm_rd32(device, 0x6100c0 + (i * 0x800));
		}
	}
}
501
502int
503gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
504 int index, struct nvkm_disp **pdisp)
505{
506 u32 heads = nvkm_rd32(device, 0x022448);
507 return nv50_disp_new_(func, device, index, heads, pdisp);
508}
509
/* GF119 display function table: interrupt/supervisor entry points, the
 * user-visible root object class, head/output hooks, and fixed DAC/SOR
 * counts for this chipset.
 */
static const struct nv50_disp_func
gf119_disp = {
	.intr = gf119_disp_intr,
	.uevent = &gf119_disp_chan_uevent,
	.super = gf119_disp_intr_supervisor,
	.root = &gf119_disp_root_oclass,
	.head.vblank_init = gf119_disp_vblank_init,
	.head.vblank_fini = gf119_disp_vblank_fini,
	.head.scanoutpos = gf119_disp_root_scanoutpos,
	/* CRT hangs off a DAC; TMDS/LVDS/DP are SOR-driven. */
	.outp.internal.crt = nv50_dac_output_new,
	.outp.internal.tmds = nv50_sor_output_new,
	.outp.internal.lvds = nv50_sor_output_new,
	.outp.internal.dp = gf119_sor_dp_new,
	.dac.nr = 3,
	.dac.power = nv50_dac_power,
	.dac.sense = nv50_dac_sense,
	.sor.nr = 4,
	.sor.power = nv50_sor_power,
	.sor.hda_eld = gf119_hda_eld,
	.sor.hdmi = gf119_hdmi_ctrl,
};
531
/* Instantiate the GF119 display engine with the gf119 function table. */
int
gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
	return gf119_disp_new_(&gf119_disp, device, index, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index 6f4019ab4e65..a86384b8e388 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -22,247 +22,32 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25 25#include "rootnv50.h"
26#include <nvif/class.h> 26
27 27static const struct nv50_disp_func
28/******************************************************************************* 28gk104_disp = {
29 * EVO master channel object 29 .intr = gf119_disp_intr,
30 ******************************************************************************/ 30 .uevent = &gf119_disp_chan_uevent,
31 31 .super = gf119_disp_intr_supervisor,
32static const struct nv50_disp_mthd_list 32 .root = &gk104_disp_root_oclass,
33gk104_disp_core_mthd_head = { 33 .head.vblank_init = gf119_disp_vblank_init,
34 .mthd = 0x0300, 34 .head.vblank_fini = gf119_disp_vblank_fini,
35 .addr = 0x000300, 35 .head.scanoutpos = gf119_disp_root_scanoutpos,
36 .data = { 36 .outp.internal.crt = nv50_dac_output_new,
37 { 0x0400, 0x660400 }, 37 .outp.internal.tmds = nv50_sor_output_new,
38 { 0x0404, 0x660404 }, 38 .outp.internal.lvds = nv50_sor_output_new,
39 { 0x0408, 0x660408 }, 39 .outp.internal.dp = gf119_sor_dp_new,
40 { 0x040c, 0x66040c }, 40 .dac.nr = 3,
41 { 0x0410, 0x660410 }, 41 .dac.power = nv50_dac_power,
42 { 0x0414, 0x660414 }, 42 .dac.sense = nv50_dac_sense,
43 { 0x0418, 0x660418 }, 43 .sor.nr = 4,
44 { 0x041c, 0x66041c }, 44 .sor.power = nv50_sor_power,
45 { 0x0420, 0x660420 }, 45 .sor.hda_eld = gf119_hda_eld,
46 { 0x0424, 0x660424 }, 46 .sor.hdmi = gk104_hdmi_ctrl,
47 { 0x0428, 0x660428 },
48 { 0x042c, 0x66042c },
49 { 0x0430, 0x660430 },
50 { 0x0434, 0x660434 },
51 { 0x0438, 0x660438 },
52 { 0x0440, 0x660440 },
53 { 0x0444, 0x660444 },
54 { 0x0448, 0x660448 },
55 { 0x044c, 0x66044c },
56 { 0x0450, 0x660450 },
57 { 0x0454, 0x660454 },
58 { 0x0458, 0x660458 },
59 { 0x045c, 0x66045c },
60 { 0x0460, 0x660460 },
61 { 0x0468, 0x660468 },
62 { 0x046c, 0x66046c },
63 { 0x0470, 0x660470 },
64 { 0x0474, 0x660474 },
65 { 0x047c, 0x66047c },
66 { 0x0480, 0x660480 },
67 { 0x0484, 0x660484 },
68 { 0x0488, 0x660488 },
69 { 0x048c, 0x66048c },
70 { 0x0490, 0x660490 },
71 { 0x0494, 0x660494 },
72 { 0x0498, 0x660498 },
73 { 0x04a0, 0x6604a0 },
74 { 0x04b0, 0x6604b0 },
75 { 0x04b8, 0x6604b8 },
76 { 0x04bc, 0x6604bc },
77 { 0x04c0, 0x6604c0 },
78 { 0x04c4, 0x6604c4 },
79 { 0x04c8, 0x6604c8 },
80 { 0x04d0, 0x6604d0 },
81 { 0x04d4, 0x6604d4 },
82 { 0x04e0, 0x6604e0 },
83 { 0x04e4, 0x6604e4 },
84 { 0x04e8, 0x6604e8 },
85 { 0x04ec, 0x6604ec },
86 { 0x04f0, 0x6604f0 },
87 { 0x04f4, 0x6604f4 },
88 { 0x04f8, 0x6604f8 },
89 { 0x04fc, 0x6604fc },
90 { 0x0500, 0x660500 },
91 { 0x0504, 0x660504 },
92 { 0x0508, 0x660508 },
93 { 0x050c, 0x66050c },
94 { 0x0510, 0x660510 },
95 { 0x0514, 0x660514 },
96 { 0x0518, 0x660518 },
97 { 0x051c, 0x66051c },
98 { 0x0520, 0x660520 },
99 { 0x0524, 0x660524 },
100 { 0x052c, 0x66052c },
101 { 0x0530, 0x660530 },
102 { 0x054c, 0x66054c },
103 { 0x0550, 0x660550 },
104 { 0x0554, 0x660554 },
105 { 0x0558, 0x660558 },
106 { 0x055c, 0x66055c },
107 {}
108 }
109};
110
111const struct nv50_disp_mthd_chan
112gk104_disp_core_mthd_chan = {
113 .name = "Core",
114 .addr = 0x000000,
115 .data = {
116 { "Global", 1, &gf110_disp_core_mthd_base },
117 { "DAC", 3, &gf110_disp_core_mthd_dac },
118 { "SOR", 8, &gf110_disp_core_mthd_sor },
119 { "PIOR", 4, &gf110_disp_core_mthd_pior },
120 { "HEAD", 4, &gk104_disp_core_mthd_head },
121 {}
122 }
123};
124
125/*******************************************************************************
126 * EVO overlay channel objects
127 ******************************************************************************/
128
129static const struct nv50_disp_mthd_list
130gk104_disp_ovly_mthd_base = {
131 .mthd = 0x0000,
132 .data = {
133 { 0x0080, 0x665080 },
134 { 0x0084, 0x665084 },
135 { 0x0088, 0x665088 },
136 { 0x008c, 0x66508c },
137 { 0x0090, 0x665090 },
138 { 0x0094, 0x665094 },
139 { 0x00a0, 0x6650a0 },
140 { 0x00a4, 0x6650a4 },
141 { 0x00b0, 0x6650b0 },
142 { 0x00b4, 0x6650b4 },
143 { 0x00b8, 0x6650b8 },
144 { 0x00c0, 0x6650c0 },
145 { 0x00c4, 0x6650c4 },
146 { 0x00e0, 0x6650e0 },
147 { 0x00e4, 0x6650e4 },
148 { 0x00e8, 0x6650e8 },
149 { 0x0100, 0x665100 },
150 { 0x0104, 0x665104 },
151 { 0x0108, 0x665108 },
152 { 0x010c, 0x66510c },
153 { 0x0110, 0x665110 },
154 { 0x0118, 0x665118 },
155 { 0x011c, 0x66511c },
156 { 0x0120, 0x665120 },
157 { 0x0124, 0x665124 },
158 { 0x0130, 0x665130 },
159 { 0x0134, 0x665134 },
160 { 0x0138, 0x665138 },
161 { 0x013c, 0x66513c },
162 { 0x0140, 0x665140 },
163 { 0x0144, 0x665144 },
164 { 0x0148, 0x665148 },
165 { 0x014c, 0x66514c },
166 { 0x0150, 0x665150 },
167 { 0x0154, 0x665154 },
168 { 0x0158, 0x665158 },
169 { 0x015c, 0x66515c },
170 { 0x0160, 0x665160 },
171 { 0x0164, 0x665164 },
172 { 0x0168, 0x665168 },
173 { 0x016c, 0x66516c },
174 { 0x0400, 0x665400 },
175 { 0x0404, 0x665404 },
176 { 0x0408, 0x665408 },
177 { 0x040c, 0x66540c },
178 { 0x0410, 0x665410 },
179 {}
180 }
181};
182
183const struct nv50_disp_mthd_chan
184gk104_disp_ovly_mthd_chan = {
185 .name = "Overlay",
186 .addr = 0x001000,
187 .data = {
188 { "Global", 1, &gk104_disp_ovly_mthd_base },
189 {}
190 }
191}; 47};
192 48
193/******************************************************************************* 49int
194 * Base display object 50gk104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
195 ******************************************************************************/
196
197static struct nvkm_oclass
198gk104_disp_sclass[] = {
199 { GK104_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
200 { GK104_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
201 { GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
202 { GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
203 { GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
204 {}
205};
206
207static struct nvkm_oclass
208gk104_disp_main_oclass[] = {
209 { GK104_DISP, &gf110_disp_main_ofuncs },
210 {}
211};
212
213/*******************************************************************************
214 * Display engine implementation
215 ******************************************************************************/
216
217static int
218gk104_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
219 struct nvkm_oclass *oclass, void *data, u32 size,
220 struct nvkm_object **pobject)
221{ 51{
222 struct nv50_disp_priv *priv; 52 return gf119_disp_new_(&gk104_disp, device, index, pdisp);
223 int heads = nv_rd32(parent, 0x022448);
224 int ret;
225
226 ret = nvkm_disp_create(parent, engine, oclass, heads,
227 "PDISP", "display", &priv);
228 *pobject = nv_object(priv);
229 if (ret)
230 return ret;
231
232 ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
233 if (ret)
234 return ret;
235
236 nv_engine(priv)->sclass = gk104_disp_main_oclass;
237 nv_engine(priv)->cclass = &nv50_disp_cclass;
238 nv_subdev(priv)->intr = gf110_disp_intr;
239 INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
240 priv->sclass = gk104_disp_sclass;
241 priv->head.nr = heads;
242 priv->dac.nr = 3;
243 priv->sor.nr = 4;
244 priv->dac.power = nv50_dac_power;
245 priv->dac.sense = nv50_dac_sense;
246 priv->sor.power = nv50_sor_power;
247 priv->sor.hda_eld = gf110_hda_eld;
248 priv->sor.hdmi = gk104_hdmi_ctrl;
249 return 0;
250} 53}
251
252struct nvkm_oclass *
253gk104_disp_oclass = &(struct nv50_disp_impl) {
254 .base.base.handle = NV_ENGINE(DISP, 0x91),
255 .base.base.ofuncs = &(struct nvkm_ofuncs) {
256 .ctor = gk104_disp_ctor,
257 .dtor = _nvkm_disp_dtor,
258 .init = _nvkm_disp_init,
259 .fini = _nvkm_disp_fini,
260 },
261 .base.vblank = &gf110_disp_vblank_func,
262 .base.outp = gf110_disp_outp_sclass,
263 .mthd.core = &gk104_disp_core_mthd_chan,
264 .mthd.base = &gf110_disp_base_mthd_chan,
265 .mthd.ovly = &gk104_disp_ovly_mthd_chan,
266 .mthd.prev = -0x020000,
267 .head.scanoutpos = gf110_disp_main_scanoutpos,
268}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index daa4b460a6ba..0d574c7e594a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -22,82 +22,32 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25 25#include "rootnv50.h"
26#include <nvif/class.h> 26
27 27static const struct nv50_disp_func
28/******************************************************************************* 28gk110_disp = {
29 * Base display object 29 .intr = gf119_disp_intr,
30 ******************************************************************************/ 30 .uevent = &gf119_disp_chan_uevent,
31 31 .super = gf119_disp_intr_supervisor,
32static struct nvkm_oclass 32 .root = &gk110_disp_root_oclass,
33gk110_disp_sclass[] = { 33 .head.vblank_init = gf119_disp_vblank_init,
34 { GK110_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base }, 34 .head.vblank_fini = gf119_disp_vblank_fini,
35 { GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base }, 35 .head.scanoutpos = gf119_disp_root_scanoutpos,
36 { GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base }, 36 .outp.internal.crt = nv50_dac_output_new,
37 { GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base }, 37 .outp.internal.tmds = nv50_sor_output_new,
38 { GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base }, 38 .outp.internal.lvds = nv50_sor_output_new,
39 {} 39 .outp.internal.dp = gf119_sor_dp_new,
40}; 40 .dac.nr = 3,
41 41 .dac.power = nv50_dac_power,
42static struct nvkm_oclass 42 .dac.sense = nv50_dac_sense,
43gk110_disp_main_oclass[] = { 43 .sor.nr = 4,
44 { GK110_DISP, &gf110_disp_main_ofuncs }, 44 .sor.power = nv50_sor_power,
45 {} 45 .sor.hda_eld = gf119_hda_eld,
46 .sor.hdmi = gk104_hdmi_ctrl,
46}; 47};
47 48
48/******************************************************************************* 49int
49 * Display engine implementation 50gk110_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
50 ******************************************************************************/
51
52static int
53gk110_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
54 struct nvkm_oclass *oclass, void *data, u32 size,
55 struct nvkm_object **pobject)
56{ 51{
57 struct nv50_disp_priv *priv; 52 return gf119_disp_new_(&gk110_disp, device, index, pdisp);
58 int heads = nv_rd32(parent, 0x022448);
59 int ret;
60
61 ret = nvkm_disp_create(parent, engine, oclass, heads,
62 "PDISP", "display", &priv);
63 *pobject = nv_object(priv);
64 if (ret)
65 return ret;
66
67 ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
68 if (ret)
69 return ret;
70
71 nv_engine(priv)->sclass = gk110_disp_main_oclass;
72 nv_engine(priv)->cclass = &nv50_disp_cclass;
73 nv_subdev(priv)->intr = gf110_disp_intr;
74 INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
75 priv->sclass = gk110_disp_sclass;
76 priv->head.nr = heads;
77 priv->dac.nr = 3;
78 priv->sor.nr = 4;
79 priv->dac.power = nv50_dac_power;
80 priv->dac.sense = nv50_dac_sense;
81 priv->sor.power = nv50_sor_power;
82 priv->sor.hda_eld = gf110_hda_eld;
83 priv->sor.hdmi = gk104_hdmi_ctrl;
84 return 0;
85} 53}
86
87struct nvkm_oclass *
88gk110_disp_oclass = &(struct nv50_disp_impl) {
89 .base.base.handle = NV_ENGINE(DISP, 0x92),
90 .base.base.ofuncs = &(struct nvkm_ofuncs) {
91 .ctor = gk110_disp_ctor,
92 .dtor = _nvkm_disp_dtor,
93 .init = _nvkm_disp_init,
94 .fini = _nvkm_disp_fini,
95 },
96 .base.vblank = &gf110_disp_vblank_func,
97 .base.outp = gf110_disp_outp_sclass,
98 .mthd.core = &gk104_disp_core_mthd_chan,
99 .mthd.base = &gf110_disp_base_mthd_chan,
100 .mthd.ovly = &gk104_disp_ovly_mthd_chan,
101 .mthd.prev = -0x020000,
102 .head.scanoutpos = gf110_disp_main_scanoutpos,
103}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index 881cc94385a1..b6944142d616 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -22,82 +22,32 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25 25#include "rootnv50.h"
26#include <nvif/class.h> 26
27 27static const struct nv50_disp_func
28/******************************************************************************* 28gm107_disp = {
29 * Base display object 29 .intr = gf119_disp_intr,
30 ******************************************************************************/ 30 .uevent = &gf119_disp_chan_uevent,
31 31 .super = gf119_disp_intr_supervisor,
32static struct nvkm_oclass 32 .root = &gm107_disp_root_oclass,
33gm107_disp_sclass[] = { 33 .head.vblank_init = gf119_disp_vblank_init,
34 { GM107_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base }, 34 .head.vblank_fini = gf119_disp_vblank_fini,
35 { GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base }, 35 .head.scanoutpos = gf119_disp_root_scanoutpos,
36 { GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base }, 36 .outp.internal.crt = nv50_dac_output_new,
37 { GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base }, 37 .outp.internal.tmds = nv50_sor_output_new,
38 { GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base }, 38 .outp.internal.lvds = nv50_sor_output_new,
39 {} 39 .outp.internal.dp = gf119_sor_dp_new,
40}; 40 .dac.nr = 3,
41 41 .dac.power = nv50_dac_power,
42static struct nvkm_oclass 42 .dac.sense = nv50_dac_sense,
43gm107_disp_main_oclass[] = { 43 .sor.nr = 4,
44 { GM107_DISP, &gf110_disp_main_ofuncs }, 44 .sor.power = nv50_sor_power,
45 {} 45 .sor.hda_eld = gf119_hda_eld,
46 .sor.hdmi = gk104_hdmi_ctrl,
46}; 47};
47 48
48/******************************************************************************* 49int
49 * Display engine implementation 50gm107_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
50 ******************************************************************************/
51
52static int
53gm107_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
54 struct nvkm_oclass *oclass, void *data, u32 size,
55 struct nvkm_object **pobject)
56{ 51{
57 struct nv50_disp_priv *priv; 52 return gf119_disp_new_(&gm107_disp, device, index, pdisp);
58 int heads = nv_rd32(parent, 0x022448);
59 int ret;
60
61 ret = nvkm_disp_create(parent, engine, oclass, heads,
62 "PDISP", "display", &priv);
63 *pobject = nv_object(priv);
64 if (ret)
65 return ret;
66
67 ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
68 if (ret)
69 return ret;
70
71 nv_engine(priv)->sclass = gm107_disp_main_oclass;
72 nv_engine(priv)->cclass = &nv50_disp_cclass;
73 nv_subdev(priv)->intr = gf110_disp_intr;
74 INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
75 priv->sclass = gm107_disp_sclass;
76 priv->head.nr = heads;
77 priv->dac.nr = 3;
78 priv->sor.nr = 4;
79 priv->dac.power = nv50_dac_power;
80 priv->dac.sense = nv50_dac_sense;
81 priv->sor.power = nv50_sor_power;
82 priv->sor.hda_eld = gf110_hda_eld;
83 priv->sor.hdmi = gk104_hdmi_ctrl;
84 return 0;
85} 53}
86
87struct nvkm_oclass *
88gm107_disp_oclass = &(struct nv50_disp_impl) {
89 .base.base.handle = NV_ENGINE(DISP, 0x07),
90 .base.base.ofuncs = &(struct nvkm_ofuncs) {
91 .ctor = gm107_disp_ctor,
92 .dtor = _nvkm_disp_dtor,
93 .init = _nvkm_disp_init,
94 .fini = _nvkm_disp_fini,
95 },
96 .base.vblank = &gf110_disp_vblank_func,
97 .base.outp = gf110_disp_outp_sclass,
98 .mthd.core = &gk104_disp_core_mthd_chan,
99 .mthd.base = &gf110_disp_base_mthd_chan,
100 .mthd.ovly = &gk104_disp_ovly_mthd_chan,
101 .mthd.prev = -0x020000,
102 .head.scanoutpos = gf110_disp_main_scanoutpos,
103}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
index 67004f8302b3..30f1987b5b40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
@@ -22,90 +22,33 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25#include "outpdp.h" 25#include "rootnv50.h"
26 26
27#include <nvif/class.h> 27static const struct nv50_disp_func
28 28gm204_disp = {
29/******************************************************************************* 29 .intr = gf119_disp_intr,
30 * Base display object 30 .uevent = &gf119_disp_chan_uevent,
31 ******************************************************************************/ 31 .super = gf119_disp_intr_supervisor,
32 32 .root = &gm204_disp_root_oclass,
33static struct nvkm_oclass 33 .head.vblank_init = gf119_disp_vblank_init,
34gm204_disp_sclass[] = { 34 .head.vblank_fini = gf119_disp_vblank_fini,
35 { GM204_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base }, 35 .head.scanoutpos = gf119_disp_root_scanoutpos,
36 { GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base }, 36 .outp.internal.crt = nv50_dac_output_new,
37 { GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base }, 37 .outp.internal.tmds = nv50_sor_output_new,
38 { GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base }, 38 .outp.internal.lvds = nv50_sor_output_new,
39 { GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base }, 39 .outp.internal.dp = gm204_sor_dp_new,
40 {} 40 .dac.nr = 3,
41 .dac.power = nv50_dac_power,
42 .dac.sense = nv50_dac_sense,
43 .sor.nr = 4,
44 .sor.power = nv50_sor_power,
45 .sor.hda_eld = gf119_hda_eld,
46 .sor.hdmi = gk104_hdmi_ctrl,
47 .sor.magic = gm204_sor_magic,
41}; 48};
42 49
43static struct nvkm_oclass 50int
44gm204_disp_main_oclass[] = { 51gm204_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
45 { GM204_DISP, &gf110_disp_main_ofuncs },
46 {}
47};
48
49/*******************************************************************************
50 * Display engine implementation
51 ******************************************************************************/
52
53static int
54gm204_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
55 struct nvkm_oclass *oclass, void *data, u32 size,
56 struct nvkm_object **pobject)
57{ 52{
58 struct nv50_disp_priv *priv; 53 return gf119_disp_new_(&gm204_disp, device, index, pdisp);
59 int heads = nv_rd32(parent, 0x022448);
60 int ret;
61
62 ret = nvkm_disp_create(parent, engine, oclass, heads,
63 "PDISP", "display", &priv);
64 *pobject = nv_object(priv);
65 if (ret)
66 return ret;
67
68 ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
69 if (ret)
70 return ret;
71
72 nv_engine(priv)->sclass = gm204_disp_main_oclass;
73 nv_engine(priv)->cclass = &nv50_disp_cclass;
74 nv_subdev(priv)->intr = gf110_disp_intr;
75 INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
76 priv->sclass = gm204_disp_sclass;
77 priv->head.nr = heads;
78 priv->dac.nr = 3;
79 priv->sor.nr = 4;
80 priv->dac.power = nv50_dac_power;
81 priv->dac.sense = nv50_dac_sense;
82 priv->sor.power = nv50_sor_power;
83 priv->sor.hda_eld = gf110_hda_eld;
84 priv->sor.hdmi = gf110_hdmi_ctrl;
85 priv->sor.magic = gm204_sor_magic;
86 return 0;
87} 54}
88
89struct nvkm_oclass *
90gm204_disp_outp_sclass[] = {
91 &gm204_sor_dp_impl.base.base,
92 NULL
93};
94
95struct nvkm_oclass *
96gm204_disp_oclass = &(struct nv50_disp_impl) {
97 .base.base.handle = NV_ENGINE(DISP, 0x07),
98 .base.base.ofuncs = &(struct nvkm_ofuncs) {
99 .ctor = gm204_disp_ctor,
100 .dtor = _nvkm_disp_dtor,
101 .init = _nvkm_disp_init,
102 .fini = _nvkm_disp_fini,
103 },
104 .base.vblank = &gf110_disp_vblank_func,
105 .base.outp = gm204_disp_outp_sclass,
106 .mthd.core = &gk104_disp_core_mthd_chan,
107 .mthd.base = &gf110_disp_base_mthd_chan,
108 .mthd.ovly = &gk104_disp_ovly_mthd_chan,
109 .mthd.prev = -0x020000,
110 .head.scanoutpos = gf110_disp_main_scanoutpos,
111}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
index a45307213f4b..6bc3bf096001 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
@@ -22,127 +22,34 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25 25#include "rootnv50.h"
26#include <nvif/class.h> 26
27 27static const struct nv50_disp_func
28/******************************************************************************* 28gt200_disp = {
29 * EVO overlay channel objects 29 .intr = nv50_disp_intr,
30 ******************************************************************************/ 30 .uevent = &nv50_disp_chan_uevent,
31 31 .super = nv50_disp_intr_supervisor,
32static const struct nv50_disp_mthd_list 32 .root = &gt200_disp_root_oclass,
33gt200_disp_ovly_mthd_base = { 33 .head.vblank_init = nv50_disp_vblank_init,
34 .mthd = 0x0000, 34 .head.vblank_fini = nv50_disp_vblank_fini,
35 .addr = 0x000000, 35 .head.scanoutpos = nv50_disp_root_scanoutpos,
36 .data = { 36 .outp.internal.crt = nv50_dac_output_new,
37 { 0x0080, 0x000000 }, 37 .outp.internal.tmds = nv50_sor_output_new,
38 { 0x0084, 0x6109a0 }, 38 .outp.internal.lvds = nv50_sor_output_new,
39 { 0x0088, 0x6109c0 }, 39 .outp.external.tmds = nv50_pior_output_new,
40 { 0x008c, 0x6109c8 }, 40 .outp.external.dp = nv50_pior_dp_new,
41 { 0x0090, 0x6109b4 }, 41 .dac.nr = 3,
42 { 0x0094, 0x610970 }, 42 .dac.power = nv50_dac_power,
43 { 0x00a0, 0x610998 }, 43 .dac.sense = nv50_dac_sense,
44 { 0x00a4, 0x610964 }, 44 .sor.nr = 2,
45 { 0x00b0, 0x610c98 }, 45 .sor.power = nv50_sor_power,
46 { 0x00b4, 0x610ca4 }, 46 .sor.hdmi = g84_hdmi_ctrl,
47 { 0x00b8, 0x610cac }, 47 .pior.nr = 3,
48 { 0x00c0, 0x610958 }, 48 .pior.power = nv50_pior_power,
49 { 0x00e0, 0x6109a8 },
50 { 0x00e4, 0x6109d0 },
51 { 0x00e8, 0x6109d8 },
52 { 0x0100, 0x61094c },
53 { 0x0104, 0x610984 },
54 { 0x0108, 0x61098c },
55 { 0x0800, 0x6109f8 },
56 { 0x0808, 0x610a08 },
57 { 0x080c, 0x610a10 },
58 { 0x0810, 0x610a00 },
59 {}
60 }
61};
62
63static const struct nv50_disp_mthd_chan
64gt200_disp_ovly_mthd_chan = {
65 .name = "Overlay",
66 .addr = 0x000540,
67 .data = {
68 { "Global", 1, &gt200_disp_ovly_mthd_base },
69 {}
70 }
71};
72
73/*******************************************************************************
74 * Base display object
75 ******************************************************************************/
76
77static struct nvkm_oclass
78gt200_disp_sclass[] = {
79 { GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
80 { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
81 { GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
82 { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
83 { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
84 {}
85}; 49};
86 50
87static struct nvkm_oclass 51int
88gt200_disp_main_oclass[] = { 52gt200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
89 { GT200_DISP, &nv50_disp_main_ofuncs },
90 {}
91};
92
93/*******************************************************************************
94 * Display engine implementation
95 ******************************************************************************/
96
97static int
98gt200_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
99 struct nvkm_oclass *oclass, void *data, u32 size,
100 struct nvkm_object **pobject)
101{ 53{
102 struct nv50_disp_priv *priv; 54 return nv50_disp_new_(&gt200_disp, device, index, 2, pdisp);
103 int ret;
104
105 ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
106 "display", &priv);
107 *pobject = nv_object(priv);
108 if (ret)
109 return ret;
110
111 ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
112 if (ret)
113 return ret;
114
115 nv_engine(priv)->sclass = gt200_disp_main_oclass;
116 nv_engine(priv)->cclass = &nv50_disp_cclass;
117 nv_subdev(priv)->intr = nv50_disp_intr;
118 INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
119 priv->sclass = gt200_disp_sclass;
120 priv->head.nr = 2;
121 priv->dac.nr = 3;
122 priv->sor.nr = 2;
123 priv->pior.nr = 3;
124 priv->dac.power = nv50_dac_power;
125 priv->dac.sense = nv50_dac_sense;
126 priv->sor.power = nv50_sor_power;
127 priv->sor.hdmi = g84_hdmi_ctrl;
128 priv->pior.power = nv50_pior_power;
129 return 0;
130} 55}
131
132struct nvkm_oclass *
133gt200_disp_oclass = &(struct nv50_disp_impl) {
134 .base.base.handle = NV_ENGINE(DISP, 0x83),
135 .base.base.ofuncs = &(struct nvkm_ofuncs) {
136 .ctor = gt200_disp_ctor,
137 .dtor = _nvkm_disp_dtor,
138 .init = _nvkm_disp_init,
139 .fini = _nvkm_disp_fini,
140 },
141 .base.vblank = &nv50_disp_vblank_func,
142 .base.outp = nv50_disp_outp_sclass,
143 .mthd.core = &g84_disp_core_mthd_chan,
144 .mthd.base = &g84_disp_base_mthd_chan,
145 .mthd.ovly = &gt200_disp_ovly_mthd_chan,
146 .mthd.prev = 0x000004,
147 .head.scanoutpos = nv50_disp_main_scanoutpos,
148}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index 55f0d3ac591e..94026288ab4d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -22,83 +22,36 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25 25#include "rootnv50.h"
26#include <nvif/class.h> 26
27 27static const struct nv50_disp_func
28/******************************************************************************* 28gt215_disp = {
29 * Base display object 29 .intr = nv50_disp_intr,
30 ******************************************************************************/ 30 .uevent = &nv50_disp_chan_uevent,
31 31 .super = nv50_disp_intr_supervisor,
32static struct nvkm_oclass 32 .root = &gt215_disp_root_oclass,
33gt215_disp_sclass[] = { 33 .head.vblank_init = nv50_disp_vblank_init,
34 { GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base }, 34 .head.vblank_fini = nv50_disp_vblank_fini,
35 { GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base }, 35 .head.scanoutpos = nv50_disp_root_scanoutpos,
36 { GT214_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base }, 36 .outp.internal.crt = nv50_dac_output_new,
37 { GT214_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base }, 37 .outp.internal.tmds = nv50_sor_output_new,
38 { GT214_DISP_CURSOR, &nv50_disp_curs_ofuncs.base }, 38 .outp.internal.lvds = nv50_sor_output_new,
39 {} 39 .outp.internal.dp = g94_sor_dp_new,
40}; 40 .outp.external.tmds = nv50_pior_output_new,
41 41 .outp.external.dp = nv50_pior_dp_new,
42static struct nvkm_oclass 42 .dac.nr = 3,
43gt215_disp_main_oclass[] = { 43 .dac.power = nv50_dac_power,
44 { GT214_DISP, &nv50_disp_main_ofuncs }, 44 .dac.sense = nv50_dac_sense,
45 {} 45 .sor.nr = 4,
46 .sor.power = nv50_sor_power,
47 .sor.hda_eld = gt215_hda_eld,
48 .sor.hdmi = gt215_hdmi_ctrl,
49 .pior.nr = 3,
50 .pior.power = nv50_pior_power,
46}; 51};
47 52
48/******************************************************************************* 53int
49 * Display engine implementation 54gt215_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
50 ******************************************************************************/
51
52static int
53gt215_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
54 struct nvkm_oclass *oclass, void *data, u32 size,
55 struct nvkm_object **pobject)
56{ 55{
57 struct nv50_disp_priv *priv; 56 return nv50_disp_new_(&gt215_disp, device, index, 2, pdisp);
58 int ret;
59
60 ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
61 "display", &priv);
62 *pobject = nv_object(priv);
63 if (ret)
64 return ret;
65
66 ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
67 if (ret)
68 return ret;
69
70 nv_engine(priv)->sclass = gt215_disp_main_oclass;
71 nv_engine(priv)->cclass = &nv50_disp_cclass;
72 nv_subdev(priv)->intr = nv50_disp_intr;
73 INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
74 priv->sclass = gt215_disp_sclass;
75 priv->head.nr = 2;
76 priv->dac.nr = 3;
77 priv->sor.nr = 4;
78 priv->pior.nr = 3;
79 priv->dac.power = nv50_dac_power;
80 priv->dac.sense = nv50_dac_sense;
81 priv->sor.power = nv50_sor_power;
82 priv->sor.hda_eld = gt215_hda_eld;
83 priv->sor.hdmi = gt215_hdmi_ctrl;
84 priv->pior.power = nv50_pior_power;
85 return 0;
86} 57}
87
88struct nvkm_oclass *
89gt215_disp_oclass = &(struct nv50_disp_impl) {
90 .base.base.handle = NV_ENGINE(DISP, 0x85),
91 .base.base.ofuncs = &(struct nvkm_ofuncs) {
92 .ctor = gt215_disp_ctor,
93 .dtor = _nvkm_disp_dtor,
94 .init = _nvkm_disp_init,
95 .fini = _nvkm_disp_fini,
96 },
97 .base.vblank = &nv50_disp_vblank_func,
98 .base.outp = g94_disp_outp_sclass,
99 .mthd.core = &g94_disp_core_mthd_chan,
100 .mthd.base = &g84_disp_base_mthd_chan,
101 .mthd.ovly = &g84_disp_ovly_mthd_chan,
102 .mthd.prev = 0x000004,
103 .head.scanoutpos = nv50_disp_main_scanoutpos,
104}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
index b9813d246ba5..af99efbd63f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
@@ -33,8 +33,9 @@
33#include <nvif/unpack.h> 33#include <nvif/unpack.h>
34 34
35int 35int
36gf110_hda_eld(NV50_DISP_MTHD_V1) 36gf119_hda_eld(NV50_DISP_MTHD_V1)
37{ 37{
38 struct nvkm_device *device = disp->base.engine.subdev.device;
38 union { 39 union {
39 struct nv50_disp_sor_hda_eld_v0 v0; 40 struct nv50_disp_sor_hda_eld_v0 v0;
40 } *args = data; 41 } *args = data;
@@ -42,9 +43,10 @@ gf110_hda_eld(NV50_DISP_MTHD_V1)
42 const u32 hoff = head * 0x800; 43 const u32 hoff = head * 0x800;
43 int ret, i; 44 int ret, i;
44 45
45 nv_ioctl(object, "disp sor hda eld size %d\n", size); 46 nvif_ioctl(object, "disp sor hda eld size %d\n", size);
46 if (nvif_unpack(args->v0, 0, 0, true)) { 47 if (nvif_unpack(args->v0, 0, 0, true)) {
47 nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version); 48 nvif_ioctl(object, "disp sor hda eld vers %d\n",
49 args->v0.version);
48 if (size > 0x60) 50 if (size > 0x60)
49 return -E2BIG; 51 return -E2BIG;
50 } else 52 } else
@@ -52,21 +54,29 @@ gf110_hda_eld(NV50_DISP_MTHD_V1)
52 54
53 if (size && args->v0.data[0]) { 55 if (size && args->v0.data[0]) {
54 if (outp->info.type == DCB_OUTPUT_DP) { 56 if (outp->info.type == DCB_OUTPUT_DP) {
55 nv_mask(priv, 0x616618 + hoff, 0x8000000c, 0x80000001); 57 nvkm_mask(device, 0x616618 + hoff, 0x8000000c, 0x80000001);
56 nv_wait(priv, 0x616618 + hoff, 0x80000000, 0x00000000); 58 nvkm_msec(device, 2000,
59 u32 tmp = nvkm_rd32(device, 0x616618 + hoff);
60 if (!(tmp & 0x80000000))
61 break;
62 );
57 } 63 }
58 nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000); 64 nvkm_mask(device, 0x616548 + hoff, 0x00000070, 0x00000000);
59 for (i = 0; i < size; i++) 65 for (i = 0; i < size; i++)
60 nv_wr32(priv, 0x10ec00 + soff, (i << 8) | args->v0.data[i]); 66 nvkm_wr32(device, 0x10ec00 + soff, (i << 8) | args->v0.data[i]);
61 for (; i < 0x60; i++) 67 for (; i < 0x60; i++)
62 nv_wr32(priv, 0x10ec00 + soff, (i << 8)); 68 nvkm_wr32(device, 0x10ec00 + soff, (i << 8));
63 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003); 69 nvkm_mask(device, 0x10ec10 + soff, 0x80000003, 0x80000003);
64 } else { 70 } else {
65 if (outp->info.type == DCB_OUTPUT_DP) { 71 if (outp->info.type == DCB_OUTPUT_DP) {
66 nv_mask(priv, 0x616618 + hoff, 0x80000001, 0x80000000); 72 nvkm_mask(device, 0x616618 + hoff, 0x80000001, 0x80000000);
67 nv_wait(priv, 0x616618 + hoff, 0x80000000, 0x00000000); 73 nvkm_msec(device, 2000,
74 u32 tmp = nvkm_rd32(device, 0x616618 + hoff);
75 if (!(tmp & 0x80000000))
76 break;
77 );
68 } 78 }
69 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000 | !!size); 79 nvkm_mask(device, 0x10ec10 + soff, 0x80000003, 0x80000000 | !!size);
70 } 80 }
71 81
72 return 0; 82 return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
index 891d1e7bf7d2..c1590b746f13 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
@@ -33,15 +33,17 @@
33int 33int
34gt215_hda_eld(NV50_DISP_MTHD_V1) 34gt215_hda_eld(NV50_DISP_MTHD_V1)
35{ 35{
36 struct nvkm_device *device = disp->base.engine.subdev.device;
36 union { 37 union {
37 struct nv50_disp_sor_hda_eld_v0 v0; 38 struct nv50_disp_sor_hda_eld_v0 v0;
38 } *args = data; 39 } *args = data;
39 const u32 soff = outp->or * 0x800; 40 const u32 soff = outp->or * 0x800;
40 int ret, i; 41 int ret, i;
41 42
42 nv_ioctl(object, "disp sor hda eld size %d\n", size); 43 nvif_ioctl(object, "disp sor hda eld size %d\n", size);
43 if (nvif_unpack(args->v0, 0, 0, true)) { 44 if (nvif_unpack(args->v0, 0, 0, true)) {
44 nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version); 45 nvif_ioctl(object, "disp sor hda eld vers %d\n",
46 args->v0.version);
45 if (size > 0x60) 47 if (size > 0x60)
46 return -E2BIG; 48 return -E2BIG;
47 } else 49 } else
@@ -49,20 +51,28 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
49 51
50 if (size && args->v0.data[0]) { 52 if (size && args->v0.data[0]) {
51 if (outp->info.type == DCB_OUTPUT_DP) { 53 if (outp->info.type == DCB_OUTPUT_DP) {
52 nv_mask(priv, 0x61c1e0 + soff, 0x8000000d, 0x80000001); 54 nvkm_mask(device, 0x61c1e0 + soff, 0x8000000d, 0x80000001);
53 nv_wait(priv, 0x61c1e0 + soff, 0x80000000, 0x00000000); 55 nvkm_msec(device, 2000,
56 u32 tmp = nvkm_rd32(device, 0x61c1e0 + soff);
57 if (!(tmp & 0x80000000))
58 break;
59 );
54 } 60 }
55 for (i = 0; i < size; i++) 61 for (i = 0; i < size; i++)
56 nv_wr32(priv, 0x61c440 + soff, (i << 8) | args->v0.data[0]); 62 nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
57 for (; i < 0x60; i++) 63 for (; i < 0x60; i++)
58 nv_wr32(priv, 0x61c440 + soff, (i << 8)); 64 nvkm_wr32(device, 0x61c440 + soff, (i << 8));
59 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003); 65 nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
60 } else { 66 } else {
61 if (outp->info.type == DCB_OUTPUT_DP) { 67 if (outp->info.type == DCB_OUTPUT_DP) {
62 nv_mask(priv, 0x61c1e0 + soff, 0x80000001, 0x80000000); 68 nvkm_mask(device, 0x61c1e0 + soff, 0x80000001, 0x80000000);
63 nv_wait(priv, 0x61c1e0 + soff, 0x80000000, 0x00000000); 69 nvkm_msec(device, 2000,
70 u32 tmp = nvkm_rd32(device, 0x61c1e0 + soff);
71 if (!(tmp & 0x80000000))
72 break;
73 );
64 } 74 }
65 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000 | !!size); 75 nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000000 | !!size);
66 } 76 }
67 77
68 return 0; 78 return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
index 621cb0b7ff19..ee9e800a8f06 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
@@ -31,6 +31,7 @@
31int 31int
32g84_hdmi_ctrl(NV50_DISP_MTHD_V1) 32g84_hdmi_ctrl(NV50_DISP_MTHD_V1)
33{ 33{
34 struct nvkm_device *device = disp->base.engine.subdev.device;
34 const u32 hoff = (head * 0x800); 35 const u32 hoff = (head * 0x800);
35 union { 36 union {
36 struct nv50_disp_sor_hdmi_pwr_v0 v0; 37 struct nv50_disp_sor_hdmi_pwr_v0 v0;
@@ -38,12 +39,12 @@ g84_hdmi_ctrl(NV50_DISP_MTHD_V1)
38 u32 ctrl; 39 u32 ctrl;
39 int ret; 40 int ret;
40 41
41 nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size); 42 nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
42 if (nvif_unpack(args->v0, 0, 0, false)) { 43 if (nvif_unpack(args->v0, 0, 0, false)) {
43 nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d " 44 nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
44 "max_ac_packet %d rekey %d\n", 45 "max_ac_packet %d rekey %d\n",
45 args->v0.version, args->v0.state, 46 args->v0.version, args->v0.state,
46 args->v0.max_ac_packet, args->v0.rekey); 47 args->v0.max_ac_packet, args->v0.rekey);
47 if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f) 48 if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
48 return -EINVAL; 49 return -EINVAL;
49 ctrl = 0x40000000 * !!args->v0.state; 50 ctrl = 0x40000000 * !!args->v0.state;
@@ -54,38 +55,38 @@ g84_hdmi_ctrl(NV50_DISP_MTHD_V1)
54 return ret; 55 return ret;
55 56
56 if (!(ctrl & 0x40000000)) { 57 if (!(ctrl & 0x40000000)) {
57 nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000); 58 nvkm_mask(device, 0x6165a4 + hoff, 0x40000000, 0x00000000);
58 nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000); 59 nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000000);
59 nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000); 60 nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000000);
60 return 0; 61 return 0;
61 } 62 }
62 63
63 /* AVI InfoFrame */ 64 /* AVI InfoFrame */
64 nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000); 65 nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000000);
65 nv_wr32(priv, 0x616528 + hoff, 0x000d0282); 66 nvkm_wr32(device, 0x616528 + hoff, 0x000d0282);
66 nv_wr32(priv, 0x61652c + hoff, 0x0000006f); 67 nvkm_wr32(device, 0x61652c + hoff, 0x0000006f);
67 nv_wr32(priv, 0x616530 + hoff, 0x00000000); 68 nvkm_wr32(device, 0x616530 + hoff, 0x00000000);
68 nv_wr32(priv, 0x616534 + hoff, 0x00000000); 69 nvkm_wr32(device, 0x616534 + hoff, 0x00000000);
69 nv_wr32(priv, 0x616538 + hoff, 0x00000000); 70 nvkm_wr32(device, 0x616538 + hoff, 0x00000000);
70 nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001); 71 nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000001);
71 72
72 /* Audio InfoFrame */ 73 /* Audio InfoFrame */
73 nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000); 74 nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000000);
74 nv_wr32(priv, 0x616508 + hoff, 0x000a0184); 75 nvkm_wr32(device, 0x616508 + hoff, 0x000a0184);
75 nv_wr32(priv, 0x61650c + hoff, 0x00000071); 76 nvkm_wr32(device, 0x61650c + hoff, 0x00000071);
76 nv_wr32(priv, 0x616510 + hoff, 0x00000000); 77 nvkm_wr32(device, 0x616510 + hoff, 0x00000000);
77 nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001); 78 nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000001);
78 79
79 nv_mask(priv, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */ 80 nvkm_mask(device, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
80 nv_mask(priv, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */ 81 nvkm_mask(device, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
81 nv_mask(priv, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */ 82 nvkm_mask(device, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
82 83
83 /* ??? */ 84 /* ??? */
84 nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ 85 nvkm_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
85 nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ 86 nvkm_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
86 nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ 87 nvkm_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
87 88
88 /* HDMI_CTRL */ 89 /* HDMI_CTRL */
89 nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, ctrl); 90 nvkm_mask(device, 0x6165a4 + hoff, 0x5f1f007f, ctrl);
90 return 0; 91 return 0;
91} 92}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c
index c28449061bbd..b5af025d3b04 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c
@@ -29,8 +29,9 @@
29#include <nvif/unpack.h> 29#include <nvif/unpack.h>
30 30
31int 31int
32gf110_hdmi_ctrl(NV50_DISP_MTHD_V1) 32gf119_hdmi_ctrl(NV50_DISP_MTHD_V1)
33{ 33{
34 struct nvkm_device *device = disp->base.engine.subdev.device;
34 const u32 hoff = (head * 0x800); 35 const u32 hoff = (head * 0x800);
35 union { 36 union {
36 struct nv50_disp_sor_hdmi_pwr_v0 v0; 37 struct nv50_disp_sor_hdmi_pwr_v0 v0;
@@ -38,12 +39,12 @@ gf110_hdmi_ctrl(NV50_DISP_MTHD_V1)
38 u32 ctrl; 39 u32 ctrl;
39 int ret; 40 int ret;
40 41
41 nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size); 42 nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
42 if (nvif_unpack(args->v0, 0, 0, false)) { 43 if (nvif_unpack(args->v0, 0, 0, false)) {
43 nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d " 44 nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
44 "max_ac_packet %d rekey %d\n", 45 "max_ac_packet %d rekey %d\n",
45 args->v0.version, args->v0.state, 46 args->v0.version, args->v0.state,
46 args->v0.max_ac_packet, args->v0.rekey); 47 args->v0.max_ac_packet, args->v0.rekey);
47 if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f) 48 if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
48 return -EINVAL; 49 return -EINVAL;
49 ctrl = 0x40000000 * !!args->v0.state; 50 ctrl = 0x40000000 * !!args->v0.state;
@@ -53,27 +54,27 @@ gf110_hdmi_ctrl(NV50_DISP_MTHD_V1)
53 return ret; 54 return ret;
54 55
55 if (!(ctrl & 0x40000000)) { 56 if (!(ctrl & 0x40000000)) {
56 nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000); 57 nvkm_mask(device, 0x616798 + hoff, 0x40000000, 0x00000000);
57 nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000); 58 nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000000);
58 nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000); 59 nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000000);
59 return 0; 60 return 0;
60 } 61 }
61 62
62 /* AVI InfoFrame */ 63 /* AVI InfoFrame */
63 nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000); 64 nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000000);
64 nv_wr32(priv, 0x61671c + hoff, 0x000d0282); 65 nvkm_wr32(device, 0x61671c + hoff, 0x000d0282);
65 nv_wr32(priv, 0x616720 + hoff, 0x0000006f); 66 nvkm_wr32(device, 0x616720 + hoff, 0x0000006f);
66 nv_wr32(priv, 0x616724 + hoff, 0x00000000); 67 nvkm_wr32(device, 0x616724 + hoff, 0x00000000);
67 nv_wr32(priv, 0x616728 + hoff, 0x00000000); 68 nvkm_wr32(device, 0x616728 + hoff, 0x00000000);
68 nv_wr32(priv, 0x61672c + hoff, 0x00000000); 69 nvkm_wr32(device, 0x61672c + hoff, 0x00000000);
69 nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001); 70 nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000001);
70 71
71 /* ??? InfoFrame? */ 72 /* ??? InfoFrame? */
72 nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000); 73 nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000000);
73 nv_wr32(priv, 0x6167ac + hoff, 0x00000010); 74 nvkm_wr32(device, 0x6167ac + hoff, 0x00000010);
74 nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001); 75 nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000001);
75 76
76 /* HDMI_CTRL */ 77 /* HDMI_CTRL */
77 nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl); 78 nvkm_mask(device, 0x616798 + hoff, 0x401f007f, ctrl);
78 return 0; 79 return 0;
79} 80}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
index ca34ff81ad7f..110dc19e4f67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
@@ -31,6 +31,7 @@
31int 31int
32gk104_hdmi_ctrl(NV50_DISP_MTHD_V1) 32gk104_hdmi_ctrl(NV50_DISP_MTHD_V1)
33{ 33{
34 struct nvkm_device *device = disp->base.engine.subdev.device;
34 const u32 hoff = (head * 0x800); 35 const u32 hoff = (head * 0x800);
35 const u32 hdmi = (head * 0x400); 36 const u32 hdmi = (head * 0x400);
36 union { 37 union {
@@ -39,12 +40,12 @@ gk104_hdmi_ctrl(NV50_DISP_MTHD_V1)
39 u32 ctrl; 40 u32 ctrl;
40 int ret; 41 int ret;
41 42
42 nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size); 43 nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
43 if (nvif_unpack(args->v0, 0, 0, false)) { 44 if (nvif_unpack(args->v0, 0, 0, false)) {
44 nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d " 45 nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
45 "max_ac_packet %d rekey %d\n", 46 "max_ac_packet %d rekey %d\n",
46 args->v0.version, args->v0.state, 47 args->v0.version, args->v0.state,
47 args->v0.max_ac_packet, args->v0.rekey); 48 args->v0.max_ac_packet, args->v0.rekey);
48 if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f) 49 if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
49 return -EINVAL; 50 return -EINVAL;
50 ctrl = 0x40000000 * !!args->v0.state; 51 ctrl = 0x40000000 * !!args->v0.state;
@@ -54,30 +55,30 @@ gk104_hdmi_ctrl(NV50_DISP_MTHD_V1)
54 return ret; 55 return ret;
55 56
56 if (!(ctrl & 0x40000000)) { 57 if (!(ctrl & 0x40000000)) {
57 nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000); 58 nvkm_mask(device, 0x616798 + hoff, 0x40000000, 0x00000000);
58 nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000000); 59 nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
59 nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000000); 60 nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000000);
60 return 0; 61 return 0;
61 } 62 }
62 63
63 /* AVI InfoFrame */ 64 /* AVI InfoFrame */
64 nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000000); 65 nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000000);
65 nv_wr32(priv, 0x690008 + hdmi, 0x000d0282); 66 nvkm_wr32(device, 0x690008 + hdmi, 0x000d0282);
66 nv_wr32(priv, 0x69000c + hdmi, 0x0000006f); 67 nvkm_wr32(device, 0x69000c + hdmi, 0x0000006f);
67 nv_wr32(priv, 0x690010 + hdmi, 0x00000000); 68 nvkm_wr32(device, 0x690010 + hdmi, 0x00000000);
68 nv_wr32(priv, 0x690014 + hdmi, 0x00000000); 69 nvkm_wr32(device, 0x690014 + hdmi, 0x00000000);
69 nv_wr32(priv, 0x690018 + hdmi, 0x00000000); 70 nvkm_wr32(device, 0x690018 + hdmi, 0x00000000);
70 nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000001); 71 nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000001);
71 72
72 /* ??? InfoFrame? */ 73 /* ??? InfoFrame? */
73 nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000000); 74 nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
74 nv_wr32(priv, 0x6900cc + hdmi, 0x00000010); 75 nvkm_wr32(device, 0x6900cc + hdmi, 0x00000010);
75 nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000001); 76 nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000001);
76 77
77 /* ??? */ 78 /* ??? */
78 nv_wr32(priv, 0x690080 + hdmi, 0x82000000); 79 nvkm_wr32(device, 0x690080 + hdmi, 0x82000000);
79 80
80 /* HDMI_CTRL */ 81 /* HDMI_CTRL */
81 nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl); 82 nvkm_mask(device, 0x616798 + hoff, 0x401f007f, ctrl);
82 return 0; 83 return 0;
83} 84}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
index b641c167dcfa..61237dbfa35a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
@@ -32,6 +32,7 @@
32int 32int
33gt215_hdmi_ctrl(NV50_DISP_MTHD_V1) 33gt215_hdmi_ctrl(NV50_DISP_MTHD_V1)
34{ 34{
35 struct nvkm_device *device = disp->base.engine.subdev.device;
35 const u32 soff = outp->or * 0x800; 36 const u32 soff = outp->or * 0x800;
36 union { 37 union {
37 struct nv50_disp_sor_hdmi_pwr_v0 v0; 38 struct nv50_disp_sor_hdmi_pwr_v0 v0;
@@ -39,12 +40,12 @@ gt215_hdmi_ctrl(NV50_DISP_MTHD_V1)
39 u32 ctrl; 40 u32 ctrl;
40 int ret; 41 int ret;
41 42
42 nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size); 43 nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
43 if (nvif_unpack(args->v0, 0, 0, false)) { 44 if (nvif_unpack(args->v0, 0, 0, false)) {
44 nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d " 45 nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
45 "max_ac_packet %d rekey %d\n", 46 "max_ac_packet %d rekey %d\n",
46 args->v0.version, args->v0.state, 47 args->v0.version, args->v0.state,
47 args->v0.max_ac_packet, args->v0.rekey); 48 args->v0.max_ac_packet, args->v0.rekey);
48 if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f) 49 if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
49 return -EINVAL; 50 return -EINVAL;
50 ctrl = 0x40000000 * !!args->v0.state; 51 ctrl = 0x40000000 * !!args->v0.state;
@@ -55,38 +56,38 @@ gt215_hdmi_ctrl(NV50_DISP_MTHD_V1)
55 return ret; 56 return ret;
56 57
57 if (!(ctrl & 0x40000000)) { 58 if (!(ctrl & 0x40000000)) {
58 nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000); 59 nvkm_mask(device, 0x61c5a4 + soff, 0x40000000, 0x00000000);
59 nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000); 60 nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
60 nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000); 61 nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000000);
61 return 0; 62 return 0;
62 } 63 }
63 64
64 /* AVI InfoFrame */ 65 /* AVI InfoFrame */
65 nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000); 66 nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
66 nv_wr32(priv, 0x61c528 + soff, 0x000d0282); 67 nvkm_wr32(device, 0x61c528 + soff, 0x000d0282);
67 nv_wr32(priv, 0x61c52c + soff, 0x0000006f); 68 nvkm_wr32(device, 0x61c52c + soff, 0x0000006f);
68 nv_wr32(priv, 0x61c530 + soff, 0x00000000); 69 nvkm_wr32(device, 0x61c530 + soff, 0x00000000);
69 nv_wr32(priv, 0x61c534 + soff, 0x00000000); 70 nvkm_wr32(device, 0x61c534 + soff, 0x00000000);
70 nv_wr32(priv, 0x61c538 + soff, 0x00000000); 71 nvkm_wr32(device, 0x61c538 + soff, 0x00000000);
71 nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001); 72 nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000001);
72 73
73 /* Audio InfoFrame */ 74 /* Audio InfoFrame */
74 nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000); 75 nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000000);
75 nv_wr32(priv, 0x61c508 + soff, 0x000a0184); 76 nvkm_wr32(device, 0x61c508 + soff, 0x000a0184);
76 nv_wr32(priv, 0x61c50c + soff, 0x00000071); 77 nvkm_wr32(device, 0x61c50c + soff, 0x00000071);
77 nv_wr32(priv, 0x61c510 + soff, 0x00000000); 78 nvkm_wr32(device, 0x61c510 + soff, 0x00000000);
78 nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001); 79 nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000001);
79 80
80 nv_mask(priv, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */ 81 nvkm_mask(device, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
81 nv_mask(priv, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */ 82 nvkm_mask(device, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
82 nv_mask(priv, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */ 83 nvkm_mask(device, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
83 84
84 /* ??? */ 85 /* ??? */
85 nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ 86 nvkm_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
86 nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ 87 nvkm_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
87 nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ 88 nvkm_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
88 89
89 /* HDMI_CTRL */ 90 /* HDMI_CTRL */
90 nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, ctrl); 91 nvkm_mask(device, 0x61c5a4 + soff, 0x5f1f007f, ctrl);
91 return 0; 92 return 0;
92} 93}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
index ff09b2659c17..67254ce6f83f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
@@ -23,183 +23,63 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26#include <core/client.h> 26static const struct nvkm_disp_oclass *
27#include <core/device.h> 27nv04_disp_root(struct nvkm_disp *disp)
28
29#include <nvif/class.h>
30#include <nvif/unpack.h>
31
32struct nv04_disp_priv {
33 struct nvkm_disp base;
34};
35
36static int
37nv04_disp_scanoutpos(struct nvkm_object *object, struct nv04_disp_priv *priv,
38 void *data, u32 size, int head)
39{ 28{
40 const u32 hoff = head * 0x2000; 29 return &nv04_disp_root_oclass;
41 union {
42 struct nv04_disp_scanoutpos_v0 v0;
43 } *args = data;
44 u32 line;
45 int ret;
46
47 nv_ioctl(object, "disp scanoutpos size %d\n", size);
48 if (nvif_unpack(args->v0, 0, 0, false)) {
49 nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
50 args->v0.vblanks = nv_rd32(priv, 0x680800 + hoff) & 0xffff;
51 args->v0.vtotal = nv_rd32(priv, 0x680804 + hoff) & 0xffff;
52 args->v0.vblanke = args->v0.vtotal - 1;
53
54 args->v0.hblanks = nv_rd32(priv, 0x680820 + hoff) & 0xffff;
55 args->v0.htotal = nv_rd32(priv, 0x680824 + hoff) & 0xffff;
56 args->v0.hblanke = args->v0.htotal - 1;
57
58 /*
59 * If output is vga instead of digital then vtotal/htotal is
60 * invalid so we have to give up and trigger the timestamping
61 * fallback in the drm core.
62 */
63 if (!args->v0.vtotal || !args->v0.htotal)
64 return -ENOTSUPP;
65
66 args->v0.time[0] = ktime_to_ns(ktime_get());
67 line = nv_rd32(priv, 0x600868 + hoff);
68 args->v0.time[1] = ktime_to_ns(ktime_get());
69 args->v0.hline = (line & 0xffff0000) >> 16;
70 args->v0.vline = (line & 0x0000ffff);
71 } else
72 return ret;
73
74 return 0;
75} 30}
76 31
77static int
78nv04_disp_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
79{
80 union {
81 struct nv04_disp_mthd_v0 v0;
82 } *args = data;
83 struct nv04_disp_priv *priv = (void *)object->engine;
84 int head, ret;
85
86 nv_ioctl(object, "disp mthd size %d\n", size);
87 if (nvif_unpack(args->v0, 0, 0, true)) {
88 nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
89 args->v0.version, args->v0.method, args->v0.head);
90 mthd = args->v0.method;
91 head = args->v0.head;
92 } else
93 return ret;
94
95 if (head < 0 || head >= 2)
96 return -ENXIO;
97
98 switch (mthd) {
99 case NV04_DISP_SCANOUTPOS:
100 return nv04_disp_scanoutpos(object, priv, data, size, head);
101 default:
102 break;
103 }
104
105 return -EINVAL;
106}
107
108static struct nvkm_ofuncs
109nv04_disp_ofuncs = {
110 .ctor = _nvkm_object_ctor,
111 .dtor = nvkm_object_destroy,
112 .init = nvkm_object_init,
113 .fini = nvkm_object_fini,
114 .mthd = nv04_disp_mthd,
115 .ntfy = nvkm_disp_ntfy,
116};
117
118static struct nvkm_oclass
119nv04_disp_sclass[] = {
120 { NV04_DISP, &nv04_disp_ofuncs },
121 {},
122};
123
124/*******************************************************************************
125 * Display engine implementation
126 ******************************************************************************/
127
128static void 32static void
129nv04_disp_vblank_init(struct nvkm_event *event, int type, int head) 33nv04_disp_vblank_init(struct nvkm_disp *disp, int head)
130{ 34{
131 struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank); 35 struct nvkm_device *device = disp->engine.subdev.device;
132 nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000001); 36 nvkm_wr32(device, 0x600140 + (head * 0x2000) , 0x00000001);
133} 37}
134 38
135static void 39static void
136nv04_disp_vblank_fini(struct nvkm_event *event, int type, int head) 40nv04_disp_vblank_fini(struct nvkm_disp *disp, int head)
137{ 41{
138 struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank); 42 struct nvkm_device *device = disp->engine.subdev.device;
139 nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000000); 43 nvkm_wr32(device, 0x600140 + (head * 0x2000) , 0x00000000);
140} 44}
141 45
142static const struct nvkm_event_func
143nv04_disp_vblank_func = {
144 .ctor = nvkm_disp_vblank_ctor,
145 .init = nv04_disp_vblank_init,
146 .fini = nv04_disp_vblank_fini,
147};
148
149static void 46static void
150nv04_disp_intr(struct nvkm_subdev *subdev) 47nv04_disp_intr(struct nvkm_disp *disp)
151{ 48{
152 struct nv04_disp_priv *priv = (void *)subdev; 49 struct nvkm_subdev *subdev = &disp->engine.subdev;
153 u32 crtc0 = nv_rd32(priv, 0x600100); 50 struct nvkm_device *device = subdev->device;
154 u32 crtc1 = nv_rd32(priv, 0x602100); 51 u32 crtc0 = nvkm_rd32(device, 0x600100);
52 u32 crtc1 = nvkm_rd32(device, 0x602100);
155 u32 pvideo; 53 u32 pvideo;
156 54
157 if (crtc0 & 0x00000001) { 55 if (crtc0 & 0x00000001) {
158 nvkm_disp_vblank(&priv->base, 0); 56 nvkm_disp_vblank(disp, 0);
159 nv_wr32(priv, 0x600100, 0x00000001); 57 nvkm_wr32(device, 0x600100, 0x00000001);
160 } 58 }
161 59
162 if (crtc1 & 0x00000001) { 60 if (crtc1 & 0x00000001) {
163 nvkm_disp_vblank(&priv->base, 1); 61 nvkm_disp_vblank(disp, 1);
164 nv_wr32(priv, 0x602100, 0x00000001); 62 nvkm_wr32(device, 0x602100, 0x00000001);
165 } 63 }
166 64
167 if (nv_device(priv)->chipset >= 0x10 && 65 if (device->chipset >= 0x10 && device->chipset <= 0x40) {
168 nv_device(priv)->chipset <= 0x40) { 66 pvideo = nvkm_rd32(device, 0x8100);
169 pvideo = nv_rd32(priv, 0x8100);
170 if (pvideo & ~0x11) 67 if (pvideo & ~0x11)
171 nv_info(priv, "PVIDEO intr: %08x\n", pvideo); 68 nvkm_info(subdev, "PVIDEO intr: %08x\n", pvideo);
172 nv_wr32(priv, 0x8100, pvideo); 69 nvkm_wr32(device, 0x8100, pvideo);
173 } 70 }
174} 71}
175 72
176static int 73static const struct nvkm_disp_func
177nv04_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 74nv04_disp = {
178 struct nvkm_oclass *oclass, void *data, u32 size, 75 .intr = nv04_disp_intr,
179 struct nvkm_object **pobject) 76 .root = nv04_disp_root,
180{ 77 .head.vblank_init = nv04_disp_vblank_init,
181 struct nv04_disp_priv *priv; 78 .head.vblank_fini = nv04_disp_vblank_fini,
182 int ret; 79};
183
184 ret = nvkm_disp_create(parent, engine, oclass, 2, "DISPLAY",
185 "display", &priv);
186 *pobject = nv_object(priv);
187 if (ret)
188 return ret;
189 80
190 nv_engine(priv)->sclass = nv04_disp_sclass; 81int
191 nv_subdev(priv)->intr = nv04_disp_intr; 82nv04_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
192 return 0; 83{
84 return nvkm_disp_new_(&nv04_disp, device, index, 2, pdisp);
193} 85}
194
195struct nvkm_oclass *
196nv04_disp_oclass = &(struct nvkm_disp_impl) {
197 .base.handle = NV_ENGINE(DISP, 0x04),
198 .base.ofuncs = &(struct nvkm_ofuncs) {
199 .ctor = nv04_disp_ctor,
200 .dtor = _nvkm_disp_dtor,
201 .init = _nvkm_disp_init,
202 .fini = _nvkm_disp_fini,
203 },
204 .vblank = &nv04_disp_vblank_func,
205}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 8ba808df24ad..32e73a975b58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -22,1291 +22,158 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25#include "outp.h" 25#include "rootnv50.h"
26#include "outpdp.h"
27 26
28#include <core/client.h> 27#include <core/client.h>
29#include <core/device.h>
30#include <core/engctx.h>
31#include <core/enum.h> 28#include <core/enum.h>
32#include <core/handle.h> 29#include <core/gpuobj.h>
33#include <core/ramht.h>
34#include <engine/dmaobj.h>
35#include <subdev/bios.h> 30#include <subdev/bios.h>
36#include <subdev/bios/dcb.h>
37#include <subdev/bios/disp.h> 31#include <subdev/bios/disp.h>
38#include <subdev/bios/init.h> 32#include <subdev/bios/init.h>
39#include <subdev/bios/pll.h> 33#include <subdev/bios/pll.h>
40#include <subdev/devinit.h> 34#include <subdev/devinit.h>
41#include <subdev/fb.h>
42#include <subdev/timer.h>
43 35
44#include <nvif/class.h> 36static const struct nvkm_disp_oclass *
45#include <nvif/event.h> 37nv50_disp_root_(struct nvkm_disp *base)
46#include <nvif/unpack.h>
47
48/*******************************************************************************
49 * EVO channel base class
50 ******************************************************************************/
51
52static int
53nv50_disp_chan_create_(struct nvkm_object *parent,
54 struct nvkm_object *engine,
55 struct nvkm_oclass *oclass, int head,
56 int length, void **pobject)
57{
58 const struct nv50_disp_chan_impl *impl = (void *)oclass->ofuncs;
59 struct nv50_disp_base *base = (void *)parent;
60 struct nv50_disp_chan *chan;
61 int chid = impl->chid + head;
62 int ret;
63
64 if (base->chan & (1 << chid))
65 return -EBUSY;
66 base->chan |= (1 << chid);
67
68 ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
69 (1ULL << NVDEV_ENGINE_DMAOBJ),
70 length, pobject);
71 chan = *pobject;
72 if (ret)
73 return ret;
74 chan->chid = chid;
75
76 nv_parent(chan)->object_attach = impl->attach;
77 nv_parent(chan)->object_detach = impl->detach;
78 return 0;
79}
80
81static void
82nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
83{
84 struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
85 base->chan &= ~(1 << chan->chid);
86 nvkm_namedb_destroy(&chan->base);
87}
88
89static void
90nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
91{
92 struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
93 nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000000 << index);
94 nv_wr32(priv, 0x610020, 0x00000001 << index);
95}
96
97static void
98nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
99{
100 struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
101 nv_wr32(priv, 0x610020, 0x00000001 << index);
102 nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000001 << index);
103}
104
105void
106nv50_disp_chan_uevent_send(struct nv50_disp_priv *priv, int chid)
107{
108 struct nvif_notify_uevent_rep {
109 } rep;
110
111 nvkm_event_send(&priv->uevent, 1, chid, &rep, sizeof(rep));
112}
113
114int
115nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
116 struct nvkm_notify *notify)
117{
118 struct nv50_disp_dmac *dmac = (void *)object;
119 union {
120 struct nvif_notify_uevent_req none;
121 } *args = data;
122 int ret;
123
124 if (nvif_unvers(args->none)) {
125 notify->size = sizeof(struct nvif_notify_uevent_rep);
126 notify->types = 1;
127 notify->index = dmac->base.chid;
128 return 0;
129 }
130
131 return ret;
132}
133
134const struct nvkm_event_func
135nv50_disp_chan_uevent = {
136 .ctor = nv50_disp_chan_uevent_ctor,
137 .init = nv50_disp_chan_uevent_init,
138 .fini = nv50_disp_chan_uevent_fini,
139};
140
141int
142nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
143 struct nvkm_event **pevent)
144{
145 struct nv50_disp_priv *priv = (void *)object->engine;
146 switch (type) {
147 case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
148 *pevent = &priv->uevent;
149 return 0;
150 default:
151 break;
152 }
153 return -EINVAL;
154}
155
156int
157nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
158{ 38{
159 struct nv50_disp_chan *chan = (void *)object; 39 return nv50_disp(base)->func->root;
160 *addr = nv_device_resource_start(nv_device(object), 0) +
161 0x640000 + (chan->chid * 0x1000);
162 *size = 0x001000;
163 return 0;
164} 40}
165 41
166u32
167nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr)
168{
169 struct nv50_disp_priv *priv = (void *)object->engine;
170 struct nv50_disp_chan *chan = (void *)object;
171 return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr);
172}
173
174void
175nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
176{
177 struct nv50_disp_priv *priv = (void *)object->engine;
178 struct nv50_disp_chan *chan = (void *)object;
179 nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data);
180}
181
182/*******************************************************************************
183 * EVO DMA channel base class
184 ******************************************************************************/
185
186static int 42static int
187nv50_disp_dmac_object_attach(struct nvkm_object *parent, 43nv50_disp_outp_internal_crt_(struct nvkm_disp *base, int index,
188 struct nvkm_object *object, u32 name) 44 struct dcb_output *dcb, struct nvkm_output **poutp)
189{
190 struct nv50_disp_base *base = (void *)parent->parent;
191 struct nv50_disp_chan *chan = (void *)parent;
192 u32 addr = nv_gpuobj(object)->node->offset;
193 u32 chid = chan->chid;
194 u32 data = (chid << 28) | (addr << 10) | chid;
195 return nvkm_ramht_insert(base->ramht, chid, name, data);
196}
197
198static void
199nv50_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
200{ 45{
201 struct nv50_disp_base *base = (void *)parent->parent; 46 struct nv50_disp *disp = nv50_disp(base);
202 nvkm_ramht_remove(base->ramht, cookie); 47 return disp->func->outp.internal.crt(base, index, dcb, poutp);
203} 48}
204 49
205static int 50static int
206nv50_disp_dmac_create_(struct nvkm_object *parent, 51nv50_disp_outp_internal_tmds_(struct nvkm_disp *base, int index,
207 struct nvkm_object *engine, 52 struct dcb_output *dcb,
208 struct nvkm_oclass *oclass, u32 pushbuf, int head, 53 struct nvkm_output **poutp)
209 int length, void **pobject)
210{
211 struct nv50_disp_dmac *dmac;
212 int ret;
213
214 ret = nv50_disp_chan_create_(parent, engine, oclass, head,
215 length, pobject);
216 dmac = *pobject;
217 if (ret)
218 return ret;
219
220 dmac->pushdma = (void *)nvkm_handle_ref(parent, pushbuf);
221 if (!dmac->pushdma)
222 return -ENOENT;
223
224 switch (nv_mclass(dmac->pushdma)) {
225 case 0x0002:
226 case 0x003d:
227 if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
228 return -EINVAL;
229
230 switch (dmac->pushdma->target) {
231 case NV_MEM_TARGET_VRAM:
232 dmac->push = 0x00000001 | dmac->pushdma->start >> 8;
233 break;
234 case NV_MEM_TARGET_PCI_NOSNOOP:
235 dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
236 break;
237 default:
238 return -EINVAL;
239 }
240 break;
241 default:
242 return -EINVAL;
243 }
244
245 return 0;
246}
247
248void
249nv50_disp_dmac_dtor(struct nvkm_object *object)
250{ 54{
251 struct nv50_disp_dmac *dmac = (void *)object; 55 struct nv50_disp *disp = nv50_disp(base);
252 nvkm_object_ref(NULL, (struct nvkm_object **)&dmac->pushdma); 56 return disp->func->outp.internal.tmds(base, index, dcb, poutp);
253 nv50_disp_chan_destroy(&dmac->base);
254} 57}
255 58
256static int 59static int
257nv50_disp_dmac_init(struct nvkm_object *object) 60nv50_disp_outp_internal_lvds_(struct nvkm_disp *base, int index,
61 struct dcb_output *dcb,
62 struct nvkm_output **poutp)
258{ 63{
259 struct nv50_disp_priv *priv = (void *)object->engine; 64 struct nv50_disp *disp = nv50_disp(base);
260 struct nv50_disp_dmac *dmac = (void *)object; 65 return disp->func->outp.internal.lvds(base, index, dcb, poutp);
261 int chid = dmac->base.chid;
262 int ret;
263
264 ret = nv50_disp_chan_init(&dmac->base);
265 if (ret)
266 return ret;
267
268 /* enable error reporting */
269 nv_mask(priv, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
270
271 /* initialise channel for dma command submission */
272 nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
273 nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000);
274 nv_wr32(priv, 0x61020c + (chid * 0x0010), chid);
275 nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
276 nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
277 nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013);
278
279 /* wait for it to go inactive */
280 if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) {
281 nv_error(dmac, "init timeout, 0x%08x\n",
282 nv_rd32(priv, 0x610200 + (chid * 0x10)));
283 return -EBUSY;
284 }
285
286 return 0;
287} 66}
288 67
289static int 68static int
290nv50_disp_dmac_fini(struct nvkm_object *object, bool suspend) 69nv50_disp_outp_internal_dp_(struct nvkm_disp *base, int index,
70 struct dcb_output *dcb, struct nvkm_output **poutp)
291{ 71{
292 struct nv50_disp_priv *priv = (void *)object->engine; 72 struct nv50_disp *disp = nv50_disp(base);
293 struct nv50_disp_dmac *dmac = (void *)object; 73 if (disp->func->outp.internal.dp)
294 int chid = dmac->base.chid; 74 return disp->func->outp.internal.dp(base, index, dcb, poutp);
295 75 return -ENODEV;
296 /* deactivate channel */
297 nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
298 nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
299 if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) {
300 nv_error(dmac, "fini timeout, 0x%08x\n",
301 nv_rd32(priv, 0x610200 + (chid * 0x10)));
302 if (suspend)
303 return -EBUSY;
304 }
305
306 /* disable error reporting and completion notifications */
307 nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
308
309 return nv50_disp_chan_fini(&dmac->base, suspend);
310}
311
312/*******************************************************************************
313 * EVO master channel object
314 ******************************************************************************/
315
316static void
317nv50_disp_mthd_list(struct nv50_disp_priv *priv, int debug, u32 base, int c,
318 const struct nv50_disp_mthd_list *list, int inst)
319{
320 struct nvkm_object *disp = nv_object(priv);
321 int i;
322
323 for (i = 0; list->data[i].mthd; i++) {
324 if (list->data[i].addr) {
325 u32 next = nv_rd32(priv, list->data[i].addr + base + 0);
326 u32 prev = nv_rd32(priv, list->data[i].addr + base + c);
327 u32 mthd = list->data[i].mthd + (list->mthd * inst);
328 const char *name = list->data[i].name;
329 char mods[16];
330
331 if (prev != next)
332 snprintf(mods, sizeof(mods), "-> 0x%08x", next);
333 else
334 snprintf(mods, sizeof(mods), "%13c", ' ');
335
336 nv_printk_(disp, debug, "\t0x%04x: 0x%08x %s%s%s\n",
337 mthd, prev, mods, name ? " // " : "",
338 name ? name : "");
339 }
340 }
341}
342
343void
344nv50_disp_mthd_chan(struct nv50_disp_priv *priv, int debug, int head,
345 const struct nv50_disp_mthd_chan *chan)
346{
347 struct nvkm_object *disp = nv_object(priv);
348 const struct nv50_disp_impl *impl = (void *)disp->oclass;
349 const struct nv50_disp_mthd_list *list;
350 int i, j;
351
352 if (debug > nv_subdev(priv)->debug)
353 return;
354
355 for (i = 0; (list = chan->data[i].mthd) != NULL; i++) {
356 u32 base = head * chan->addr;
357 for (j = 0; j < chan->data[i].nr; j++, base += list->addr) {
358 const char *cname = chan->name;
359 const char *sname = "";
360 char cname_[16], sname_[16];
361
362 if (chan->addr) {
363 snprintf(cname_, sizeof(cname_), "%s %d",
364 chan->name, head);
365 cname = cname_;
366 }
367
368 if (chan->data[i].nr > 1) {
369 snprintf(sname_, sizeof(sname_), " - %s %d",
370 chan->data[i].name, j);
371 sname = sname_;
372 }
373
374 nv_printk_(disp, debug, "%s%s:\n", cname, sname);
375 nv50_disp_mthd_list(priv, debug, base, impl->mthd.prev,
376 list, j);
377 }
378 }
379}
380
381const struct nv50_disp_mthd_list
382nv50_disp_core_mthd_base = {
383 .mthd = 0x0000,
384 .addr = 0x000000,
385 .data = {
386 { 0x0080, 0x000000 },
387 { 0x0084, 0x610bb8 },
388 { 0x0088, 0x610b9c },
389 { 0x008c, 0x000000 },
390 {}
391 }
392};
393
394static const struct nv50_disp_mthd_list
395nv50_disp_core_mthd_dac = {
396 .mthd = 0x0080,
397 .addr = 0x000008,
398 .data = {
399 { 0x0400, 0x610b58 },
400 { 0x0404, 0x610bdc },
401 { 0x0420, 0x610828 },
402 {}
403 }
404};
405
406const struct nv50_disp_mthd_list
407nv50_disp_core_mthd_sor = {
408 .mthd = 0x0040,
409 .addr = 0x000008,
410 .data = {
411 { 0x0600, 0x610b70 },
412 {}
413 }
414};
415
416const struct nv50_disp_mthd_list
417nv50_disp_core_mthd_pior = {
418 .mthd = 0x0040,
419 .addr = 0x000008,
420 .data = {
421 { 0x0700, 0x610b80 },
422 {}
423 }
424};
425
426static const struct nv50_disp_mthd_list
427nv50_disp_core_mthd_head = {
428 .mthd = 0x0400,
429 .addr = 0x000540,
430 .data = {
431 { 0x0800, 0x610ad8 },
432 { 0x0804, 0x610ad0 },
433 { 0x0808, 0x610a48 },
434 { 0x080c, 0x610a78 },
435 { 0x0810, 0x610ac0 },
436 { 0x0814, 0x610af8 },
437 { 0x0818, 0x610b00 },
438 { 0x081c, 0x610ae8 },
439 { 0x0820, 0x610af0 },
440 { 0x0824, 0x610b08 },
441 { 0x0828, 0x610b10 },
442 { 0x082c, 0x610a68 },
443 { 0x0830, 0x610a60 },
444 { 0x0834, 0x000000 },
445 { 0x0838, 0x610a40 },
446 { 0x0840, 0x610a24 },
447 { 0x0844, 0x610a2c },
448 { 0x0848, 0x610aa8 },
449 { 0x084c, 0x610ab0 },
450 { 0x0860, 0x610a84 },
451 { 0x0864, 0x610a90 },
452 { 0x0868, 0x610b18 },
453 { 0x086c, 0x610b20 },
454 { 0x0870, 0x610ac8 },
455 { 0x0874, 0x610a38 },
456 { 0x0880, 0x610a58 },
457 { 0x0884, 0x610a9c },
458 { 0x08a0, 0x610a70 },
459 { 0x08a4, 0x610a50 },
460 { 0x08a8, 0x610ae0 },
461 { 0x08c0, 0x610b28 },
462 { 0x08c4, 0x610b30 },
463 { 0x08c8, 0x610b40 },
464 { 0x08d4, 0x610b38 },
465 { 0x08d8, 0x610b48 },
466 { 0x08dc, 0x610b50 },
467 { 0x0900, 0x610a18 },
468 { 0x0904, 0x610ab8 },
469 {}
470 }
471};
472
473static const struct nv50_disp_mthd_chan
474nv50_disp_core_mthd_chan = {
475 .name = "Core",
476 .addr = 0x000000,
477 .data = {
478 { "Global", 1, &nv50_disp_core_mthd_base },
479 { "DAC", 3, &nv50_disp_core_mthd_dac },
480 { "SOR", 2, &nv50_disp_core_mthd_sor },
481 { "PIOR", 3, &nv50_disp_core_mthd_pior },
482 { "HEAD", 2, &nv50_disp_core_mthd_head },
483 {}
484 }
485};
486
487int
488nv50_disp_core_ctor(struct nvkm_object *parent,
489 struct nvkm_object *engine,
490 struct nvkm_oclass *oclass, void *data, u32 size,
491 struct nvkm_object **pobject)
492{
493 union {
494 struct nv50_disp_core_channel_dma_v0 v0;
495 } *args = data;
496 struct nv50_disp_dmac *mast;
497 int ret;
498
499 nv_ioctl(parent, "create disp core channel dma size %d\n", size);
500 if (nvif_unpack(args->v0, 0, 0, false)) {
501 nv_ioctl(parent, "create disp core channel dma vers %d "
502 "pushbuf %08x\n",
503 args->v0.version, args->v0.pushbuf);
504 } else
505 return ret;
506
507 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
508 0, sizeof(*mast), (void **)&mast);
509 *pobject = nv_object(mast);
510 if (ret)
511 return ret;
512
513 return 0;
514} 76}
515 77
516static int 78static int
517nv50_disp_core_init(struct nvkm_object *object) 79nv50_disp_outp_external_tmds_(struct nvkm_disp *base, int index,
80 struct dcb_output *dcb,
81 struct nvkm_output **poutp)
518{ 82{
519 struct nv50_disp_priv *priv = (void *)object->engine; 83 struct nv50_disp *disp = nv50_disp(base);
520 struct nv50_disp_dmac *mast = (void *)object; 84 if (disp->func->outp.external.tmds)
521 int ret; 85 return disp->func->outp.external.tmds(base, index, dcb, poutp);
522 86 return -ENODEV;
523 ret = nv50_disp_chan_init(&mast->base);
524 if (ret)
525 return ret;
526
527 /* enable error reporting */
528 nv_mask(priv, 0x610028, 0x00010000, 0x00010000);
529
530 /* attempt to unstick channel from some unknown state */
531 if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
532 nv_mask(priv, 0x610200, 0x00800000, 0x00800000);
533 if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000)
534 nv_mask(priv, 0x610200, 0x00600000, 0x00600000);
535
536 /* initialise channel for dma command submission */
537 nv_wr32(priv, 0x610204, mast->push);
538 nv_wr32(priv, 0x610208, 0x00010000);
539 nv_wr32(priv, 0x61020c, 0x00000000);
540 nv_mask(priv, 0x610200, 0x00000010, 0x00000010);
541 nv_wr32(priv, 0x640000, 0x00000000);
542 nv_wr32(priv, 0x610200, 0x01000013);
543
544 /* wait for it to go inactive */
545 if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) {
546 nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200));
547 return -EBUSY;
548 }
549
550 return 0;
551} 87}
552 88
553static int 89static int
554nv50_disp_core_fini(struct nvkm_object *object, bool suspend) 90nv50_disp_outp_external_dp_(struct nvkm_disp *base, int index,
555{ 91 struct dcb_output *dcb, struct nvkm_output **poutp)
556 struct nv50_disp_priv *priv = (void *)object->engine;
557 struct nv50_disp_dmac *mast = (void *)object;
558
559 /* deactivate channel */
560 nv_mask(priv, 0x610200, 0x00000010, 0x00000000);
561 nv_mask(priv, 0x610200, 0x00000003, 0x00000000);
562 if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) {
563 nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200));
564 if (suspend)
565 return -EBUSY;
566 }
567
568 /* disable error reporting and completion notifications */
569 nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
570
571 return nv50_disp_chan_fini(&mast->base, suspend);
572}
573
574struct nv50_disp_chan_impl
575nv50_disp_core_ofuncs = {
576 .base.ctor = nv50_disp_core_ctor,
577 .base.dtor = nv50_disp_dmac_dtor,
578 .base.init = nv50_disp_core_init,
579 .base.fini = nv50_disp_core_fini,
580 .base.map = nv50_disp_chan_map,
581 .base.ntfy = nv50_disp_chan_ntfy,
582 .base.rd32 = nv50_disp_chan_rd32,
583 .base.wr32 = nv50_disp_chan_wr32,
584 .chid = 0,
585 .attach = nv50_disp_dmac_object_attach,
586 .detach = nv50_disp_dmac_object_detach,
587};
588
589/*******************************************************************************
590 * EVO sync channel objects
591 ******************************************************************************/
592
593static const struct nv50_disp_mthd_list
594nv50_disp_base_mthd_base = {
595 .mthd = 0x0000,
596 .addr = 0x000000,
597 .data = {
598 { 0x0080, 0x000000 },
599 { 0x0084, 0x0008c4 },
600 { 0x0088, 0x0008d0 },
601 { 0x008c, 0x0008dc },
602 { 0x0090, 0x0008e4 },
603 { 0x0094, 0x610884 },
604 { 0x00a0, 0x6108a0 },
605 { 0x00a4, 0x610878 },
606 { 0x00c0, 0x61086c },
607 { 0x00e0, 0x610858 },
608 { 0x00e4, 0x610860 },
609 { 0x00e8, 0x6108ac },
610 { 0x00ec, 0x6108b4 },
611 { 0x0100, 0x610894 },
612 { 0x0110, 0x6108bc },
613 { 0x0114, 0x61088c },
614 {}
615 }
616};
617
618const struct nv50_disp_mthd_list
619nv50_disp_base_mthd_image = {
620 .mthd = 0x0400,
621 .addr = 0x000000,
622 .data = {
623 { 0x0800, 0x6108f0 },
624 { 0x0804, 0x6108fc },
625 { 0x0808, 0x61090c },
626 { 0x080c, 0x610914 },
627 { 0x0810, 0x610904 },
628 {}
629 }
630};
631
632static const struct nv50_disp_mthd_chan
633nv50_disp_base_mthd_chan = {
634 .name = "Base",
635 .addr = 0x000540,
636 .data = {
637 { "Global", 1, &nv50_disp_base_mthd_base },
638 { "Image", 2, &nv50_disp_base_mthd_image },
639 {}
640 }
641};
642
643int
644nv50_disp_base_ctor(struct nvkm_object *parent,
645 struct nvkm_object *engine,
646 struct nvkm_oclass *oclass, void *data, u32 size,
647 struct nvkm_object **pobject)
648{ 92{
649 union { 93 struct nv50_disp *disp = nv50_disp(base);
650 struct nv50_disp_base_channel_dma_v0 v0; 94 if (disp->func->outp.external.dp)
651 } *args = data; 95 return disp->func->outp.external.dp(base, index, dcb, poutp);
652 struct nv50_disp_priv *priv = (void *)engine; 96 return -ENODEV;
653 struct nv50_disp_dmac *dmac;
654 int ret;
655
656 nv_ioctl(parent, "create disp base channel dma size %d\n", size);
657 if (nvif_unpack(args->v0, 0, 0, false)) {
658 nv_ioctl(parent, "create disp base channel dma vers %d "
659 "pushbuf %08x head %d\n",
660 args->v0.version, args->v0.pushbuf, args->v0.head);
661 if (args->v0.head > priv->head.nr)
662 return -EINVAL;
663 } else
664 return ret;
665
666 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
667 args->v0.head, sizeof(*dmac),
668 (void **)&dmac);
669 *pobject = nv_object(dmac);
670 if (ret)
671 return ret;
672
673 return 0;
674} 97}
675 98
676struct nv50_disp_chan_impl 99static void
677nv50_disp_base_ofuncs = { 100nv50_disp_vblank_fini_(struct nvkm_disp *base, int head)
678 .base.ctor = nv50_disp_base_ctor,
679 .base.dtor = nv50_disp_dmac_dtor,
680 .base.init = nv50_disp_dmac_init,
681 .base.fini = nv50_disp_dmac_fini,
682 .base.ntfy = nv50_disp_chan_ntfy,
683 .base.map = nv50_disp_chan_map,
684 .base.rd32 = nv50_disp_chan_rd32,
685 .base.wr32 = nv50_disp_chan_wr32,
686 .chid = 1,
687 .attach = nv50_disp_dmac_object_attach,
688 .detach = nv50_disp_dmac_object_detach,
689};
690
691/*******************************************************************************
692 * EVO overlay channel objects
693 ******************************************************************************/
694
695const struct nv50_disp_mthd_list
696nv50_disp_ovly_mthd_base = {
697 .mthd = 0x0000,
698 .addr = 0x000000,
699 .data = {
700 { 0x0080, 0x000000 },
701 { 0x0084, 0x0009a0 },
702 { 0x0088, 0x0009c0 },
703 { 0x008c, 0x0009c8 },
704 { 0x0090, 0x6109b4 },
705 { 0x0094, 0x610970 },
706 { 0x00a0, 0x610998 },
707 { 0x00a4, 0x610964 },
708 { 0x00c0, 0x610958 },
709 { 0x00e0, 0x6109a8 },
710 { 0x00e4, 0x6109d0 },
711 { 0x00e8, 0x6109d8 },
712 { 0x0100, 0x61094c },
713 { 0x0104, 0x610984 },
714 { 0x0108, 0x61098c },
715 { 0x0800, 0x6109f8 },
716 { 0x0808, 0x610a08 },
717 { 0x080c, 0x610a10 },
718 { 0x0810, 0x610a00 },
719 {}
720 }
721};
722
723static const struct nv50_disp_mthd_chan
724nv50_disp_ovly_mthd_chan = {
725 .name = "Overlay",
726 .addr = 0x000540,
727 .data = {
728 { "Global", 1, &nv50_disp_ovly_mthd_base },
729 {}
730 }
731};
732
733int
734nv50_disp_ovly_ctor(struct nvkm_object *parent,
735 struct nvkm_object *engine,
736 struct nvkm_oclass *oclass, void *data, u32 size,
737 struct nvkm_object **pobject)
738{
739 union {
740 struct nv50_disp_overlay_channel_dma_v0 v0;
741 } *args = data;
742 struct nv50_disp_priv *priv = (void *)engine;
743 struct nv50_disp_dmac *dmac;
744 int ret;
745
746 nv_ioctl(parent, "create disp overlay channel dma size %d\n", size);
747 if (nvif_unpack(args->v0, 0, 0, false)) {
748 nv_ioctl(parent, "create disp overlay channel dma vers %d "
749 "pushbuf %08x head %d\n",
750 args->v0.version, args->v0.pushbuf, args->v0.head);
751 if (args->v0.head > priv->head.nr)
752 return -EINVAL;
753 } else
754 return ret;
755
756 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
757 args->v0.head, sizeof(*dmac),
758 (void **)&dmac);
759 *pobject = nv_object(dmac);
760 if (ret)
761 return ret;
762
763 return 0;
764}
765
766struct nv50_disp_chan_impl
767nv50_disp_ovly_ofuncs = {
768 .base.ctor = nv50_disp_ovly_ctor,
769 .base.dtor = nv50_disp_dmac_dtor,
770 .base.init = nv50_disp_dmac_init,
771 .base.fini = nv50_disp_dmac_fini,
772 .base.ntfy = nv50_disp_chan_ntfy,
773 .base.map = nv50_disp_chan_map,
774 .base.rd32 = nv50_disp_chan_rd32,
775 .base.wr32 = nv50_disp_chan_wr32,
776 .chid = 3,
777 .attach = nv50_disp_dmac_object_attach,
778 .detach = nv50_disp_dmac_object_detach,
779};
780
781/*******************************************************************************
782 * EVO PIO channel base class
783 ******************************************************************************/
784
785static int
786nv50_disp_pioc_create_(struct nvkm_object *parent,
787 struct nvkm_object *engine,
788 struct nvkm_oclass *oclass, int head,
789 int length, void **pobject)
790{
791 return nv50_disp_chan_create_(parent, engine, oclass, head,
792 length, pobject);
793}
794
795void
796nv50_disp_pioc_dtor(struct nvkm_object *object)
797{
798 struct nv50_disp_pioc *pioc = (void *)object;
799 nv50_disp_chan_destroy(&pioc->base);
800}
801
802static int
803nv50_disp_pioc_init(struct nvkm_object *object)
804{ 101{
805 struct nv50_disp_priv *priv = (void *)object->engine; 102 struct nv50_disp *disp = nv50_disp(base);
806 struct nv50_disp_pioc *pioc = (void *)object; 103 disp->func->head.vblank_fini(disp, head);
807 int chid = pioc->base.chid;
808 int ret;
809
810 ret = nv50_disp_chan_init(&pioc->base);
811 if (ret)
812 return ret;
813
814 nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000);
815 if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) {
816 nv_error(pioc, "timeout0: 0x%08x\n",
817 nv_rd32(priv, 0x610200 + (chid * 0x10)));
818 return -EBUSY;
819 }
820
821 nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001);
822 if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) {
823 nv_error(pioc, "timeout1: 0x%08x\n",
824 nv_rd32(priv, 0x610200 + (chid * 0x10)));
825 return -EBUSY;
826 }
827
828 return 0;
829} 104}
830 105
831static int 106static void
832nv50_disp_pioc_fini(struct nvkm_object *object, bool suspend) 107nv50_disp_vblank_init_(struct nvkm_disp *base, int head)
833{ 108{
834 struct nv50_disp_priv *priv = (void *)object->engine; 109 struct nv50_disp *disp = nv50_disp(base);
835 struct nv50_disp_pioc *pioc = (void *)object; 110 disp->func->head.vblank_init(disp, head);
836 int chid = pioc->base.chid;
837
838 nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
839 if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) {
840 nv_error(pioc, "timeout: 0x%08x\n",
841 nv_rd32(priv, 0x610200 + (chid * 0x10)));
842 if (suspend)
843 return -EBUSY;
844 }
845
846 return nv50_disp_chan_fini(&pioc->base, suspend);
847} 111}
848 112
849/******************************************************************************* 113static void
850 * EVO immediate overlay channel objects 114nv50_disp_intr_(struct nvkm_disp *base)
851 ******************************************************************************/
852
853int
854nv50_disp_oimm_ctor(struct nvkm_object *parent,
855 struct nvkm_object *engine,
856 struct nvkm_oclass *oclass, void *data, u32 size,
857 struct nvkm_object **pobject)
858{ 115{
859 union { 116 struct nv50_disp *disp = nv50_disp(base);
860 struct nv50_disp_overlay_v0 v0; 117 disp->func->intr(disp);
861 } *args = data;
862 struct nv50_disp_priv *priv = (void *)engine;
863 struct nv50_disp_pioc *pioc;
864 int ret;
865
866 nv_ioctl(parent, "create disp overlay size %d\n", size);
867 if (nvif_unpack(args->v0, 0, 0, false)) {
868 nv_ioctl(parent, "create disp overlay vers %d head %d\n",
869 args->v0.version, args->v0.head);
870 if (args->v0.head > priv->head.nr)
871 return -EINVAL;
872 } else
873 return ret;
874
875 ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
876 sizeof(*pioc), (void **)&pioc);
877 *pobject = nv_object(pioc);
878 if (ret)
879 return ret;
880
881 return 0;
882} 118}
883 119
884struct nv50_disp_chan_impl 120static void *
885nv50_disp_oimm_ofuncs = { 121nv50_disp_dtor_(struct nvkm_disp *base)
886 .base.ctor = nv50_disp_oimm_ctor,
887 .base.dtor = nv50_disp_pioc_dtor,
888 .base.init = nv50_disp_pioc_init,
889 .base.fini = nv50_disp_pioc_fini,
890 .base.ntfy = nv50_disp_chan_ntfy,
891 .base.map = nv50_disp_chan_map,
892 .base.rd32 = nv50_disp_chan_rd32,
893 .base.wr32 = nv50_disp_chan_wr32,
894 .chid = 5,
895};
896
897/*******************************************************************************
898 * EVO cursor channel objects
899 ******************************************************************************/
900
901int
902nv50_disp_curs_ctor(struct nvkm_object *parent,
903 struct nvkm_object *engine,
904 struct nvkm_oclass *oclass, void *data, u32 size,
905 struct nvkm_object **pobject)
906{ 122{
907 union { 123 struct nv50_disp *disp = nv50_disp(base);
908 struct nv50_disp_cursor_v0 v0; 124 nvkm_event_fini(&disp->uevent);
909 } *args = data; 125 return disp;
910 struct nv50_disp_priv *priv = (void *)engine;
911 struct nv50_disp_pioc *pioc;
912 int ret;
913
914 nv_ioctl(parent, "create disp cursor size %d\n", size);
915 if (nvif_unpack(args->v0, 0, 0, false)) {
916 nv_ioctl(parent, "create disp cursor vers %d head %d\n",
917 args->v0.version, args->v0.head);
918 if (args->v0.head > priv->head.nr)
919 return -EINVAL;
920 } else
921 return ret;
922
923 ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
924 sizeof(*pioc), (void **)&pioc);
925 *pobject = nv_object(pioc);
926 if (ret)
927 return ret;
928
929 return 0;
930} 126}
931 127
932struct nv50_disp_chan_impl 128static const struct nvkm_disp_func
933nv50_disp_curs_ofuncs = { 129nv50_disp_ = {
934 .base.ctor = nv50_disp_curs_ctor, 130 .dtor = nv50_disp_dtor_,
935 .base.dtor = nv50_disp_pioc_dtor, 131 .intr = nv50_disp_intr_,
936 .base.init = nv50_disp_pioc_init, 132 .root = nv50_disp_root_,
937 .base.fini = nv50_disp_pioc_fini, 133 .outp.internal.crt = nv50_disp_outp_internal_crt_,
938 .base.ntfy = nv50_disp_chan_ntfy, 134 .outp.internal.tmds = nv50_disp_outp_internal_tmds_,
939 .base.map = nv50_disp_chan_map, 135 .outp.internal.lvds = nv50_disp_outp_internal_lvds_,
940 .base.rd32 = nv50_disp_chan_rd32, 136 .outp.internal.dp = nv50_disp_outp_internal_dp_,
941 .base.wr32 = nv50_disp_chan_wr32, 137 .outp.external.tmds = nv50_disp_outp_external_tmds_,
942 .chid = 7, 138 .outp.external.dp = nv50_disp_outp_external_dp_,
139 .head.vblank_init = nv50_disp_vblank_init_,
140 .head.vblank_fini = nv50_disp_vblank_fini_,
943}; 141};
944 142
945/*******************************************************************************
946 * Base display object
947 ******************************************************************************/
948
949int 143int
950nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0) 144nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
145 int index, int heads, struct nvkm_disp **pdisp)
951{ 146{
952 const u32 blanke = nv_rd32(priv, 0x610aec + (head * 0x540)); 147 struct nv50_disp *disp;
953 const u32 blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
954 const u32 total = nv_rd32(priv, 0x610afc + (head * 0x540));
955 union {
956 struct nv04_disp_scanoutpos_v0 v0;
957 } *args = data;
958 int ret; 148 int ret;
959 149
960 nv_ioctl(object, "disp scanoutpos size %d\n", size); 150 if (!(disp = kzalloc(sizeof(*disp), GFP_KERNEL)))
961 if (nvif_unpack(args->v0, 0, 0, false)) { 151 return -ENOMEM;
962 nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version); 152 INIT_WORK(&disp->supervisor, func->super);
963 args->v0.vblanke = (blanke & 0xffff0000) >> 16; 153 disp->func = func;
964 args->v0.hblanke = (blanke & 0x0000ffff); 154 *pdisp = &disp->base;
965 args->v0.vblanks = (blanks & 0xffff0000) >> 16;
966 args->v0.hblanks = (blanks & 0x0000ffff);
967 args->v0.vtotal = ( total & 0xffff0000) >> 16;
968 args->v0.htotal = ( total & 0x0000ffff);
969 args->v0.time[0] = ktime_to_ns(ktime_get());
970 args->v0.vline = /* vline read locks hline */
971 nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
972 args->v0.time[1] = ktime_to_ns(ktime_get());
973 args->v0.hline =
974 nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
975 } else
976 return ret;
977
978 return 0;
979}
980
981int
982nv50_disp_main_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
983{
984 const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
985 union {
986 struct nv50_disp_mthd_v0 v0;
987 struct nv50_disp_mthd_v1 v1;
988 } *args = data;
989 struct nv50_disp_priv *priv = (void *)object->engine;
990 struct nvkm_output *outp = NULL;
991 struct nvkm_output *temp;
992 u16 type, mask = 0;
993 int head, ret;
994
995 if (mthd != NV50_DISP_MTHD)
996 return -EINVAL;
997
998 nv_ioctl(object, "disp mthd size %d\n", size);
999 if (nvif_unpack(args->v0, 0, 0, true)) {
1000 nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
1001 args->v0.version, args->v0.method, args->v0.head);
1002 mthd = args->v0.method;
1003 head = args->v0.head;
1004 } else
1005 if (nvif_unpack(args->v1, 1, 1, true)) {
1006 nv_ioctl(object, "disp mthd vers %d mthd %02x "
1007 "type %04x mask %04x\n",
1008 args->v1.version, args->v1.method,
1009 args->v1.hasht, args->v1.hashm);
1010 mthd = args->v1.method;
1011 type = args->v1.hasht;
1012 mask = args->v1.hashm;
1013 head = ffs((mask >> 8) & 0x0f) - 1;
1014 } else
1015 return ret;
1016
1017 if (head < 0 || head >= priv->head.nr)
1018 return -ENXIO;
1019
1020 if (mask) {
1021 list_for_each_entry(temp, &priv->base.outp, head) {
1022 if ((temp->info.hasht == type) &&
1023 (temp->info.hashm & mask) == mask) {
1024 outp = temp;
1025 break;
1026 }
1027 }
1028 if (outp == NULL)
1029 return -ENXIO;
1030 }
1031
1032 switch (mthd) {
1033 case NV50_DISP_SCANOUTPOS:
1034 return impl->head.scanoutpos(object, priv, data, size, head);
1035 default:
1036 break;
1037 }
1038
1039 switch (mthd * !!outp) {
1040 case NV50_DISP_MTHD_V1_DAC_PWR:
1041 return priv->dac.power(object, priv, data, size, head, outp);
1042 case NV50_DISP_MTHD_V1_DAC_LOAD:
1043 return priv->dac.sense(object, priv, data, size, head, outp);
1044 case NV50_DISP_MTHD_V1_SOR_PWR:
1045 return priv->sor.power(object, priv, data, size, head, outp);
1046 case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
1047 if (!priv->sor.hda_eld)
1048 return -ENODEV;
1049 return priv->sor.hda_eld(object, priv, data, size, head, outp);
1050 case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
1051 if (!priv->sor.hdmi)
1052 return -ENODEV;
1053 return priv->sor.hdmi(object, priv, data, size, head, outp);
1054 case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
1055 union {
1056 struct nv50_disp_sor_lvds_script_v0 v0;
1057 } *args = data;
1058 nv_ioctl(object, "disp sor lvds script size %d\n", size);
1059 if (nvif_unpack(args->v0, 0, 0, false)) {
1060 nv_ioctl(object, "disp sor lvds script "
1061 "vers %d name %04x\n",
1062 args->v0.version, args->v0.script);
1063 priv->sor.lvdsconf = args->v0.script;
1064 return 0;
1065 } else
1066 return ret;
1067 }
1068 break;
1069 case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
1070 struct nvkm_output_dp *outpdp = (void *)outp;
1071 union {
1072 struct nv50_disp_sor_dp_pwr_v0 v0;
1073 } *args = data;
1074 nv_ioctl(object, "disp sor dp pwr size %d\n", size);
1075 if (nvif_unpack(args->v0, 0, 0, false)) {
1076 nv_ioctl(object, "disp sor dp pwr vers %d state %d\n",
1077 args->v0.version, args->v0.state);
1078 if (args->v0.state == 0) {
1079 nvkm_notify_put(&outpdp->irq);
1080 ((struct nvkm_output_dp_impl *)nv_oclass(outp))
1081 ->lnk_pwr(outpdp, 0);
1082 atomic_set(&outpdp->lt.done, 0);
1083 return 0;
1084 } else
1085 if (args->v0.state != 0) {
1086 nvkm_output_dp_train(&outpdp->base, 0, true);
1087 return 0;
1088 }
1089 } else
1090 return ret;
1091 }
1092 break;
1093 case NV50_DISP_MTHD_V1_PIOR_PWR:
1094 if (!priv->pior.power)
1095 return -ENODEV;
1096 return priv->pior.power(object, priv, data, size, head, outp);
1097 default:
1098 break;
1099 }
1100
1101 return -EINVAL;
1102}
1103
1104int
1105nv50_disp_main_ctor(struct nvkm_object *parent,
1106 struct nvkm_object *engine,
1107 struct nvkm_oclass *oclass, void *data, u32 size,
1108 struct nvkm_object **pobject)
1109{
1110 struct nv50_disp_priv *priv = (void *)engine;
1111 struct nv50_disp_base *base;
1112 int ret;
1113 155
1114 ret = nvkm_parent_create(parent, engine, oclass, 0, 156 ret = nvkm_disp_ctor(&nv50_disp_, device, index, heads, &disp->base);
1115 priv->sclass, 0, &base);
1116 *pobject = nv_object(base);
1117 if (ret) 157 if (ret)
1118 return ret; 158 return ret;
1119 159
1120 return nvkm_ramht_new(nv_object(base), nv_object(base), 0x1000, 0, 160 return nvkm_event_init(func->uevent, 1, 1 + (heads * 4), &disp->uevent);
1121 &base->ramht);
1122} 161}
1123 162
1124void 163void
1125nv50_disp_main_dtor(struct nvkm_object *object) 164nv50_disp_vblank_fini(struct nv50_disp *disp, int head)
1126{
1127 struct nv50_disp_base *base = (void *)object;
1128 nvkm_ramht_ref(NULL, &base->ramht);
1129 nvkm_parent_destroy(&base->base);
1130}
1131
1132static int
1133nv50_disp_main_init(struct nvkm_object *object)
1134{
1135 struct nv50_disp_priv *priv = (void *)object->engine;
1136 struct nv50_disp_base *base = (void *)object;
1137 int ret, i;
1138 u32 tmp;
1139
1140 ret = nvkm_parent_init(&base->base);
1141 if (ret)
1142 return ret;
1143
1144 /* The below segments of code copying values from one register to
1145 * another appear to inform EVO of the display capabilities or
1146 * something similar. NFI what the 0x614004 caps are for..
1147 */
1148 tmp = nv_rd32(priv, 0x614004);
1149 nv_wr32(priv, 0x610184, tmp);
1150
1151 /* ... CRTC caps */
1152 for (i = 0; i < priv->head.nr; i++) {
1153 tmp = nv_rd32(priv, 0x616100 + (i * 0x800));
1154 nv_wr32(priv, 0x610190 + (i * 0x10), tmp);
1155 tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
1156 nv_wr32(priv, 0x610194 + (i * 0x10), tmp);
1157 tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
1158 nv_wr32(priv, 0x610198 + (i * 0x10), tmp);
1159 tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
1160 nv_wr32(priv, 0x61019c + (i * 0x10), tmp);
1161 }
1162
1163 /* ... DAC caps */
1164 for (i = 0; i < priv->dac.nr; i++) {
1165 tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
1166 nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp);
1167 }
1168
1169 /* ... SOR caps */
1170 for (i = 0; i < priv->sor.nr; i++) {
1171 tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
1172 nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
1173 }
1174
1175 /* ... PIOR caps */
1176 for (i = 0; i < priv->pior.nr; i++) {
1177 tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
1178 nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
1179 }
1180
1181 /* steal display away from vbios, or something like that */
1182 if (nv_rd32(priv, 0x610024) & 0x00000100) {
1183 nv_wr32(priv, 0x610024, 0x00000100);
1184 nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
1185 if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
1186 nv_error(priv, "timeout acquiring display\n");
1187 return -EBUSY;
1188 }
1189 }
1190
1191 /* point at display engine memory area (hash table, objects) */
1192 nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);
1193
1194 /* enable supervisor interrupts, disable everything else */
1195 nv_wr32(priv, 0x61002c, 0x00000370);
1196 nv_wr32(priv, 0x610028, 0x00000000);
1197 return 0;
1198}
1199
1200static int
1201nv50_disp_main_fini(struct nvkm_object *object, bool suspend)
1202{
1203 struct nv50_disp_priv *priv = (void *)object->engine;
1204 struct nv50_disp_base *base = (void *)object;
1205
1206 /* disable all interrupts */
1207 nv_wr32(priv, 0x610024, 0x00000000);
1208 nv_wr32(priv, 0x610020, 0x00000000);
1209
1210 return nvkm_parent_fini(&base->base, suspend);
1211}
1212
1213struct nvkm_ofuncs
1214nv50_disp_main_ofuncs = {
1215 .ctor = nv50_disp_main_ctor,
1216 .dtor = nv50_disp_main_dtor,
1217 .init = nv50_disp_main_init,
1218 .fini = nv50_disp_main_fini,
1219 .mthd = nv50_disp_main_mthd,
1220 .ntfy = nvkm_disp_ntfy,
1221};
1222
1223static struct nvkm_oclass
1224nv50_disp_main_oclass[] = {
1225 { NV50_DISP, &nv50_disp_main_ofuncs },
1226 {}
1227};
1228
1229static struct nvkm_oclass
1230nv50_disp_sclass[] = {
1231 { NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
1232 { NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
1233 { NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
1234 { NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
1235 { NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
1236 {}
1237};
1238
1239/*******************************************************************************
1240 * Display context, tracks instmem allocation and prevents more than one
1241 * client using the display hardware at any time.
1242 ******************************************************************************/
1243
1244static int
1245nv50_disp_data_ctor(struct nvkm_object *parent,
1246 struct nvkm_object *engine,
1247 struct nvkm_oclass *oclass, void *data, u32 size,
1248 struct nvkm_object **pobject)
1249{
1250 struct nv50_disp_priv *priv = (void *)engine;
1251 struct nvkm_engctx *ectx;
1252 int ret = -EBUSY;
1253
1254 /* no context needed for channel objects... */
1255 if (nv_mclass(parent) != NV_DEVICE) {
1256 atomic_inc(&parent->refcount);
1257 *pobject = parent;
1258 return 1;
1259 }
1260
1261 /* allocate display hardware to client */
1262 mutex_lock(&nv_subdev(priv)->mutex);
1263 if (list_empty(&nv_engine(priv)->contexts)) {
1264 ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000,
1265 0x10000, NVOBJ_FLAG_HEAP, &ectx);
1266 *pobject = nv_object(ectx);
1267 }
1268 mutex_unlock(&nv_subdev(priv)->mutex);
1269 return ret;
1270}
1271
1272struct nvkm_oclass
1273nv50_disp_cclass = {
1274 .handle = NV_ENGCTX(DISP, 0x50),
1275 .ofuncs = &(struct nvkm_ofuncs) {
1276 .ctor = nv50_disp_data_ctor,
1277 .dtor = _nvkm_engctx_dtor,
1278 .init = _nvkm_engctx_init,
1279 .fini = _nvkm_engctx_fini,
1280 .rd32 = _nvkm_engctx_rd32,
1281 .wr32 = _nvkm_engctx_wr32,
1282 },
1283};
1284
1285/*******************************************************************************
1286 * Display engine implementation
1287 ******************************************************************************/
1288
1289static void
1290nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head)
1291{ 165{
1292 struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank); 166 struct nvkm_device *device = disp->base.engine.subdev.device;
1293 nv_mask(disp, 0x61002c, (4 << head), 0); 167 nvkm_mask(device, 0x61002c, (4 << head), 0);
1294} 168}
1295 169
1296static void 170void
1297nv50_disp_vblank_init(struct nvkm_event *event, int type, int head) 171nv50_disp_vblank_init(struct nv50_disp *disp, int head)
1298{ 172{
1299 struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank); 173 struct nvkm_device *device = disp->base.engine.subdev.device;
1300 nv_mask(disp, 0x61002c, (4 << head), (4 << head)); 174 nvkm_mask(device, 0x61002c, (4 << head), (4 << head));
1301} 175}
1302 176
1303const struct nvkm_event_func
1304nv50_disp_vblank_func = {
1305 .ctor = nvkm_disp_vblank_ctor,
1306 .init = nv50_disp_vblank_init,
1307 .fini = nv50_disp_vblank_fini,
1308};
1309
1310static const struct nvkm_enum 177static const struct nvkm_enum
1311nv50_disp_intr_error_type[] = { 178nv50_disp_intr_error_type[] = {
1312 { 3, "ILLEGAL_MTHD" }, 179 { 3, "ILLEGAL_MTHD" },
@@ -1323,70 +190,46 @@ nv50_disp_intr_error_code[] = {
1323}; 190};
1324 191
1325static void 192static void
1326nv50_disp_intr_error(struct nv50_disp_priv *priv, int chid) 193nv50_disp_intr_error(struct nv50_disp *disp, int chid)
1327{ 194{
1328 struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass; 195 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
1329 u32 data = nv_rd32(priv, 0x610084 + (chid * 0x08)); 196 struct nvkm_device *device = subdev->device;
1330 u32 addr = nv_rd32(priv, 0x610080 + (chid * 0x08)); 197 u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
198 u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
1331 u32 code = (addr & 0x00ff0000) >> 16; 199 u32 code = (addr & 0x00ff0000) >> 16;
1332 u32 type = (addr & 0x00007000) >> 12; 200 u32 type = (addr & 0x00007000) >> 12;
1333 u32 mthd = (addr & 0x00000ffc); 201 u32 mthd = (addr & 0x00000ffc);
1334 const struct nvkm_enum *ec, *et; 202 const struct nvkm_enum *ec, *et;
1335 char ecunk[6], etunk[6];
1336 203
1337 et = nvkm_enum_find(nv50_disp_intr_error_type, type); 204 et = nvkm_enum_find(nv50_disp_intr_error_type, type);
1338 if (!et)
1339 snprintf(etunk, sizeof(etunk), "UNK%02X", type);
1340
1341 ec = nvkm_enum_find(nv50_disp_intr_error_code, code); 205 ec = nvkm_enum_find(nv50_disp_intr_error_code, code);
1342 if (!ec)
1343 snprintf(ecunk, sizeof(ecunk), "UNK%02X", code);
1344 206
1345 nv_error(priv, "%s [%s] chid %d mthd 0x%04x data 0x%08x\n", 207 nvkm_error(subdev,
1346 et ? et->name : etunk, ec ? ec->name : ecunk, 208 "ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
1347 chid, mthd, data); 209 type, et ? et->name : "", code, ec ? ec->name : "",
210 chid, mthd, data);
1348 211
1349 if (chid == 0) { 212 if (chid < ARRAY_SIZE(disp->chan)) {
1350 switch (mthd) {
1351 case 0x0080:
1352 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
1353 impl->mthd.core);
1354 break;
1355 default:
1356 break;
1357 }
1358 } else
1359 if (chid <= 2) {
1360 switch (mthd) {
1361 case 0x0080:
1362 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
1363 impl->mthd.base);
1364 break;
1365 default:
1366 break;
1367 }
1368 } else
1369 if (chid <= 4) {
1370 switch (mthd) { 213 switch (mthd) {
1371 case 0x0080: 214 case 0x0080:
1372 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 3, 215 nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
1373 impl->mthd.ovly);
1374 break; 216 break;
1375 default: 217 default:
1376 break; 218 break;
1377 } 219 }
1378 } 220 }
1379 221
1380 nv_wr32(priv, 0x610020, 0x00010000 << chid); 222 nvkm_wr32(device, 0x610020, 0x00010000 << chid);
1381 nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000); 223 nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
1382} 224}
1383 225
1384static struct nvkm_output * 226static struct nvkm_output *
1385exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl, 227exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
1386 u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, 228 u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
1387 struct nvbios_outp *info) 229 struct nvbios_outp *info)
1388{ 230{
1389 struct nvkm_bios *bios = nvkm_bios(priv); 231 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
232 struct nvkm_bios *bios = subdev->device->bios;
1390 struct nvkm_output *outp; 233 struct nvkm_output *outp;
1391 u16 mask, type; 234 u16 mask, type;
1392 235
@@ -1403,7 +246,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
1403 case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break; 246 case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
1404 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break; 247 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
1405 default: 248 default:
1406 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl); 249 nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
1407 return NULL; 250 return NULL;
1408 } 251 }
1409 or -= 4; 252 or -= 4;
@@ -1412,9 +255,9 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
1412 type = 0x0010; 255 type = 0x0010;
1413 mask = 0; 256 mask = 0;
1414 switch (ctrl & 0x00000f00) { 257 switch (ctrl & 0x00000f00) {
1415 case 0x00000000: type |= priv->pior.type[or]; break; 258 case 0x00000000: type |= disp->pior.type[or]; break;
1416 default: 259 default:
1417 nv_error(priv, "unknown PIOR mc 0x%08x\n", ctrl); 260 nvkm_error(subdev, "unknown PIOR mc %08x\n", ctrl);
1418 return NULL; 261 return NULL;
1419 } 262 }
1420 } 263 }
@@ -1423,7 +266,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
1423 mask |= 0x0001 << or; 266 mask |= 0x0001 << or;
1424 mask |= 0x0100 << head; 267 mask |= 0x0100 << head;
1425 268
1426 list_for_each_entry(outp, &priv->base.outp, head) { 269 list_for_each_entry(outp, &disp->base.outp, head) {
1427 if ((outp->info.hasht & 0xff) == type && 270 if ((outp->info.hasht & 0xff) == type &&
1428 (outp->info.hashm & mask) == mask) { 271 (outp->info.hashm & mask) == mask) {
1429 *data = nvbios_outp_match(bios, outp->info.hasht, 272 *data = nvbios_outp_match(bios, outp->info.hasht,
@@ -1439,9 +282,11 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
1439} 282}
1440 283
1441static struct nvkm_output * 284static struct nvkm_output *
1442exec_script(struct nv50_disp_priv *priv, int head, int id) 285exec_script(struct nv50_disp *disp, int head, int id)
1443{ 286{
1444 struct nvkm_bios *bios = nvkm_bios(priv); 287 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
288 struct nvkm_device *device = subdev->device;
289 struct nvkm_bios *bios = device->bios;
1445 struct nvkm_output *outp; 290 struct nvkm_output *outp;
1446 struct nvbios_outp info; 291 struct nvbios_outp info;
1447 u8 ver, hdr, cnt, len; 292 u8 ver, hdr, cnt, len;
@@ -1450,27 +295,27 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
1450 int i; 295 int i;
1451 296
1452 /* DAC */ 297 /* DAC */
1453 for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++) 298 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->dac.nr; i++)
1454 ctrl = nv_rd32(priv, 0x610b5c + (i * 8)); 299 ctrl = nvkm_rd32(device, 0x610b5c + (i * 8));
1455 300
1456 /* SOR */ 301 /* SOR */
1457 if (!(ctrl & (1 << head))) { 302 if (!(ctrl & (1 << head))) {
1458 if (nv_device(priv)->chipset < 0x90 || 303 if (device->chipset < 0x90 ||
1459 nv_device(priv)->chipset == 0x92 || 304 device->chipset == 0x92 ||
1460 nv_device(priv)->chipset == 0xa0) { 305 device->chipset == 0xa0) {
1461 reg = 0x610b74; 306 reg = 0x610b74;
1462 } else { 307 } else {
1463 reg = 0x610798; 308 reg = 0x610798;
1464 } 309 }
1465 for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++) 310 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->sor.nr; i++)
1466 ctrl = nv_rd32(priv, reg + (i * 8)); 311 ctrl = nvkm_rd32(device, reg + (i * 8));
1467 i += 4; 312 i += 4;
1468 } 313 }
1469 314
1470 /* PIOR */ 315 /* PIOR */
1471 if (!(ctrl & (1 << head))) { 316 if (!(ctrl & (1 << head))) {
1472 for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++) 317 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->pior.nr; i++)
1473 ctrl = nv_rd32(priv, 0x610b84 + (i * 8)); 318 ctrl = nvkm_rd32(device, 0x610b84 + (i * 8));
1474 i += 8; 319 i += 8;
1475 } 320 }
1476 321
@@ -1478,10 +323,10 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
1478 return NULL; 323 return NULL;
1479 i--; 324 i--;
1480 325
1481 outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info); 326 outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
1482 if (outp) { 327 if (outp) {
1483 struct nvbios_init init = { 328 struct nvbios_init init = {
1484 .subdev = nv_subdev(priv), 329 .subdev = subdev,
1485 .bios = bios, 330 .bios = bios,
1486 .offset = info.script[id], 331 .offset = info.script[id],
1487 .outp = &outp->info, 332 .outp = &outp->info,
@@ -1496,9 +341,11 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
1496} 341}
1497 342
1498static struct nvkm_output * 343static struct nvkm_output *
1499exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf) 344exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
1500{ 345{
1501 struct nvkm_bios *bios = nvkm_bios(priv); 346 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
347 struct nvkm_device *device = subdev->device;
348 struct nvkm_bios *bios = device->bios;
1502 struct nvkm_output *outp; 349 struct nvkm_output *outp;
1503 struct nvbios_outp info1; 350 struct nvbios_outp info1;
1504 struct nvbios_ocfg info2; 351 struct nvbios_ocfg info2;
@@ -1508,27 +355,27 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
1508 int i; 355 int i;
1509 356
1510 /* DAC */ 357 /* DAC */
1511 for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++) 358 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->dac.nr; i++)
1512 ctrl = nv_rd32(priv, 0x610b58 + (i * 8)); 359 ctrl = nvkm_rd32(device, 0x610b58 + (i * 8));
1513 360
1514 /* SOR */ 361 /* SOR */
1515 if (!(ctrl & (1 << head))) { 362 if (!(ctrl & (1 << head))) {
1516 if (nv_device(priv)->chipset < 0x90 || 363 if (device->chipset < 0x90 ||
1517 nv_device(priv)->chipset == 0x92 || 364 device->chipset == 0x92 ||
1518 nv_device(priv)->chipset == 0xa0) { 365 device->chipset == 0xa0) {
1519 reg = 0x610b70; 366 reg = 0x610b70;
1520 } else { 367 } else {
1521 reg = 0x610794; 368 reg = 0x610794;
1522 } 369 }
1523 for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++) 370 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->sor.nr; i++)
1524 ctrl = nv_rd32(priv, reg + (i * 8)); 371 ctrl = nvkm_rd32(device, reg + (i * 8));
1525 i += 4; 372 i += 4;
1526 } 373 }
1527 374
1528 /* PIOR */ 375 /* PIOR */
1529 if (!(ctrl & (1 << head))) { 376 if (!(ctrl & (1 << head))) {
1530 for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++) 377 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->pior.nr; i++)
1531 ctrl = nv_rd32(priv, 0x610b80 + (i * 8)); 378 ctrl = nvkm_rd32(device, 0x610b80 + (i * 8));
1532 i += 8; 379 i += 8;
1533 } 380 }
1534 381
@@ -1536,7 +383,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
1536 return NULL; 383 return NULL;
1537 i--; 384 i--;
1538 385
1539 outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1); 386 outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
1540 if (!outp) 387 if (!outp)
1541 return NULL; 388 return NULL;
1542 389
@@ -1548,7 +395,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
1548 *conf |= 0x0100; 395 *conf |= 0x0100;
1549 break; 396 break;
1550 case DCB_OUTPUT_LVDS: 397 case DCB_OUTPUT_LVDS:
1551 *conf = priv->sor.lvdsconf; 398 *conf = disp->sor.lvdsconf;
1552 break; 399 break;
1553 case DCB_OUTPUT_DP: 400 case DCB_OUTPUT_DP:
1554 *conf = (ctrl & 0x00000f00) >> 8; 401 *conf = (ctrl & 0x00000f00) >> 8;
@@ -1568,7 +415,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
1568 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); 415 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
1569 if (data) { 416 if (data) {
1570 struct nvbios_init init = { 417 struct nvbios_init init = {
1571 .subdev = nv_subdev(priv), 418 .subdev = subdev,
1572 .bios = bios, 419 .bios = bios,
1573 .offset = data, 420 .offset = data,
1574 .outp = &outp->info, 421 .outp = &outp->info,
@@ -1584,15 +431,16 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
1584} 431}
1585 432
1586static void 433static void
1587nv50_disp_intr_unk10_0(struct nv50_disp_priv *priv, int head) 434nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
1588{ 435{
1589 exec_script(priv, head, 1); 436 exec_script(disp, head, 1);
1590} 437}
1591 438
1592static void 439static void
1593nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head) 440nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head)
1594{ 441{
1595 struct nvkm_output *outp = exec_script(priv, head, 2); 442 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
443 struct nvkm_output *outp = exec_script(disp, head, 2);
1596 444
1597 /* the binary driver does this outside of the supervisor handling 445 /* the binary driver does this outside of the supervisor handling
1598 * (after the third supervisor from a detach). we (currently?) 446 * (after the third supervisor from a detach). we (currently?)
@@ -1608,10 +456,10 @@ nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
1608 * in a blank screen (SOR_PWR off/on can restore it) 456 * in a blank screen (SOR_PWR off/on can restore it)
1609 */ 457 */
1610 if (outp && outp->info.type == DCB_OUTPUT_DP) { 458 if (outp && outp->info.type == DCB_OUTPUT_DP) {
1611 struct nvkm_output_dp *outpdp = (void *)outp; 459 struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
1612 struct nvbios_init init = { 460 struct nvbios_init init = {
1613 .subdev = nv_subdev(priv), 461 .subdev = subdev,
1614 .bios = nvkm_bios(priv), 462 .bios = subdev->device->bios,
1615 .outp = &outp->info, 463 .outp = &outp->info,
1616 .crtc = head, 464 .crtc = head,
1617 .offset = outpdp->info.script[4], 465 .offset = outpdp->info.script[4],
@@ -1624,29 +472,32 @@ nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
1624} 472}
1625 473
1626static void 474static void
1627nv50_disp_intr_unk20_1(struct nv50_disp_priv *priv, int head) 475nv50_disp_intr_unk20_1(struct nv50_disp *disp, int head)
1628{ 476{
1629 struct nvkm_devinit *devinit = nvkm_devinit(priv); 477 struct nvkm_device *device = disp->base.engine.subdev.device;
1630 u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; 478 struct nvkm_devinit *devinit = device->devinit;
479 u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
1631 if (pclk) 480 if (pclk)
1632 devinit->pll_set(devinit, PLL_VPLL0 + head, pclk); 481 nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head, pclk);
1633} 482}
1634 483
1635static void 484static void
1636nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head, 485nv50_disp_intr_unk20_2_dp(struct nv50_disp *disp, int head,
1637 struct dcb_output *outp, u32 pclk) 486 struct dcb_output *outp, u32 pclk)
1638{ 487{
488 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
489 struct nvkm_device *device = subdev->device;
1639 const int link = !(outp->sorconf.link & 1); 490 const int link = !(outp->sorconf.link & 1);
1640 const int or = ffs(outp->or) - 1; 491 const int or = ffs(outp->or) - 1;
1641 const u32 soff = ( or * 0x800); 492 const u32 soff = ( or * 0x800);
1642 const u32 loff = (link * 0x080) + soff; 493 const u32 loff = (link * 0x080) + soff;
1643 const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8)); 494 const u32 ctrl = nvkm_rd32(device, 0x610794 + (or * 8));
1644 const u32 symbol = 100000; 495 const u32 symbol = 100000;
1645 const s32 vactive = nv_rd32(priv, 0x610af8 + (head * 0x540)) & 0xffff; 496 const s32 vactive = nvkm_rd32(device, 0x610af8 + (head * 0x540)) & 0xffff;
1646 const s32 vblanke = nv_rd32(priv, 0x610ae8 + (head * 0x540)) & 0xffff; 497 const s32 vblanke = nvkm_rd32(device, 0x610ae8 + (head * 0x540)) & 0xffff;
1647 const s32 vblanks = nv_rd32(priv, 0x610af0 + (head * 0x540)) & 0xffff; 498 const s32 vblanks = nvkm_rd32(device, 0x610af0 + (head * 0x540)) & 0xffff;
1648 u32 dpctrl = nv_rd32(priv, 0x61c10c + loff); 499 u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
1649 u32 clksor = nv_rd32(priv, 0x614300 + soff); 500 u32 clksor = nvkm_rd32(device, 0x614300 + soff);
1650 int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0; 501 int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
1651 int TU, VTUi, VTUf, VTUa; 502 int TU, VTUi, VTUf, VTUa;
1652 u64 link_data_rate, link_ratio, unk; 503 u64 link_data_rate, link_ratio, unk;
@@ -1662,14 +513,14 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head,
1662 value = value * link_bw; 513 value = value * link_bw;
1663 do_div(value, pclk); 514 do_div(value, pclk);
1664 value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr); 515 value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
1665 nv_mask(priv, 0x61c1e8 + soff, 0x0000ffff, value); 516 nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, value);
1666 517
1667 /* symbols/vblank - algorithm taken from comments in tegra driver */ 518 /* symbols/vblank - algorithm taken from comments in tegra driver */
1668 value = vblanks - vblanke - 25; 519 value = vblanks - vblanke - 25;
1669 value = value * link_bw; 520 value = value * link_bw;
1670 do_div(value, pclk); 521 do_div(value, pclk);
1671 value = value - ((36 / link_nr) + 3) - 1; 522 value = value - ((36 / link_nr) + 3) - 1;
1672 nv_mask(priv, 0x61c1ec + soff, 0x00ffffff, value); 523 nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, value);
1673 524
1674 /* watermark / activesym */ 525 /* watermark / activesym */
1675 if ((ctrl & 0xf0000) == 0x60000) bits = 30; 526 if ((ctrl & 0xf0000) == 0x60000) bits = 30;
@@ -1734,7 +585,7 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head,
1734 } 585 }
1735 586
1736 if (!bestTU) { 587 if (!bestTU) {
1737 nv_error(priv, "unable to find suitable dp config\n"); 588 nvkm_error(subdev, "unable to find suitable dp config\n");
1738 return; 589 return;
1739 } 590 }
1740 591
@@ -1745,22 +596,23 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head,
1745 do_div(unk, symbol); 596 do_div(unk, symbol);
1746 unk += 6; 597 unk += 6;
1747 598
1748 nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2); 599 nvkm_mask(device, 0x61c10c + loff, 0x000001fc, bestTU << 2);
1749 nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 | 600 nvkm_mask(device, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
1750 bestVTUf << 16 | 601 bestVTUf << 16 |
1751 bestVTUi << 8 | unk); 602 bestVTUi << 8 | unk);
1752} 603}
1753 604
1754static void 605static void
1755nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head) 606nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
1756{ 607{
608 struct nvkm_device *device = disp->base.engine.subdev.device;
1757 struct nvkm_output *outp; 609 struct nvkm_output *outp;
1758 u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; 610 u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
1759 u32 hval, hreg = 0x614200 + (head * 0x800); 611 u32 hval, hreg = 0x614200 + (head * 0x800);
1760 u32 oval, oreg; 612 u32 oval, oreg;
1761 u32 mask, conf; 613 u32 mask, conf;
1762 614
1763 outp = exec_clkcmp(priv, head, 0xff, pclk, &conf); 615 outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
1764 if (!outp) 616 if (!outp)
1765 return; 617 return;
1766 618
@@ -1787,10 +639,10 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
1787 u32 ctrl, datarate; 639 u32 ctrl, datarate;
1788 640
1789 if (outp->info.location == 0) { 641 if (outp->info.location == 0) {
1790 ctrl = nv_rd32(priv, 0x610794 + soff); 642 ctrl = nvkm_rd32(device, 0x610794 + soff);
1791 soff = 1; 643 soff = 1;
1792 } else { 644 } else {
1793 ctrl = nv_rd32(priv, 0x610b80 + soff); 645 ctrl = nvkm_rd32(device, 0x610b80 + soff);
1794 soff = 2; 646 soff = 2;
1795 } 647 }
1796 648
@@ -1804,10 +656,10 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
1804 } 656 }
1805 657
1806 if (nvkm_output_dp_train(outp, datarate / soff, true)) 658 if (nvkm_output_dp_train(outp, datarate / soff, true))
1807 ERR("link not trained before attach\n"); 659 OUTP_ERR(outp, "link not trained before attach");
1808 } 660 }
1809 661
1810 exec_clkcmp(priv, head, 0, pclk, &conf); 662 exec_clkcmp(disp, head, 0, pclk, &conf);
1811 663
1812 if (!outp->info.location && outp->info.type == DCB_OUTPUT_ANALOG) { 664 if (!outp->info.location && outp->info.type == DCB_OUTPUT_ANALOG) {
1813 oreg = 0x614280 + (ffs(outp->info.or) - 1) * 0x800; 665 oreg = 0x614280 + (ffs(outp->info.or) - 1) * 0x800;
@@ -1817,7 +669,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
1817 } else 669 } else
1818 if (!outp->info.location) { 670 if (!outp->info.location) {
1819 if (outp->info.type == DCB_OUTPUT_DP) 671 if (outp->info.type == DCB_OUTPUT_DP)
1820 nv50_disp_intr_unk20_2_dp(priv, head, &outp->info, pclk); 672 nv50_disp_intr_unk20_2_dp(disp, head, &outp->info, pclk);
1821 oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800; 673 oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
1822 oval = (conf & 0x0100) ? 0x00000101 : 0x00000000; 674 oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
1823 hval = 0x00000000; 675 hval = 0x00000000;
@@ -1829,8 +681,8 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
1829 mask = 0x00000707; 681 mask = 0x00000707;
1830 } 682 }
1831 683
1832 nv_mask(priv, hreg, 0x0000000f, hval); 684 nvkm_mask(device, hreg, 0x0000000f, hval);
1833 nv_mask(priv, oreg, mask, oval); 685 nvkm_mask(device, oreg, mask, oval);
1834} 686}
1835 687
1836/* If programming a TMDS output on a SOR that can also be configured for 688/* If programming a TMDS output on a SOR that can also be configured for
@@ -1842,10 +694,11 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
1842 * programmed for DisplayPort. 694 * programmed for DisplayPort.
1843 */ 695 */
1844static void 696static void
1845nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, 697nv50_disp_intr_unk40_0_tmds(struct nv50_disp *disp,
1846 struct dcb_output *outp) 698 struct dcb_output *outp)
1847{ 699{
1848 struct nvkm_bios *bios = nvkm_bios(priv); 700 struct nvkm_device *device = disp->base.engine.subdev.device;
701 struct nvkm_bios *bios = device->bios;
1849 const int link = !(outp->sorconf.link & 1); 702 const int link = !(outp->sorconf.link & 1);
1850 const int or = ffs(outp->or) - 1; 703 const int or = ffs(outp->or) - 1;
1851 const u32 loff = (or * 0x800) + (link * 0x80); 704 const u32 loff = (or * 0x800) + (link * 0x80);
@@ -1854,166 +707,136 @@ nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv,
1854 u8 ver, hdr; 707 u8 ver, hdr;
1855 708
1856 if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match)) 709 if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
1857 nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000); 710 nvkm_mask(device, 0x61c10c + loff, 0x00000001, 0x00000000);
1858} 711}
1859 712
1860static void 713static void
1861nv50_disp_intr_unk40_0(struct nv50_disp_priv *priv, int head) 714nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
1862{ 715{
716 struct nvkm_device *device = disp->base.engine.subdev.device;
1863 struct nvkm_output *outp; 717 struct nvkm_output *outp;
1864 u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; 718 u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
1865 u32 conf; 719 u32 conf;
1866 720
1867 outp = exec_clkcmp(priv, head, 1, pclk, &conf); 721 outp = exec_clkcmp(disp, head, 1, pclk, &conf);
1868 if (!outp) 722 if (!outp)
1869 return; 723 return;
1870 724
1871 if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS) 725 if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
1872 nv50_disp_intr_unk40_0_tmds(priv, &outp->info); 726 nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
1873} 727}
1874 728
1875void 729void
1876nv50_disp_intr_supervisor(struct work_struct *work) 730nv50_disp_intr_supervisor(struct work_struct *work)
1877{ 731{
1878 struct nv50_disp_priv *priv = 732 struct nv50_disp *disp =
1879 container_of(work, struct nv50_disp_priv, supervisor); 733 container_of(work, struct nv50_disp, supervisor);
1880 struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass; 734 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
1881 u32 super = nv_rd32(priv, 0x610030); 735 struct nvkm_device *device = subdev->device;
736 u32 super = nvkm_rd32(device, 0x610030);
1882 int head; 737 int head;
1883 738
1884 nv_debug(priv, "supervisor 0x%08x 0x%08x\n", priv->super, super); 739 nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super, super);
1885 740
1886 if (priv->super & 0x00000010) { 741 if (disp->super & 0x00000010) {
1887 nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core); 742 nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
1888 for (head = 0; head < priv->head.nr; head++) { 743 for (head = 0; head < disp->base.head.nr; head++) {
1889 if (!(super & (0x00000020 << head))) 744 if (!(super & (0x00000020 << head)))
1890 continue; 745 continue;
1891 if (!(super & (0x00000080 << head))) 746 if (!(super & (0x00000080 << head)))
1892 continue; 747 continue;
1893 nv50_disp_intr_unk10_0(priv, head); 748 nv50_disp_intr_unk10_0(disp, head);
1894 } 749 }
1895 } else 750 } else
1896 if (priv->super & 0x00000020) { 751 if (disp->super & 0x00000020) {
1897 for (head = 0; head < priv->head.nr; head++) { 752 for (head = 0; head < disp->base.head.nr; head++) {
1898 if (!(super & (0x00000080 << head))) 753 if (!(super & (0x00000080 << head)))
1899 continue; 754 continue;
1900 nv50_disp_intr_unk20_0(priv, head); 755 nv50_disp_intr_unk20_0(disp, head);
1901 } 756 }
1902 for (head = 0; head < priv->head.nr; head++) { 757 for (head = 0; head < disp->base.head.nr; head++) {
1903 if (!(super & (0x00000200 << head))) 758 if (!(super & (0x00000200 << head)))
1904 continue; 759 continue;
1905 nv50_disp_intr_unk20_1(priv, head); 760 nv50_disp_intr_unk20_1(disp, head);
1906 } 761 }
1907 for (head = 0; head < priv->head.nr; head++) { 762 for (head = 0; head < disp->base.head.nr; head++) {
1908 if (!(super & (0x00000080 << head))) 763 if (!(super & (0x00000080 << head)))
1909 continue; 764 continue;
1910 nv50_disp_intr_unk20_2(priv, head); 765 nv50_disp_intr_unk20_2(disp, head);
1911 } 766 }
1912 } else 767 } else
1913 if (priv->super & 0x00000040) { 768 if (disp->super & 0x00000040) {
1914 for (head = 0; head < priv->head.nr; head++) { 769 for (head = 0; head < disp->base.head.nr; head++) {
1915 if (!(super & (0x00000080 << head))) 770 if (!(super & (0x00000080 << head)))
1916 continue; 771 continue;
1917 nv50_disp_intr_unk40_0(priv, head); 772 nv50_disp_intr_unk40_0(disp, head);
1918 } 773 }
1919 } 774 }
1920 775
1921 nv_wr32(priv, 0x610030, 0x80000000); 776 nvkm_wr32(device, 0x610030, 0x80000000);
1922} 777}
1923 778
1924void 779void
1925nv50_disp_intr(struct nvkm_subdev *subdev) 780nv50_disp_intr(struct nv50_disp *disp)
1926{ 781{
1927 struct nv50_disp_priv *priv = (void *)subdev; 782 struct nvkm_device *device = disp->base.engine.subdev.device;
1928 u32 intr0 = nv_rd32(priv, 0x610020); 783 u32 intr0 = nvkm_rd32(device, 0x610020);
1929 u32 intr1 = nv_rd32(priv, 0x610024); 784 u32 intr1 = nvkm_rd32(device, 0x610024);
1930 785
1931 while (intr0 & 0x001f0000) { 786 while (intr0 & 0x001f0000) {
1932 u32 chid = __ffs(intr0 & 0x001f0000) - 16; 787 u32 chid = __ffs(intr0 & 0x001f0000) - 16;
1933 nv50_disp_intr_error(priv, chid); 788 nv50_disp_intr_error(disp, chid);
1934 intr0 &= ~(0x00010000 << chid); 789 intr0 &= ~(0x00010000 << chid);
1935 } 790 }
1936 791
1937 while (intr0 & 0x0000001f) { 792 while (intr0 & 0x0000001f) {
1938 u32 chid = __ffs(intr0 & 0x0000001f); 793 u32 chid = __ffs(intr0 & 0x0000001f);
1939 nv50_disp_chan_uevent_send(priv, chid); 794 nv50_disp_chan_uevent_send(disp, chid);
1940 intr0 &= ~(0x00000001 << chid); 795 intr0 &= ~(0x00000001 << chid);
1941 } 796 }
1942 797
1943 if (intr1 & 0x00000004) { 798 if (intr1 & 0x00000004) {
1944 nvkm_disp_vblank(&priv->base, 0); 799 nvkm_disp_vblank(&disp->base, 0);
1945 nv_wr32(priv, 0x610024, 0x00000004); 800 nvkm_wr32(device, 0x610024, 0x00000004);
1946 intr1 &= ~0x00000004;
1947 } 801 }
1948 802
1949 if (intr1 & 0x00000008) { 803 if (intr1 & 0x00000008) {
1950 nvkm_disp_vblank(&priv->base, 1); 804 nvkm_disp_vblank(&disp->base, 1);
1951 nv_wr32(priv, 0x610024, 0x00000008); 805 nvkm_wr32(device, 0x610024, 0x00000008);
1952 intr1 &= ~0x00000008;
1953 } 806 }
1954 807
1955 if (intr1 & 0x00000070) { 808 if (intr1 & 0x00000070) {
1956 priv->super = (intr1 & 0x00000070); 809 disp->super = (intr1 & 0x00000070);
1957 schedule_work(&priv->supervisor); 810 schedule_work(&disp->supervisor);
1958 nv_wr32(priv, 0x610024, priv->super); 811 nvkm_wr32(device, 0x610024, disp->super);
1959 intr1 &= ~0x00000070; 812 }
1960 } 813}
1961} 814
815static const struct nv50_disp_func
816nv50_disp = {
817 .intr = nv50_disp_intr,
818 .uevent = &nv50_disp_chan_uevent,
819 .super = nv50_disp_intr_supervisor,
820 .root = &nv50_disp_root_oclass,
821 .head.vblank_init = nv50_disp_vblank_init,
822 .head.vblank_fini = nv50_disp_vblank_fini,
823 .head.scanoutpos = nv50_disp_root_scanoutpos,
824 .outp.internal.crt = nv50_dac_output_new,
825 .outp.internal.tmds = nv50_sor_output_new,
826 .outp.internal.lvds = nv50_sor_output_new,
827 .outp.external.tmds = nv50_pior_output_new,
828 .outp.external.dp = nv50_pior_dp_new,
829 .dac.nr = 3,
830 .dac.power = nv50_dac_power,
831 .dac.sense = nv50_dac_sense,
832 .sor.nr = 2,
833 .sor.power = nv50_sor_power,
834 .pior.nr = 3,
835 .pior.power = nv50_pior_power,
836};
1962 837
1963static int 838int
1964nv50_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 839nv50_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
1965 struct nvkm_oclass *oclass, void *data, u32 size,
1966 struct nvkm_object **pobject)
1967{ 840{
1968 struct nv50_disp_priv *priv; 841 return nv50_disp_new_(&nv50_disp, device, index, 2, pdisp);
1969 int ret;
1970
1971 ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
1972 "display", &priv);
1973 *pobject = nv_object(priv);
1974 if (ret)
1975 return ret;
1976
1977 ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
1978 if (ret)
1979 return ret;
1980
1981 nv_engine(priv)->sclass = nv50_disp_main_oclass;
1982 nv_engine(priv)->cclass = &nv50_disp_cclass;
1983 nv_subdev(priv)->intr = nv50_disp_intr;
1984 INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
1985 priv->sclass = nv50_disp_sclass;
1986 priv->head.nr = 2;
1987 priv->dac.nr = 3;
1988 priv->sor.nr = 2;
1989 priv->pior.nr = 3;
1990 priv->dac.power = nv50_dac_power;
1991 priv->dac.sense = nv50_dac_sense;
1992 priv->sor.power = nv50_sor_power;
1993 priv->pior.power = nv50_pior_power;
1994 return 0;
1995} 842}
1996
1997struct nvkm_oclass *
1998nv50_disp_outp_sclass[] = {
1999 &nv50_pior_dp_impl.base.base,
2000 NULL
2001};
2002
2003struct nvkm_oclass *
2004nv50_disp_oclass = &(struct nv50_disp_impl) {
2005 .base.base.handle = NV_ENGINE(DISP, 0x50),
2006 .base.base.ofuncs = &(struct nvkm_ofuncs) {
2007 .ctor = nv50_disp_ctor,
2008 .dtor = _nvkm_disp_dtor,
2009 .init = _nvkm_disp_init,
2010 .fini = _nvkm_disp_fini,
2011 },
2012 .base.vblank = &nv50_disp_vblank_func,
2013 .base.outp = nv50_disp_outp_sclass,
2014 .mthd.core = &nv50_disp_core_mthd_chan,
2015 .mthd.base = &nv50_disp_base_mthd_chan,
2016 .mthd.ovly = &nv50_disp_ovly_mthd_chan,
2017 .mthd.prev = 0x000004,
2018 .head.scanoutpos = nv50_disp_main_scanoutpos,
2019}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index b4ed620070fa..aecebd8717e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -1,17 +1,18 @@
1#ifndef __NV50_DISP_H__ 1#ifndef __NV50_DISP_H__
2#define __NV50_DISP_H__ 2#define __NV50_DISP_H__
3#define nv50_disp(p) container_of((p), struct nv50_disp, base)
3#include "priv.h" 4#include "priv.h"
4struct nvkm_output; 5struct nvkm_output;
5struct nvkm_output_dp; 6struct nvkm_output_dp;
6 7
7#define NV50_DISP_MTHD_ struct nvkm_object *object, \ 8#define NV50_DISP_MTHD_ struct nvkm_object *object, \
8 struct nv50_disp_priv *priv, void *data, u32 size 9 struct nv50_disp *disp, void *data, u32 size
9#define NV50_DISP_MTHD_V0 NV50_DISP_MTHD_, int head 10#define NV50_DISP_MTHD_V0 NV50_DISP_MTHD_, int head
10#define NV50_DISP_MTHD_V1 NV50_DISP_MTHD_, int head, struct nvkm_output *outp 11#define NV50_DISP_MTHD_V1 NV50_DISP_MTHD_, int head, struct nvkm_output *outp
11 12
12struct nv50_disp_priv { 13struct nv50_disp {
14 const struct nv50_disp_func *func;
13 struct nvkm_disp base; 15 struct nvkm_disp base;
14 struct nvkm_oclass *sclass;
15 16
16 struct work_struct supervisor; 17 struct work_struct supervisor;
17 u32 super; 18 u32 super;
@@ -19,208 +20,98 @@ struct nv50_disp_priv {
19 struct nvkm_event uevent; 20 struct nvkm_event uevent;
20 21
21 struct { 22 struct {
22 int nr;
23 } head;
24 struct {
25 int nr;
26 int (*power)(NV50_DISP_MTHD_V1);
27 int (*sense)(NV50_DISP_MTHD_V1);
28 } dac;
29 struct {
30 int nr;
31 int (*power)(NV50_DISP_MTHD_V1);
32 int (*hda_eld)(NV50_DISP_MTHD_V1);
33 int (*hdmi)(NV50_DISP_MTHD_V1);
34 u32 lvdsconf; 23 u32 lvdsconf;
35 void (*magic)(struct nvkm_output *);
36 } sor; 24 } sor;
25
37 struct { 26 struct {
38 int nr;
39 int (*power)(NV50_DISP_MTHD_V1);
40 u8 type[3]; 27 u8 type[3];
41 } pior; 28 } pior;
42};
43 29
44struct nv50_disp_impl { 30 struct nv50_disp_chan *chan[17];
45 struct nvkm_disp_impl base;
46 struct {
47 const struct nv50_disp_mthd_chan *core;
48 const struct nv50_disp_mthd_chan *base;
49 const struct nv50_disp_mthd_chan *ovly;
50 int prev;
51 } mthd;
52 struct {
53 int (*scanoutpos)(NV50_DISP_MTHD_V0);
54 } head;
55}; 31};
56 32
57int nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0); 33int nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0);
58int nv50_disp_main_mthd(struct nvkm_object *, u32, void *, u32);
59 34
60int gf110_disp_main_scanoutpos(NV50_DISP_MTHD_V0); 35int gf119_disp_root_scanoutpos(NV50_DISP_MTHD_V0);
61 36
62int nv50_dac_power(NV50_DISP_MTHD_V1); 37int nv50_dac_power(NV50_DISP_MTHD_V1);
63int nv50_dac_sense(NV50_DISP_MTHD_V1); 38int nv50_dac_sense(NV50_DISP_MTHD_V1);
64 39
65int gt215_hda_eld(NV50_DISP_MTHD_V1); 40int gt215_hda_eld(NV50_DISP_MTHD_V1);
66int gf110_hda_eld(NV50_DISP_MTHD_V1); 41int gf119_hda_eld(NV50_DISP_MTHD_V1);
67 42
68int g84_hdmi_ctrl(NV50_DISP_MTHD_V1); 43int g84_hdmi_ctrl(NV50_DISP_MTHD_V1);
69int gt215_hdmi_ctrl(NV50_DISP_MTHD_V1); 44int gt215_hdmi_ctrl(NV50_DISP_MTHD_V1);
70int gf110_hdmi_ctrl(NV50_DISP_MTHD_V1); 45int gf119_hdmi_ctrl(NV50_DISP_MTHD_V1);
71int gk104_hdmi_ctrl(NV50_DISP_MTHD_V1); 46int gk104_hdmi_ctrl(NV50_DISP_MTHD_V1);
72 47
73int nv50_sor_power(NV50_DISP_MTHD_V1); 48int nv50_sor_power(NV50_DISP_MTHD_V1);
74int nv50_pior_power(NV50_DISP_MTHD_V1); 49int nv50_pior_power(NV50_DISP_MTHD_V1);
75 50
76#include <core/parent.h> 51int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
77 52 int index, int heads, struct nvkm_disp **);
78struct nv50_disp_base { 53int gf119_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
79 struct nvkm_parent base; 54 int index, struct nvkm_disp **);
80 struct nvkm_ramht *ramht; 55
81 u32 chan; 56struct nv50_disp_func_outp {
82}; 57 int (* crt)(struct nvkm_disp *, int index, struct dcb_output *,
83 58 struct nvkm_output **);
84struct nv50_disp_chan_impl { 59 int (* tv)(struct nvkm_disp *, int index, struct dcb_output *,
85 struct nvkm_ofuncs base; 60 struct nvkm_output **);
86 int chid; 61 int (*tmds)(struct nvkm_disp *, int index, struct dcb_output *,
87 int (*attach)(struct nvkm_object *, struct nvkm_object *, u32); 62 struct nvkm_output **);
88 void (*detach)(struct nvkm_object *, int); 63 int (*lvds)(struct nvkm_disp *, int index, struct dcb_output *,
64 struct nvkm_output **);
65 int (* dp)(struct nvkm_disp *, int index, struct dcb_output *,
66 struct nvkm_output **);
89}; 67};
90 68
91#include <core/namedb.h> 69struct nv50_disp_func {
70 void (*intr)(struct nv50_disp *);
92 71
93struct nv50_disp_chan { 72 const struct nvkm_event_func *uevent;
94 struct nvkm_namedb base; 73 void (*super)(struct work_struct *);
95 int chid;
96};
97 74
98int nv50_disp_chan_ntfy(struct nvkm_object *, u32, struct nvkm_event **); 75 const struct nvkm_disp_oclass *root;
99int nv50_disp_chan_map(struct nvkm_object *, u64 *, u32 *);
100u32 nv50_disp_chan_rd32(struct nvkm_object *, u64);
101void nv50_disp_chan_wr32(struct nvkm_object *, u64, u32);
102extern const struct nvkm_event_func nv50_disp_chan_uevent;
103int nv50_disp_chan_uevent_ctor(struct nvkm_object *, void *, u32,
104 struct nvkm_notify *);
105void nv50_disp_chan_uevent_send(struct nv50_disp_priv *, int);
106
107extern const struct nvkm_event_func gf110_disp_chan_uevent;
108
109#define nv50_disp_chan_init(a) \
110 nvkm_namedb_init(&(a)->base)
111#define nv50_disp_chan_fini(a,b) \
112 nvkm_namedb_fini(&(a)->base, (b))
113
114struct nv50_disp_dmac {
115 struct nv50_disp_chan base;
116 struct nvkm_dmaobj *pushdma;
117 u32 push;
118};
119 76
120void nv50_disp_dmac_dtor(struct nvkm_object *); 77 struct {
78 void (*vblank_init)(struct nv50_disp *, int head);
79 void (*vblank_fini)(struct nv50_disp *, int head);
80 int (*scanoutpos)(NV50_DISP_MTHD_V0);
81 } head;
121 82
122struct nv50_disp_pioc { 83 struct {
123 struct nv50_disp_chan base; 84 const struct nv50_disp_func_outp internal;
124}; 85 const struct nv50_disp_func_outp external;
86 } outp;
125 87
126void nv50_disp_pioc_dtor(struct nvkm_object *); 88 struct {
89 int nr;
90 int (*power)(NV50_DISP_MTHD_V1);
91 int (*sense)(NV50_DISP_MTHD_V1);
92 } dac;
127 93
128struct nv50_disp_mthd_list {
129 u32 mthd;
130 u32 addr;
131 struct { 94 struct {
132 u32 mthd; 95 int nr;
133 u32 addr; 96 int (*power)(NV50_DISP_MTHD_V1);
134 const char *name; 97 int (*hda_eld)(NV50_DISP_MTHD_V1);
135 } data[]; 98 int (*hdmi)(NV50_DISP_MTHD_V1);
136}; 99 void (*magic)(struct nvkm_output *);
100 } sor;
137 101
138struct nv50_disp_mthd_chan {
139 const char *name;
140 u32 addr;
141 struct { 102 struct {
142 const char *name;
143 int nr; 103 int nr;
144 const struct nv50_disp_mthd_list *mthd; 104 int (*power)(NV50_DISP_MTHD_V1);
145 } data[]; 105 } pior;
146}; 106};
147 107
148extern struct nv50_disp_chan_impl nv50_disp_core_ofuncs; 108void nv50_disp_vblank_init(struct nv50_disp *, int);
149int nv50_disp_core_ctor(struct nvkm_object *, struct nvkm_object *, 109void nv50_disp_vblank_fini(struct nv50_disp *, int);
150 struct nvkm_oclass *, void *, u32, 110void nv50_disp_intr(struct nv50_disp *);
151 struct nvkm_object **);
152extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_base;
153extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
154extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
155extern struct nv50_disp_chan_impl nv50_disp_base_ofuncs;
156int nv50_disp_base_ctor(struct nvkm_object *, struct nvkm_object *,
157 struct nvkm_oclass *, void *, u32,
158 struct nvkm_object **);
159extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
160extern struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs;
161int nv50_disp_ovly_ctor(struct nvkm_object *, struct nvkm_object *,
162 struct nvkm_oclass *, void *, u32,
163 struct nvkm_object **);
164extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base;
165extern struct nv50_disp_chan_impl nv50_disp_oimm_ofuncs;
166int nv50_disp_oimm_ctor(struct nvkm_object *, struct nvkm_object *,
167 struct nvkm_oclass *, void *, u32,
168 struct nvkm_object **);
169extern struct nv50_disp_chan_impl nv50_disp_curs_ofuncs;
170int nv50_disp_curs_ctor(struct nvkm_object *, struct nvkm_object *,
171 struct nvkm_oclass *, void *, u32,
172 struct nvkm_object **);
173extern struct nvkm_ofuncs nv50_disp_main_ofuncs;
174int nv50_disp_main_ctor(struct nvkm_object *, struct nvkm_object *,
175 struct nvkm_oclass *, void *, u32,
176 struct nvkm_object **);
177void nv50_disp_main_dtor(struct nvkm_object *);
178extern struct nvkm_omthds nv50_disp_main_omthds[];
179extern struct nvkm_oclass nv50_disp_cclass;
180void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
181 const struct nv50_disp_mthd_chan *);
182void nv50_disp_intr_supervisor(struct work_struct *); 111void nv50_disp_intr_supervisor(struct work_struct *);
183void nv50_disp_intr(struct nvkm_subdev *); 112
184extern const struct nvkm_event_func nv50_disp_vblank_func; 113void gf119_disp_vblank_init(struct nv50_disp *, int);
185 114void gf119_disp_vblank_fini(struct nv50_disp *, int);
186extern const struct nv50_disp_mthd_chan g84_disp_core_mthd_chan; 115void gf119_disp_intr(struct nv50_disp *);
187extern const struct nv50_disp_mthd_list g84_disp_core_mthd_dac; 116void gf119_disp_intr_supervisor(struct work_struct *);
188extern const struct nv50_disp_mthd_list g84_disp_core_mthd_head;
189extern const struct nv50_disp_mthd_chan g84_disp_base_mthd_chan;
190extern const struct nv50_disp_mthd_chan g84_disp_ovly_mthd_chan;
191
192extern const struct nv50_disp_mthd_chan g94_disp_core_mthd_chan;
193
194extern struct nv50_disp_chan_impl gf110_disp_core_ofuncs;
195extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_base;
196extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_dac;
197extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_sor;
198extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_pior;
199extern struct nv50_disp_chan_impl gf110_disp_base_ofuncs;
200extern struct nv50_disp_chan_impl gf110_disp_ovly_ofuncs;
201extern const struct nv50_disp_mthd_chan gf110_disp_base_mthd_chan;
202extern struct nv50_disp_chan_impl gf110_disp_oimm_ofuncs;
203extern struct nv50_disp_chan_impl gf110_disp_curs_ofuncs;
204extern struct nvkm_ofuncs gf110_disp_main_ofuncs;
205extern struct nvkm_oclass gf110_disp_cclass;
206void gf110_disp_intr_supervisor(struct work_struct *);
207void gf110_disp_intr(struct nvkm_subdev *);
208extern const struct nvkm_event_func gf110_disp_vblank_func;
209
210extern const struct nv50_disp_mthd_chan gk104_disp_core_mthd_chan;
211extern const struct nv50_disp_mthd_chan gk104_disp_ovly_mthd_chan;
212
213extern struct nvkm_output_dp_impl nv50_pior_dp_impl;
214extern struct nvkm_oclass *nv50_disp_outp_sclass[];
215
216extern struct nvkm_output_dp_impl g94_sor_dp_impl;
217int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
218extern struct nvkm_oclass *g94_disp_outp_sclass[];
219
220extern struct nvkm_output_dp_impl gf110_sor_dp_impl;
221int gf110_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
222extern struct nvkm_oclass *gf110_disp_outp_sclass[];
223
224void gm204_sor_magic(struct nvkm_output *outp);
225extern struct nvkm_output_dp_impl gm204_sor_dp_impl;
226#endif 117#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
index c0aac7e20d45..54a4ae8d66c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2014 Ilia Mirkin 2 * Copyright 2012 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -19,18 +19,19 @@
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Authors: Ilia Mirkin 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "channv50.h"
25#include "rootnv50.h"
25 26
26struct nvkm_oclass * 27#include <nvif/class.h>
27nv4c_mc_oclass = &(struct nvkm_mc_oclass) { 28
28 .base.handle = NV_SUBDEV(MC, 0x4c), 29const struct nv50_disp_pioc_oclass
29 .base.ofuncs = &(struct nvkm_ofuncs) { 30g84_disp_oimm_oclass = {
30 .ctor = nv04_mc_ctor, 31 .base.oclass = G82_DISP_OVERLAY,
31 .dtor = _nvkm_mc_dtor, 32 .base.minver = 0,
32 .init = nv44_mc_init, 33 .base.maxver = 0,
33 .fini = _nvkm_mc_fini, 34 .ctor = nv50_disp_oimm_new,
34 }, 35 .func = &nv50_disp_pioc_func,
35 .intr = nv04_mc_intr, 36 .chid = 5,
36}.base; 37};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
new file mode 100644
index 000000000000..c658db54afc5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
@@ -0,0 +1,37 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_pioc_oclass
30gf119_disp_oimm_oclass = {
31 .base.oclass = GF110_DISP_OVERLAY,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_oimm_new,
35 .func = &gf119_disp_pioc_func,
36 .chid = 9,
37};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
new file mode 100644
index 000000000000..b1fde8c125d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
@@ -0,0 +1,37 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_pioc_oclass
30gk104_disp_oimm_oclass = {
31 .base.oclass = GK104_DISP_OVERLAY,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_oimm_new,
35 .func = &gf119_disp_pioc_func,
36 .chid = 9,
37};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
new file mode 100644
index 000000000000..f4e7eb3d1177
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
@@ -0,0 +1,37 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_pioc_oclass
30gt215_disp_oimm_oclass = {
31 .base.oclass = GT214_DISP_OVERLAY,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_oimm_new,
35 .func = &nv50_disp_pioc_func,
36 .chid = 5,
37};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
new file mode 100644
index 000000000000..cd888a1e443c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
@@ -0,0 +1,68 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <core/client.h>
28
29#include <nvif/class.h>
30#include <nvif/unpack.h>
31
32int
33nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
34 const struct nv50_disp_chan_mthd *mthd,
35 struct nv50_disp_root *root, int chid,
36 const struct nvkm_oclass *oclass, void *data, u32 size,
37 struct nvkm_object **pobject)
38{
39 union {
40 struct nv50_disp_overlay_v0 v0;
41 } *args = data;
42 struct nvkm_object *parent = oclass->parent;
43 struct nv50_disp *disp = root->disp;
44 int head, ret;
45
46 nvif_ioctl(parent, "create disp overlay size %d\n", size);
47 if (nvif_unpack(args->v0, 0, 0, false)) {
48 nvif_ioctl(parent, "create disp overlay vers %d head %d\n",
49 args->v0.version, args->v0.head);
50 if (args->v0.head > disp->base.head.nr)
51 return -EINVAL;
52 head = args->v0.head;
53 } else
54 return ret;
55
56 return nv50_disp_chan_new_(func, mthd, root, chid + head,
57 head, oclass, pobject);
58}
59
60const struct nv50_disp_pioc_oclass
61nv50_disp_oimm_oclass = {
62 .base.oclass = NV50_DISP_OVERLAY,
63 .base.minver = 0,
64 .base.maxver = 0,
65 .ctor = nv50_disp_oimm_new,
66 .func = &nv50_disp_pioc_func,
67 .chid = 5,
68};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index 9224bcbf0159..bbe5ec0dedb2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -22,121 +22,66 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "outp.h" 24#include "outp.h"
25#include "priv.h"
26 25
27#include <subdev/bios.h> 26#include <subdev/bios.h>
28#include <subdev/bios/conn.h>
29#include <subdev/bios/dcb.h> 27#include <subdev/bios/dcb.h>
30#include <subdev/i2c.h> 28#include <subdev/i2c.h>
31 29
32int 30void
33_nvkm_output_fini(struct nvkm_object *object, bool suspend) 31nvkm_output_fini(struct nvkm_output *outp)
34{ 32{
35 struct nvkm_output *outp = (void *)object; 33 if (outp->func->fini)
36 nv_ofuncs(outp->conn)->fini(nv_object(outp->conn), suspend); 34 outp->func->fini(outp);
37 return nvkm_object_fini(&outp->base, suspend);
38} 35}
39 36
40int 37void
41_nvkm_output_init(struct nvkm_object *object) 38nvkm_output_init(struct nvkm_output *outp)
42{ 39{
43 struct nvkm_output *outp = (void *)object; 40 if (outp->func->init)
44 int ret = nvkm_object_init(&outp->base); 41 outp->func->init(outp);
45 if (ret == 0)
46 nv_ofuncs(outp->conn)->init(nv_object(outp->conn));
47 return 0;
48} 42}
49 43
50void 44void
51_nvkm_output_dtor(struct nvkm_object *object) 45nvkm_output_del(struct nvkm_output **poutp)
52{ 46{
53 struct nvkm_output *outp = (void *)object; 47 struct nvkm_output *outp = *poutp;
54 list_del(&outp->head); 48 if (outp && !WARN_ON(!outp->func)) {
55 nvkm_object_ref(NULL, (void *)&outp->conn); 49 if (outp->func->dtor)
56 nvkm_object_destroy(&outp->base); 50 *poutp = outp->func->dtor(outp);
51 kfree(*poutp);
52 *poutp = NULL;
53 }
57} 54}
58 55
59int 56void
60nvkm_output_create_(struct nvkm_object *parent, 57nvkm_output_ctor(const struct nvkm_output_func *func, struct nvkm_disp *disp,
61 struct nvkm_object *engine, 58 int index, struct dcb_output *dcbE, struct nvkm_output *outp)
62 struct nvkm_oclass *oclass,
63 struct dcb_output *dcbE, int index,
64 int length, void **pobject)
65{ 59{
66 struct nvkm_disp *disp = nvkm_disp(parent); 60 struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
67 struct nvkm_bios *bios = nvkm_bios(parent);
68 struct nvkm_i2c *i2c = nvkm_i2c(parent);
69 struct nvbios_connE connE;
70 struct nvkm_output *outp;
71 u8 ver, hdr;
72 u32 data;
73 int ret;
74 61
75 ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject); 62 outp->func = func;
76 outp = *pobject; 63 outp->disp = disp;
77 if (ret)
78 return ret;
79
80 outp->info = *dcbE;
81 outp->index = index; 64 outp->index = index;
65 outp->info = *dcbE;
66 outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
82 outp->or = ffs(outp->info.or) - 1; 67 outp->or = ffs(outp->info.or) - 1;
83 68
84 DBG("type %02x loc %d or %d link %d con %x edid %x bus %d head %x\n", 69 OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
85 dcbE->type, dcbE->location, dcbE->or, dcbE->type >= 2 ? 70 "edid %x bus %d head %x",
86 dcbE->sorconf.link : 0, dcbE->connector, dcbE->i2c_index, 71 outp->info.type, outp->info.location, outp->info.or,
87 dcbE->bus, dcbE->heads); 72 outp->info.type >= 2 ? outp->info.sorconf.link : 0,
88 73 outp->info.connector, outp->info.i2c_index,
89 if (outp->info.type != DCB_OUTPUT_DP) 74 outp->info.bus, outp->info.heads);
90 outp->port = i2c->find(i2c, NV_I2C_PORT(outp->info.i2c_index));
91 else
92 outp->port = i2c->find(i2c, NV_I2C_AUX(outp->info.i2c_index));
93 outp->edid = outp->port;
94
95 data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr, &connE);
96 if (!data) {
97 DBG("vbios connector data not found\n");
98 memset(&connE, 0x00, sizeof(connE));
99 connE.type = DCB_CONNECTOR_NONE;
100 }
101
102 ret = nvkm_object_ctor(parent, NULL, nvkm_connector_oclass,
103 &connE, outp->info.connector,
104 (struct nvkm_object **)&outp->conn);
105 if (ret < 0) {
106 ERR("error %d creating connector, disabling\n", ret);
107 return ret;
108 }
109
110 list_add_tail(&outp->head, &disp->outp);
111 return 0;
112} 75}
113 76
114int 77int
115_nvkm_output_ctor(struct nvkm_object *parent, 78nvkm_output_new_(const struct nvkm_output_func *func,
116 struct nvkm_object *engine, 79 struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
117 struct nvkm_oclass *oclass, void *dcbE, u32 index, 80 struct nvkm_output **poutp)
118 struct nvkm_object **pobject)
119{ 81{
120 struct nvkm_output *outp; 82 if (!(*poutp = kzalloc(sizeof(**poutp), GFP_KERNEL)))
121 int ret; 83 return -ENOMEM;
122
123 ret = nvkm_output_create(parent, engine, oclass, dcbE, index, &outp);
124 *pobject = nv_object(outp);
125 if (ret)
126 return ret;
127 84
85 nvkm_output_ctor(func, disp, index, dcbE, *poutp);
128 return 0; 86 return 0;
129} 87}
130
131struct nvkm_oclass *
132nvkm_output_oclass = &(struct nvkm_output_impl) {
133 .base = {
134 .handle = 0,
135 .ofuncs = &(struct nvkm_ofuncs) {
136 .ctor = _nvkm_output_ctor,
137 .dtor = _nvkm_output_dtor,
138 .init = _nvkm_output_init,
139 .fini = _nvkm_output_fini,
140 },
141 },
142}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index d9253d26c31b..2590fec67ca9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -1,61 +1,55 @@
1#ifndef __NVKM_DISP_OUTP_H__ 1#ifndef __NVKM_DISP_OUTP_H__
2#define __NVKM_DISP_OUTP_H__ 2#define __NVKM_DISP_OUTP_H__
3#include <core/object.h> 3#include <engine/disp.h>
4 4
5#include <subdev/bios.h> 5#include <subdev/bios.h>
6#include <subdev/bios/dcb.h> 6#include <subdev/bios/dcb.h>
7 7
8struct nvkm_output { 8struct nvkm_output {
9 struct nvkm_object base; 9 const struct nvkm_output_func *func;
10 struct list_head head; 10 struct nvkm_disp *disp;
11
12 struct dcb_output info;
13 int index; 11 int index;
14 int or; 12 struct dcb_output info;
15 13
16 struct nvkm_i2c_port *port; 14 // whatever (if anything) is pointed at by the dcb device entry
17 struct nvkm_i2c_port *edid; 15 struct nvkm_i2c_bus *i2c;
16 int or;
18 17
18 struct list_head head;
19 struct nvkm_connector *conn; 19 struct nvkm_connector *conn;
20}; 20};
21 21
22#define nvkm_output_create(p,e,c,b,i,d) \ 22struct nvkm_output_func {
23 nvkm_output_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d) 23 void *(*dtor)(struct nvkm_output *);
24#define nvkm_output_destroy(d) ({ \ 24 void (*init)(struct nvkm_output *);
25 struct nvkm_output *_outp = (d); \ 25 void (*fini)(struct nvkm_output *);
26 _nvkm_output_dtor(nv_object(_outp)); \
27})
28#define nvkm_output_init(d) ({ \
29 struct nvkm_output *_outp = (d); \
30 _nvkm_output_init(nv_object(_outp)); \
31})
32#define nvkm_output_fini(d,s) ({ \
33 struct nvkm_output *_outp = (d); \
34 _nvkm_output_fini(nv_object(_outp), (s)); \
35})
36
37int nvkm_output_create_(struct nvkm_object *, struct nvkm_object *,
38 struct nvkm_oclass *, struct dcb_output *,
39 int, int, void **);
40
41int _nvkm_output_ctor(struct nvkm_object *, struct nvkm_object *,
42 struct nvkm_oclass *, void *, u32,
43 struct nvkm_object **);
44void _nvkm_output_dtor(struct nvkm_object *);
45int _nvkm_output_init(struct nvkm_object *);
46int _nvkm_output_fini(struct nvkm_object *, bool);
47
48struct nvkm_output_impl {
49 struct nvkm_oclass base;
50}; 26};
51 27
52#ifndef MSG 28void nvkm_output_ctor(const struct nvkm_output_func *, struct nvkm_disp *,
53#define MSG(l,f,a...) do { \ 29 int index, struct dcb_output *, struct nvkm_output *);
54 struct nvkm_output *_outp = (void *)outp; \ 30int nvkm_output_new_(const struct nvkm_output_func *, struct nvkm_disp *,
55 nv_##l(_outp, "%02x:%04x:%04x: "f, _outp->index, \ 31 int index, struct dcb_output *, struct nvkm_output **);
56 _outp->info.hasht, _outp->info.hashm, ##a); \ 32void nvkm_output_del(struct nvkm_output **);
33void nvkm_output_init(struct nvkm_output *);
34void nvkm_output_fini(struct nvkm_output *);
35
36int nv50_dac_output_new(struct nvkm_disp *, int, struct dcb_output *,
37 struct nvkm_output **);
38int nv50_sor_output_new(struct nvkm_disp *, int, struct dcb_output *,
39 struct nvkm_output **);
40int nv50_pior_output_new(struct nvkm_disp *, int, struct dcb_output *,
41 struct nvkm_output **);
42
43u32 g94_sor_dp_lane_map(struct nvkm_device *, u8 lane);
44
45void gm204_sor_magic(struct nvkm_output *outp);
46
47#define OUTP_MSG(o,l,f,a...) do { \
48 struct nvkm_output *_outp = (o); \
49 nvkm_##l(&_outp->disp->engine.subdev, "outp %02x:%04x:%04x: "f"\n", \
50 _outp->index, _outp->info.hasht, _outp->info.hashm, ##a); \
57} while(0) 51} while(0)
58#define DBG(f,a...) MSG(debug, f, ##a) 52#define OUTP_ERR(o,f,a...) OUTP_MSG((o), error, f, ##a)
59#define ERR(f,a...) MSG(error, f, ##a) 53#define OUTP_DBG(o,f,a...) OUTP_MSG((o), debug, f, ##a)
60#endif 54#define OUTP_TRACE(o,f,a...) OUTP_MSG((o), trace, f, ##a)
61#endif 55#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
index 0bde0fa5b59d..3b7a9e7a1ea8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
@@ -33,16 +33,17 @@
33int 33int
34nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait) 34nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
35{ 35{
36 struct nvkm_output_dp *outp = (void *)base; 36 struct nvkm_output_dp *outp = nvkm_output_dp(base);
37 bool retrain = true; 37 bool retrain = true;
38 u8 link[2], stat[3]; 38 u8 link[2], stat[3];
39 u32 linkrate; 39 u32 linkrate;
40 int ret, i; 40 int ret, i;
41 41
42 /* check that the link is trained at a high enough rate */ 42 /* check that the link is trained at a high enough rate */
43 ret = nv_rdaux(outp->base.edid, DPCD_LC00_LINK_BW_SET, link, 2); 43 ret = nvkm_rdaux(outp->aux, DPCD_LC00_LINK_BW_SET, link, 2);
44 if (ret) { 44 if (ret) {
45 DBG("failed to read link config, assuming no sink\n"); 45 OUTP_DBG(&outp->base,
46 "failed to read link config, assuming no sink");
46 goto done; 47 goto done;
47 } 48 }
48 49
@@ -50,14 +51,15 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
50 linkrate = (linkrate * 8) / 10; /* 8B/10B coding overhead */ 51 linkrate = (linkrate * 8) / 10; /* 8B/10B coding overhead */
51 datarate = (datarate + 9) / 10; /* -> decakilobits */ 52 datarate = (datarate + 9) / 10; /* -> decakilobits */
52 if (linkrate < datarate) { 53 if (linkrate < datarate) {
53 DBG("link not trained at sufficient rate\n"); 54 OUTP_DBG(&outp->base, "link not trained at sufficient rate");
54 goto done; 55 goto done;
55 } 56 }
56 57
57 /* check that link is still trained */ 58 /* check that link is still trained */
58 ret = nv_rdaux(outp->base.edid, DPCD_LS02, stat, 3); 59 ret = nvkm_rdaux(outp->aux, DPCD_LS02, stat, 3);
59 if (ret) { 60 if (ret) {
60 DBG("failed to read link status, assuming no sink\n"); 61 OUTP_DBG(&outp->base,
62 "failed to read link status, assuming no sink");
61 goto done; 63 goto done;
62 } 64 }
63 65
@@ -67,13 +69,14 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
67 if (!(lane & DPCD_LS02_LANE0_CR_DONE) || 69 if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
68 !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) || 70 !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
69 !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) { 71 !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
70 DBG("lane %d not equalised\n", lane); 72 OUTP_DBG(&outp->base,
73 "lane %d not equalised", lane);
71 goto done; 74 goto done;
72 } 75 }
73 } 76 }
74 retrain = false; 77 retrain = false;
75 } else { 78 } else {
76 DBG("no inter-lane alignment\n"); 79 OUTP_DBG(&outp->base, "no inter-lane alignment");
77 } 80 }
78 81
79done: 82done:
@@ -102,150 +105,138 @@ done:
102} 105}
103 106
104static void 107static void
105nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool present) 108nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool enable)
106{ 109{
107 struct nvkm_i2c_port *port = outp->base.edid; 110 struct nvkm_i2c_aux *aux = outp->aux;
108 if (present) { 111
112 if (enable) {
109 if (!outp->present) { 113 if (!outp->present) {
110 nvkm_i2c(port)->acquire_pad(port, 0); 114 OUTP_DBG(&outp->base, "aux power -> always");
111 DBG("aux power -> always\n"); 115 nvkm_i2c_aux_monitor(aux, true);
112 outp->present = true; 116 outp->present = true;
113 } 117 }
114 nvkm_output_dp_train(&outp->base, 0, true); 118
115 } else { 119 if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dpcd,
116 if (outp->present) { 120 sizeof(outp->dpcd))) {
117 nvkm_i2c(port)->release_pad(port); 121 nvkm_output_dp_train(&outp->base, 0, true);
118 DBG("aux power -> demand\n"); 122 return;
119 outp->present = false;
120 } 123 }
121 atomic_set(&outp->lt.done, 0);
122 } 124 }
123}
124 125
125static void 126 if (outp->present) {
126nvkm_output_dp_detect(struct nvkm_output_dp *outp) 127 OUTP_DBG(&outp->base, "aux power -> demand");
127{ 128 nvkm_i2c_aux_monitor(aux, false);
128 struct nvkm_i2c_port *port = outp->base.edid; 129 outp->present = false;
129 int ret = nvkm_i2c(port)->acquire_pad(port, 0);
130 if (ret == 0) {
131 ret = nv_rdaux(outp->base.edid, DPCD_RC00_DPCD_REV,
132 outp->dpcd, sizeof(outp->dpcd));
133 nvkm_output_dp_enable(outp, ret == 0);
134 nvkm_i2c(port)->release_pad(port);
135 } 130 }
131
132 atomic_set(&outp->lt.done, 0);
136} 133}
137 134
138static int 135static int
139nvkm_output_dp_hpd(struct nvkm_notify *notify) 136nvkm_output_dp_hpd(struct nvkm_notify *notify)
140{ 137{
141 struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
142 struct nvkm_output_dp *outp;
143 struct nvkm_disp *disp = nvkm_disp(conn);
144 const struct nvkm_i2c_ntfy_rep *line = notify->data; 138 const struct nvkm_i2c_ntfy_rep *line = notify->data;
139 struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), hpd);
140 struct nvkm_connector *conn = outp->base.conn;
141 struct nvkm_disp *disp = outp->base.disp;
145 struct nvif_notify_conn_rep_v0 rep = {}; 142 struct nvif_notify_conn_rep_v0 rep = {};
146 143
147 list_for_each_entry(outp, &disp->outp, base.head) { 144 OUTP_DBG(&outp->base, "HPD: %d", line->mask);
148 if (outp->base.conn == conn && 145 nvkm_output_dp_enable(outp, true);
149 outp->info.type == DCB_OUTPUT_DP) {
150 DBG("HPD: %d\n", line->mask);
151 nvkm_output_dp_detect(outp);
152 146
153 if (line->mask & NVKM_I2C_UNPLUG) 147 if (line->mask & NVKM_I2C_UNPLUG)
154 rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG; 148 rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
155 if (line->mask & NVKM_I2C_PLUG) 149 if (line->mask & NVKM_I2C_PLUG)
156 rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG; 150 rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;
157 151
158 nvkm_event_send(&disp->hpd, rep.mask, conn->index, 152 nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
159 &rep, sizeof(rep)); 153 return NVKM_NOTIFY_KEEP;
160 return NVKM_NOTIFY_KEEP;
161 }
162 }
163
164 WARN_ON(1);
165 return NVKM_NOTIFY_DROP;
166} 154}
167 155
168static int 156static int
169nvkm_output_dp_irq(struct nvkm_notify *notify) 157nvkm_output_dp_irq(struct nvkm_notify *notify)
170{ 158{
171 struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), irq);
172 struct nvkm_disp *disp = nvkm_disp(outp);
173 const struct nvkm_i2c_ntfy_rep *line = notify->data; 159 const struct nvkm_i2c_ntfy_rep *line = notify->data;
160 struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), irq);
161 struct nvkm_connector *conn = outp->base.conn;
162 struct nvkm_disp *disp = outp->base.disp;
174 struct nvif_notify_conn_rep_v0 rep = { 163 struct nvif_notify_conn_rep_v0 rep = {
175 .mask = NVIF_NOTIFY_CONN_V0_IRQ, 164 .mask = NVIF_NOTIFY_CONN_V0_IRQ,
176 }; 165 };
177 int index = outp->base.info.connector;
178 166
179 DBG("IRQ: %d\n", line->mask); 167 OUTP_DBG(&outp->base, "IRQ: %d", line->mask);
180 nvkm_output_dp_train(&outp->base, 0, true); 168 nvkm_output_dp_train(&outp->base, 0, true);
181 169
182 nvkm_event_send(&disp->hpd, rep.mask, index, &rep, sizeof(rep)); 170 nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
183 return NVKM_NOTIFY_DROP; 171 return NVKM_NOTIFY_DROP;
184} 172}
185 173
186int 174static void
187_nvkm_output_dp_fini(struct nvkm_object *object, bool suspend) 175nvkm_output_dp_fini(struct nvkm_output *base)
188{ 176{
189 struct nvkm_output_dp *outp = (void *)object; 177 struct nvkm_output_dp *outp = nvkm_output_dp(base);
178 nvkm_notify_put(&outp->hpd);
190 nvkm_notify_put(&outp->irq); 179 nvkm_notify_put(&outp->irq);
180 flush_work(&outp->lt.work);
191 nvkm_output_dp_enable(outp, false); 181 nvkm_output_dp_enable(outp, false);
192 return nvkm_output_fini(&outp->base, suspend);
193} 182}
194 183
195int 184static void
196_nvkm_output_dp_init(struct nvkm_object *object) 185nvkm_output_dp_init(struct nvkm_output *base)
197{ 186{
198 struct nvkm_output_dp *outp = (void *)object; 187 struct nvkm_output_dp *outp = nvkm_output_dp(base);
199 nvkm_output_dp_detect(outp); 188 nvkm_notify_put(&outp->base.conn->hpd);
200 return nvkm_output_init(&outp->base); 189 nvkm_output_dp_enable(outp, true);
190 nvkm_notify_get(&outp->hpd);
201} 191}
202 192
203void 193static void *
204_nvkm_output_dp_dtor(struct nvkm_object *object) 194nvkm_output_dp_dtor(struct nvkm_output *base)
205{ 195{
206 struct nvkm_output_dp *outp = (void *)object; 196 struct nvkm_output_dp *outp = nvkm_output_dp(base);
197 nvkm_notify_fini(&outp->hpd);
207 nvkm_notify_fini(&outp->irq); 198 nvkm_notify_fini(&outp->irq);
208 nvkm_output_destroy(&outp->base); 199 return outp;
209} 200}
210 201
202static const struct nvkm_output_func
203nvkm_output_dp_func = {
204 .dtor = nvkm_output_dp_dtor,
205 .init = nvkm_output_dp_init,
206 .fini = nvkm_output_dp_fini,
207};
208
211int 209int
212nvkm_output_dp_create_(struct nvkm_object *parent, 210nvkm_output_dp_ctor(const struct nvkm_output_dp_func *func,
213 struct nvkm_object *engine, 211 struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
214 struct nvkm_oclass *oclass, 212 struct nvkm_i2c_aux *aux, struct nvkm_output_dp *outp)
215 struct dcb_output *info, int index,
216 int length, void **pobject)
217{ 213{
218 struct nvkm_bios *bios = nvkm_bios(parent); 214 struct nvkm_device *device = disp->engine.subdev.device;
219 struct nvkm_i2c *i2c = nvkm_i2c(parent); 215 struct nvkm_bios *bios = device->bios;
220 struct nvkm_output_dp *outp; 216 struct nvkm_i2c *i2c = device->i2c;
221 u8 hdr, cnt, len; 217 u8 hdr, cnt, len;
222 u32 data; 218 u32 data;
223 int ret; 219 int ret;
224 220
225 ret = nvkm_output_create_(parent, engine, oclass, info, index, 221 nvkm_output_ctor(&nvkm_output_dp_func, disp, index, dcbE, &outp->base);
226 length, pobject); 222 outp->func = func;
227 outp = *pobject; 223 outp->aux = aux;
228 if (ret) 224 if (!outp->aux) {
229 return ret; 225 OUTP_ERR(&outp->base, "no aux");
230
231 nvkm_notify_fini(&outp->base.conn->hpd);
232
233 /* access to the aux channel is not optional... */
234 if (!outp->base.edid) {
235 ERR("aux channel not found\n");
236 return -ENODEV; 226 return -ENODEV;
237 } 227 }
238 228
239 /* nor is the bios data for this output... */ 229 /* bios data is not optional */
240 data = nvbios_dpout_match(bios, outp->base.info.hasht, 230 data = nvbios_dpout_match(bios, outp->base.info.hasht,
241 outp->base.info.hashm, &outp->version, 231 outp->base.info.hashm, &outp->version,
242 &hdr, &cnt, &len, &outp->info); 232 &hdr, &cnt, &len, &outp->info);
243 if (!data) { 233 if (!data) {
244 ERR("no bios dp data\n"); 234 OUTP_ERR(&outp->base, "no bios dp data");
245 return -ENODEV; 235 return -ENODEV;
246 } 236 }
247 237
248 DBG("bios dp %02x %02x %02x %02x\n", outp->version, hdr, cnt, len); 238 OUTP_DBG(&outp->base, "bios dp %02x %02x %02x %02x",
239 outp->version, hdr, cnt, len);
249 240
250 /* link training */ 241 /* link training */
251 INIT_WORK(&outp->lt.work, nvkm_dp_train); 242 INIT_WORK(&outp->lt.work, nvkm_dp_train);
@@ -256,13 +247,13 @@ nvkm_output_dp_create_(struct nvkm_object *parent,
256 ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_irq, true, 247 ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_irq, true,
257 &(struct nvkm_i2c_ntfy_req) { 248 &(struct nvkm_i2c_ntfy_req) {
258 .mask = NVKM_I2C_IRQ, 249 .mask = NVKM_I2C_IRQ,
259 .port = outp->base.edid->index, 250 .port = outp->aux->id,
260 }, 251 },
261 sizeof(struct nvkm_i2c_ntfy_req), 252 sizeof(struct nvkm_i2c_ntfy_req),
262 sizeof(struct nvkm_i2c_ntfy_rep), 253 sizeof(struct nvkm_i2c_ntfy_rep),
263 &outp->irq); 254 &outp->irq);
264 if (ret) { 255 if (ret) {
265 ERR("error monitoring aux irq event: %d\n", ret); 256 OUTP_ERR(&outp->base, "error monitoring aux irq: %d", ret);
266 return ret; 257 return ret;
267 } 258 }
268 259
@@ -270,13 +261,13 @@ nvkm_output_dp_create_(struct nvkm_object *parent,
270 ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_hpd, true, 261 ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_hpd, true,
271 &(struct nvkm_i2c_ntfy_req) { 262 &(struct nvkm_i2c_ntfy_req) {
272 .mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG, 263 .mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG,
273 .port = outp->base.edid->index, 264 .port = outp->aux->id,
274 }, 265 },
275 sizeof(struct nvkm_i2c_ntfy_req), 266 sizeof(struct nvkm_i2c_ntfy_req),
276 sizeof(struct nvkm_i2c_ntfy_rep), 267 sizeof(struct nvkm_i2c_ntfy_rep),
277 &outp->base.conn->hpd); 268 &outp->hpd);
278 if (ret) { 269 if (ret) {
279 ERR("error monitoring aux hpd events: %d\n", ret); 270 OUTP_ERR(&outp->base, "error monitoring aux hpd: %d", ret);
280 return ret; 271 return ret;
281 } 272 }
282 273
@@ -284,18 +275,17 @@ nvkm_output_dp_create_(struct nvkm_object *parent,
284} 275}
285 276
286int 277int
287_nvkm_output_dp_ctor(struct nvkm_object *parent, 278nvkm_output_dp_new_(const struct nvkm_output_dp_func *func,
288 struct nvkm_object *engine, 279 struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
289 struct nvkm_oclass *oclass, void *info, u32 index, 280 struct nvkm_output **poutp)
290 struct nvkm_object **pobject)
291{ 281{
282 struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
283 struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, dcbE->i2c_index);
292 struct nvkm_output_dp *outp; 284 struct nvkm_output_dp *outp;
293 int ret;
294 285
295 ret = nvkm_output_dp_create(parent, engine, oclass, info, index, &outp); 286 if (!(outp = kzalloc(sizeof(*outp), GFP_KERNEL)))
296 *pobject = nv_object(outp); 287 return -ENOMEM;
297 if (ret) 288 *poutp = &outp->base;
298 return ret;
299 289
300 return 0; 290 return nvkm_output_dp_ctor(func, disp, index, dcbE, aux, outp);
301} 291}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index 70c77aec4850..731136d660b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
@@ -1,5 +1,14 @@
1#ifndef __NVKM_DISP_OUTP_DP_H__ 1#ifndef __NVKM_DISP_OUTP_DP_H__
2#define __NVKM_DISP_OUTP_DP_H__ 2#define __NVKM_DISP_OUTP_DP_H__
3#define nvkm_output_dp(p) container_of((p), struct nvkm_output_dp, base)
4#ifndef MSG
5#define MSG(l,f,a...) \
6 nvkm_##l(&outp->base.disp->engine.subdev, "%02x:%04x:%04x: "f, \
7 outp->base.index, outp->base.info.hasht, \
8 outp->base.info.hashm, ##a)
9#define DBG(f,a...) MSG(debug, f, ##a)
10#define ERR(f,a...) MSG(error, f, ##a)
11#endif
3#include "outp.h" 12#include "outp.h"
4 13
5#include <core/notify.h> 14#include <core/notify.h>
@@ -7,12 +16,16 @@
7#include <subdev/bios/dp.h> 16#include <subdev/bios/dp.h>
8 17
9struct nvkm_output_dp { 18struct nvkm_output_dp {
19 const struct nvkm_output_dp_func *func;
10 struct nvkm_output base; 20 struct nvkm_output base;
11 21
12 struct nvbios_dpout info; 22 struct nvbios_dpout info;
13 u8 version; 23 u8 version;
14 24
25 struct nvkm_i2c_aux *aux;
26
15 struct nvkm_notify irq; 27 struct nvkm_notify irq;
28 struct nvkm_notify hpd;
16 bool present; 29 bool present;
17 u8 dpcd[16]; 30 u8 dpcd[16];
18 31
@@ -23,34 +36,7 @@ struct nvkm_output_dp {
23 } lt; 36 } lt;
24}; 37};
25 38
26#define nvkm_output_dp_create(p,e,c,b,i,d) \ 39struct nvkm_output_dp_func {
27 nvkm_output_dp_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
28#define nvkm_output_dp_destroy(d) ({ \
29 struct nvkm_output_dp *_outp = (d); \
30 _nvkm_output_dp_dtor(nv_object(_outp)); \
31})
32#define nvkm_output_dp_init(d) ({ \
33 struct nvkm_output_dp *_outp = (d); \
34 _nvkm_output_dp_init(nv_object(_outp)); \
35})
36#define nvkm_output_dp_fini(d,s) ({ \
37 struct nvkm_output_dp *_outp = (d); \
38 _nvkm_output_dp_fini(nv_object(_outp), (s)); \
39})
40
41int nvkm_output_dp_create_(struct nvkm_object *, struct nvkm_object *,
42 struct nvkm_oclass *, struct dcb_output *,
43 int, int, void **);
44
45int _nvkm_output_dp_ctor(struct nvkm_object *, struct nvkm_object *,
46 struct nvkm_oclass *, void *, u32,
47 struct nvkm_object **);
48void _nvkm_output_dp_dtor(struct nvkm_object *);
49int _nvkm_output_dp_init(struct nvkm_object *);
50int _nvkm_output_dp_fini(struct nvkm_object *, bool);
51
52struct nvkm_output_dp_impl {
53 struct nvkm_output_impl base;
54 int (*pattern)(struct nvkm_output_dp *, int); 40 int (*pattern)(struct nvkm_output_dp *, int);
55 int (*lnk_pwr)(struct nvkm_output_dp *, int nr); 41 int (*lnk_pwr)(struct nvkm_output_dp *, int nr);
56 int (*lnk_ctl)(struct nvkm_output_dp *, int nr, int bw, bool ef); 42 int (*lnk_ctl)(struct nvkm_output_dp *, int nr, int bw, bool ef);
@@ -58,4 +44,25 @@ struct nvkm_output_dp_impl {
58}; 44};
59 45
60int nvkm_output_dp_train(struct nvkm_output *, u32 rate, bool wait); 46int nvkm_output_dp_train(struct nvkm_output *, u32 rate, bool wait);
47
48int nvkm_output_dp_ctor(const struct nvkm_output_dp_func *, struct nvkm_disp *,
49 int index, struct dcb_output *, struct nvkm_i2c_aux *,
50 struct nvkm_output_dp *);
51int nvkm_output_dp_new_(const struct nvkm_output_dp_func *, struct nvkm_disp *,
52 int index, struct dcb_output *,
53 struct nvkm_output **);
54
55int nv50_pior_dp_new(struct nvkm_disp *, int, struct dcb_output *,
56 struct nvkm_output **);
57
58int g94_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
59 struct nvkm_output **);
60int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
61
62int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
63 struct nvkm_output **);
64int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
65
66int gm204_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
67 struct nvkm_output **);
61#endif 68#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
new file mode 100644
index 000000000000..db6234eebc61
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
@@ -0,0 +1,77 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29static const struct nv50_disp_mthd_list
30g84_disp_ovly_mthd_base = {
31 .mthd = 0x0000,
32 .addr = 0x000000,
33 .data = {
34 { 0x0080, 0x000000 },
35 { 0x0084, 0x6109a0 },
36 { 0x0088, 0x6109c0 },
37 { 0x008c, 0x6109c8 },
38 { 0x0090, 0x6109b4 },
39 { 0x0094, 0x610970 },
40 { 0x00a0, 0x610998 },
41 { 0x00a4, 0x610964 },
42 { 0x00c0, 0x610958 },
43 { 0x00e0, 0x6109a8 },
44 { 0x00e4, 0x6109d0 },
45 { 0x00e8, 0x6109d8 },
46 { 0x0100, 0x61094c },
47 { 0x0104, 0x610984 },
48 { 0x0108, 0x61098c },
49 { 0x0800, 0x6109f8 },
50 { 0x0808, 0x610a08 },
51 { 0x080c, 0x610a10 },
52 { 0x0810, 0x610a00 },
53 {}
54 }
55};
56
57const struct nv50_disp_chan_mthd
58g84_disp_ovly_chan_mthd = {
59 .name = "Overlay",
60 .addr = 0x000540,
61 .prev = 0x000004,
62 .data = {
63 { "Global", 1, &g84_disp_ovly_mthd_base },
64 {}
65 }
66};
67
68const struct nv50_disp_dmac_oclass
69g84_disp_ovly_oclass = {
70 .base.oclass = G82_DISP_OVERLAY_CHANNEL_DMA,
71 .base.minver = 0,
72 .base.maxver = 0,
73 .ctor = nv50_disp_ovly_new,
74 .func = &nv50_disp_dmac_func,
75 .mthd = &g84_disp_ovly_chan_mthd,
76 .chid = 3,
77};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
new file mode 100644
index 000000000000..5985879abd23
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
@@ -0,0 +1,101 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29static const struct nv50_disp_mthd_list
30gf119_disp_ovly_mthd_base = {
31 .mthd = 0x0000,
32 .data = {
33 { 0x0080, 0x665080 },
34 { 0x0084, 0x665084 },
35 { 0x0088, 0x665088 },
36 { 0x008c, 0x66508c },
37 { 0x0090, 0x665090 },
38 { 0x0094, 0x665094 },
39 { 0x00a0, 0x6650a0 },
40 { 0x00a4, 0x6650a4 },
41 { 0x00b0, 0x6650b0 },
42 { 0x00b4, 0x6650b4 },
43 { 0x00b8, 0x6650b8 },
44 { 0x00c0, 0x6650c0 },
45 { 0x00e0, 0x6650e0 },
46 { 0x00e4, 0x6650e4 },
47 { 0x00e8, 0x6650e8 },
48 { 0x0100, 0x665100 },
49 { 0x0104, 0x665104 },
50 { 0x0108, 0x665108 },
51 { 0x010c, 0x66510c },
52 { 0x0110, 0x665110 },
53 { 0x0118, 0x665118 },
54 { 0x011c, 0x66511c },
55 { 0x0120, 0x665120 },
56 { 0x0124, 0x665124 },
57 { 0x0130, 0x665130 },
58 { 0x0134, 0x665134 },
59 { 0x0138, 0x665138 },
60 { 0x013c, 0x66513c },
61 { 0x0140, 0x665140 },
62 { 0x0144, 0x665144 },
63 { 0x0148, 0x665148 },
64 { 0x014c, 0x66514c },
65 { 0x0150, 0x665150 },
66 { 0x0154, 0x665154 },
67 { 0x0158, 0x665158 },
68 { 0x015c, 0x66515c },
69 { 0x0160, 0x665160 },
70 { 0x0164, 0x665164 },
71 { 0x0168, 0x665168 },
72 { 0x016c, 0x66516c },
73 { 0x0400, 0x665400 },
74 { 0x0408, 0x665408 },
75 { 0x040c, 0x66540c },
76 { 0x0410, 0x665410 },
77 {}
78 }
79};
80
81static const struct nv50_disp_chan_mthd
82gf119_disp_ovly_chan_mthd = {
83 .name = "Overlay",
84 .addr = 0x001000,
85 .prev = -0x020000,
86 .data = {
87 { "Global", 1, &gf119_disp_ovly_mthd_base },
88 {}
89 }
90};
91
92const struct nv50_disp_dmac_oclass
93gf119_disp_ovly_oclass = {
94 .base.oclass = GF110_DISP_OVERLAY_CONTROL_DMA,
95 .base.minver = 0,
96 .base.maxver = 0,
97 .ctor = nv50_disp_ovly_new,
98 .func = &gf119_disp_dmac_func,
99 .mthd = &gf119_disp_ovly_chan_mthd,
100 .chid = 5,
101};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
new file mode 100644
index 000000000000..2e2dc0641ef2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
@@ -0,0 +1,103 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29static const struct nv50_disp_mthd_list
30gk104_disp_ovly_mthd_base = {
31 .mthd = 0x0000,
32 .data = {
33 { 0x0080, 0x665080 },
34 { 0x0084, 0x665084 },
35 { 0x0088, 0x665088 },
36 { 0x008c, 0x66508c },
37 { 0x0090, 0x665090 },
38 { 0x0094, 0x665094 },
39 { 0x00a0, 0x6650a0 },
40 { 0x00a4, 0x6650a4 },
41 { 0x00b0, 0x6650b0 },
42 { 0x00b4, 0x6650b4 },
43 { 0x00b8, 0x6650b8 },
44 { 0x00c0, 0x6650c0 },
45 { 0x00c4, 0x6650c4 },
46 { 0x00e0, 0x6650e0 },
47 { 0x00e4, 0x6650e4 },
48 { 0x00e8, 0x6650e8 },
49 { 0x0100, 0x665100 },
50 { 0x0104, 0x665104 },
51 { 0x0108, 0x665108 },
52 { 0x010c, 0x66510c },
53 { 0x0110, 0x665110 },
54 { 0x0118, 0x665118 },
55 { 0x011c, 0x66511c },
56 { 0x0120, 0x665120 },
57 { 0x0124, 0x665124 },
58 { 0x0130, 0x665130 },
59 { 0x0134, 0x665134 },
60 { 0x0138, 0x665138 },
61 { 0x013c, 0x66513c },
62 { 0x0140, 0x665140 },
63 { 0x0144, 0x665144 },
64 { 0x0148, 0x665148 },
65 { 0x014c, 0x66514c },
66 { 0x0150, 0x665150 },
67 { 0x0154, 0x665154 },
68 { 0x0158, 0x665158 },
69 { 0x015c, 0x66515c },
70 { 0x0160, 0x665160 },
71 { 0x0164, 0x665164 },
72 { 0x0168, 0x665168 },
73 { 0x016c, 0x66516c },
74 { 0x0400, 0x665400 },
75 { 0x0404, 0x665404 },
76 { 0x0408, 0x665408 },
77 { 0x040c, 0x66540c },
78 { 0x0410, 0x665410 },
79 {}
80 }
81};
82
83static const struct nv50_disp_chan_mthd
84gk104_disp_ovly_chan_mthd = {
85 .name = "Overlay",
86 .addr = 0x001000,
87 .prev = -0x020000,
88 .data = {
89 { "Global", 1, &gk104_disp_ovly_mthd_base },
90 {}
91 }
92};
93
94const struct nv50_disp_dmac_oclass
95gk104_disp_ovly_oclass = {
96 .base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
97 .base.minver = 0,
98 .base.maxver = 0,
99 .ctor = nv50_disp_ovly_new,
100 .func = &gf119_disp_dmac_func,
101 .mthd = &gk104_disp_ovly_chan_mthd,
102 .chid = 5,
103};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
new file mode 100644
index 000000000000..f858053db83d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
@@ -0,0 +1,80 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29static const struct nv50_disp_mthd_list
30gt200_disp_ovly_mthd_base = {
31 .mthd = 0x0000,
32 .addr = 0x000000,
33 .data = {
34 { 0x0080, 0x000000 },
35 { 0x0084, 0x6109a0 },
36 { 0x0088, 0x6109c0 },
37 { 0x008c, 0x6109c8 },
38 { 0x0090, 0x6109b4 },
39 { 0x0094, 0x610970 },
40 { 0x00a0, 0x610998 },
41 { 0x00a4, 0x610964 },
42 { 0x00b0, 0x610c98 },
43 { 0x00b4, 0x610ca4 },
44 { 0x00b8, 0x610cac },
45 { 0x00c0, 0x610958 },
46 { 0x00e0, 0x6109a8 },
47 { 0x00e4, 0x6109d0 },
48 { 0x00e8, 0x6109d8 },
49 { 0x0100, 0x61094c },
50 { 0x0104, 0x610984 },
51 { 0x0108, 0x61098c },
52 { 0x0800, 0x6109f8 },
53 { 0x0808, 0x610a08 },
54 { 0x080c, 0x610a10 },
55 { 0x0810, 0x610a00 },
56 {}
57 }
58};
59
60static const struct nv50_disp_chan_mthd
61gt200_disp_ovly_chan_mthd = {
62 .name = "Overlay",
63 .addr = 0x000540,
64 .prev = 0x000004,
65 .data = {
66 { "Global", 1, &gt200_disp_ovly_mthd_base },
67 {}
68 }
69};
70
71const struct nv50_disp_dmac_oclass
72gt200_disp_ovly_oclass = {
73 .base.oclass = GT200_DISP_OVERLAY_CHANNEL_DMA,
74 .base.minver = 0,
75 .base.maxver = 0,
76 .ctor = nv50_disp_ovly_new,
77 .func = &nv50_disp_dmac_func,
78 .mthd = &gt200_disp_ovly_chan_mthd,
79 .chid = 3,
80};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c
new file mode 100644
index 000000000000..c947e1e16a37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_dmac_oclass
30gt215_disp_ovly_oclass = {
31 .base.oclass = GT214_DISP_OVERLAY_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_ovly_new,
35 .func = &nv50_disp_dmac_func,
36 .mthd = &g84_disp_ovly_chan_mthd,
37 .chid = 3,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
new file mode 100644
index 000000000000..6fa296c047b8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
@@ -0,0 +1,111 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <core/client.h>
28
29#include <nvif/class.h>
30#include <nvif/unpack.h>
31
32int
33nv50_disp_ovly_new(const struct nv50_disp_dmac_func *func,
34 const struct nv50_disp_chan_mthd *mthd,
35 struct nv50_disp_root *root, int chid,
36 const struct nvkm_oclass *oclass, void *data, u32 size,
37 struct nvkm_object **pobject)
38{
39 union {
40 struct nv50_disp_overlay_channel_dma_v0 v0;
41 } *args = data;
42 struct nvkm_object *parent = oclass->parent;
43 struct nv50_disp *disp = root->disp;
44 int head, ret;
45 u64 push;
46
47 nvif_ioctl(parent, "create disp overlay channel dma size %d\n", size);
48 if (nvif_unpack(args->v0, 0, 0, false)) {
49 nvif_ioctl(parent, "create disp overlay channel dma vers %d "
50 "pushbuf %016llx head %d\n",
51 args->v0.version, args->v0.pushbuf, args->v0.head);
52 if (args->v0.head > disp->base.head.nr)
53 return -EINVAL;
54 push = args->v0.pushbuf;
55 head = args->v0.head;
56 } else
57 return ret;
58
59 return nv50_disp_dmac_new_(func, mthd, root, chid + head,
60 head, push, oclass, pobject);
61}
62
63static const struct nv50_disp_mthd_list
64nv50_disp_ovly_mthd_base = {
65 .mthd = 0x0000,
66 .addr = 0x000000,
67 .data = {
68 { 0x0080, 0x000000 },
69 { 0x0084, 0x0009a0 },
70 { 0x0088, 0x0009c0 },
71 { 0x008c, 0x0009c8 },
72 { 0x0090, 0x6109b4 },
73 { 0x0094, 0x610970 },
74 { 0x00a0, 0x610998 },
75 { 0x00a4, 0x610964 },
76 { 0x00c0, 0x610958 },
77 { 0x00e0, 0x6109a8 },
78 { 0x00e4, 0x6109d0 },
79 { 0x00e8, 0x6109d8 },
80 { 0x0100, 0x61094c },
81 { 0x0104, 0x610984 },
82 { 0x0108, 0x61098c },
83 { 0x0800, 0x6109f8 },
84 { 0x0808, 0x610a08 },
85 { 0x080c, 0x610a10 },
86 { 0x0810, 0x610a00 },
87 {}
88 }
89};
90
91static const struct nv50_disp_chan_mthd
92nv50_disp_ovly_chan_mthd = {
93 .name = "Overlay",
94 .addr = 0x000540,
95 .prev = 0x000004,
96 .data = {
97 { "Global", 1, &nv50_disp_ovly_mthd_base },
98 {}
99 }
100};
101
102const struct nv50_disp_dmac_oclass
103nv50_disp_ovly_oclass = {
104 .base.oclass = NV50_DISP_OVERLAY_CHANNEL_DMA,
105 .base.minver = 0,
106 .base.maxver = 0,
107 .ctor = nv50_disp_ovly_new,
108 .func = &nv50_disp_dmac_func,
109 .mthd = &nv50_disp_ovly_chan_mthd,
110 .chid = 3,
111};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
new file mode 100644
index 000000000000..a625a9876e34
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
@@ -0,0 +1,81 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <subdev/timer.h>
28
29static void
30gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
31{
32 struct nv50_disp *disp = chan->root->disp;
33 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
34 struct nvkm_device *device = subdev->device;
35 int chid = chan->chid;
36
37 nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
38 if (nvkm_msec(device, 2000,
39 if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000))
40 break;
41 ) < 0) {
42 nvkm_error(subdev, "ch %d fini: %08x\n", chid,
43 nvkm_rd32(device, 0x610490 + (chid * 0x10)));
44 }
45
46 /* disable error reporting and completion notification */
47 nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
48 nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
49}
50
51static int
52gf119_disp_pioc_init(struct nv50_disp_chan *chan)
53{
54 struct nv50_disp *disp = chan->root->disp;
55 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
56 struct nvkm_device *device = subdev->device;
57 int chid = chan->chid;
58
59 /* enable error reporting */
60 nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
61
62 /* activate channel */
63 nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001);
64 if (nvkm_msec(device, 2000,
65 u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10));
66 if ((tmp & 0x00030000) == 0x00010000)
67 break;
68 ) < 0) {
69 nvkm_error(subdev, "ch %d init: %08x\n", chid,
70 nvkm_rd32(device, 0x610490 + (chid * 0x10)));
71 return -EBUSY;
72 }
73
74 return 0;
75}
76
77const struct nv50_disp_chan_func
78gf119_disp_pioc_func = {
79 .init = gf119_disp_pioc_init,
80 .fini = gf119_disp_pioc_fini,
81};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
new file mode 100644
index 000000000000..9d2618dacf20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
@@ -0,0 +1,83 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <subdev/timer.h>
28
29static void
30nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
31{
32 struct nv50_disp *disp = chan->root->disp;
33 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
34 struct nvkm_device *device = subdev->device;
35 int chid = chan->chid;
36
37 nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
38 if (nvkm_msec(device, 2000,
39 if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
40 break;
41 ) < 0) {
42 nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
43 nvkm_rd32(device, 0x610200 + (chid * 0x10)));
44 }
45}
46
47static int
48nv50_disp_pioc_init(struct nv50_disp_chan *chan)
49{
50 struct nv50_disp *disp = chan->root->disp;
51 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
52 struct nvkm_device *device = subdev->device;
53 int chid = chan->chid;
54
55 nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
56 if (nvkm_msec(device, 2000,
57 if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
58 break;
59 ) < 0) {
60 nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
61 nvkm_rd32(device, 0x610200 + (chid * 0x10)));
62 return -EBUSY;
63 }
64
65 nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
66 if (nvkm_msec(device, 2000,
67 u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
68 if ((tmp & 0x00030000) == 0x00010000)
69 break;
70 ) < 0) {
71 nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
72 nvkm_rd32(device, 0x610200 + (chid * 0x10)));
73 return -EBUSY;
74 }
75
76 return 0;
77}
78
79const struct nv50_disp_chan_func
80nv50_disp_pioc_func = {
81 .init = nv50_disp_pioc_init,
82 .fini = nv50_disp_pioc_fini,
83};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
index 2a1d8871bf82..ab524bde7795 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
@@ -21,8 +21,8 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h"
25#include "outpdp.h" 24#include "outpdp.h"
25#include "nv50.h"
26 26
27#include <core/client.h> 27#include <core/client.h>
28#include <subdev/i2c.h> 28#include <subdev/i2c.h>
@@ -31,140 +31,101 @@
31#include <nvif/class.h> 31#include <nvif/class.h>
32#include <nvif/unpack.h> 32#include <nvif/unpack.h>
33 33
34/****************************************************************************** 34int
35 * TMDS 35nv50_pior_power(NV50_DISP_MTHD_V1)
36 *****************************************************************************/
37
38static int
39nv50_pior_tmds_ctor(struct nvkm_object *parent,
40 struct nvkm_object *engine,
41 struct nvkm_oclass *oclass, void *info, u32 index,
42 struct nvkm_object **pobject)
43{ 36{
44 struct nvkm_i2c *i2c = nvkm_i2c(parent); 37 struct nvkm_device *device = disp->base.engine.subdev.device;
45 struct nvkm_output *outp; 38 const u32 soff = outp->or * 0x800;
39 union {
40 struct nv50_disp_pior_pwr_v0 v0;
41 } *args = data;
42 u32 ctrl, type;
46 int ret; 43 int ret;
47 44
48 ret = nvkm_output_create(parent, engine, oclass, info, index, &outp); 45 nvif_ioctl(object, "disp pior pwr size %d\n", size);
49 *pobject = nv_object(outp); 46 if (nvif_unpack(args->v0, 0, 0, false)) {
50 if (ret) 47 nvif_ioctl(object, "disp pior pwr vers %d state %d type %x\n",
48 args->v0.version, args->v0.state, args->v0.type);
49 if (args->v0.type > 0x0f)
50 return -EINVAL;
51 ctrl = !!args->v0.state;
52 type = args->v0.type;
53 } else
51 return ret; 54 return ret;
52 55
53 outp->edid = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(outp->info.extdev)); 56 nvkm_msec(device, 2000,
57 if (!(nvkm_rd32(device, 0x61e004 + soff) & 0x80000000))
58 break;
59 );
60 nvkm_mask(device, 0x61e004 + soff, 0x80000101, 0x80000000 | ctrl);
61 nvkm_msec(device, 2000,
62 if (!(nvkm_rd32(device, 0x61e004 + soff) & 0x80000000))
63 break;
64 );
65 disp->pior.type[outp->or] = type;
54 return 0; 66 return 0;
55} 67}
56 68
57struct nvkm_output_impl
58nv50_pior_tmds_impl = {
59 .base.handle = DCB_OUTPUT_TMDS | 0x0100,
60 .base.ofuncs = &(struct nvkm_ofuncs) {
61 .ctor = nv50_pior_tmds_ctor,
62 .dtor = _nvkm_output_dtor,
63 .init = _nvkm_output_init,
64 .fini = _nvkm_output_fini,
65 },
66};
67
68/****************************************************************************** 69/******************************************************************************
69 * DisplayPort 70 * TMDS
70 *****************************************************************************/ 71 *****************************************************************************/
72static const struct nvkm_output_func
73nv50_pior_output_func = {
74};
71 75
72static int 76int
73nv50_pior_dp_pattern(struct nvkm_output_dp *outp, int pattern) 77nv50_pior_output_new(struct nvkm_disp *disp, int index,
78 struct dcb_output *dcbE, struct nvkm_output **poutp)
74{ 79{
75 struct nvkm_i2c_port *port = outp->base.edid; 80 return nvkm_output_new_(&nv50_pior_output_func, disp,
76 if (port && port->func->pattern) 81 index, dcbE, poutp);
77 return port->func->pattern(port, pattern);
78 return port ? 0 : -ENODEV;
79} 82}
80 83
84/******************************************************************************
85 * DisplayPort
86 *****************************************************************************/
81static int 87static int
82nv50_pior_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr) 88nv50_pior_output_dp_pattern(struct nvkm_output_dp *outp, int pattern)
83{ 89{
84 return 0; 90 return 0;
85} 91}
86 92
87static int 93static int
88nv50_pior_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef) 94nv50_pior_output_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
89{
90 struct nvkm_i2c_port *port = outp->base.edid;
91 if (port && port->func->lnk_ctl)
92 return port->func->lnk_ctl(port, nr, bw, ef);
93 return port ? 0 : -ENODEV;
94}
95
96static int
97nv50_pior_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
98{ 95{
99 struct nvkm_i2c_port *port = outp->base.edid; 96 return 0;
100 if (port && port->func->drv_ctl)
101 return port->func->drv_ctl(port, ln, vs, pe);
102 return port ? 0 : -ENODEV;
103} 97}
104 98
105static int 99static int
106nv50_pior_dp_ctor(struct nvkm_object *parent, 100nv50_pior_output_dp_lnk_ctl(struct nvkm_output_dp *outp,
107 struct nvkm_object *engine, 101 int nr, int bw, bool ef)
108 struct nvkm_oclass *oclass, void *info, u32 index,
109 struct nvkm_object **pobject)
110{ 102{
111 struct nvkm_i2c *i2c = nvkm_i2c(parent); 103 int ret = nvkm_i2c_aux_lnk_ctl(outp->aux, nr, bw, ef);
112 struct nvkm_output_dp *outp;
113 int ret;
114
115 ret = nvkm_output_dp_create(parent, engine, oclass, info, index, &outp);
116 *pobject = nv_object(outp);
117 if (ret) 104 if (ret)
118 return ret; 105 return ret;
119 106 return 1;
120 outp->base.edid = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(
121 outp->base.info.extdev));
122 return 0;
123} 107}
124 108
125struct nvkm_output_dp_impl 109static const struct nvkm_output_dp_func
126nv50_pior_dp_impl = { 110nv50_pior_output_dp_func = {
127 .base.base.handle = DCB_OUTPUT_DP | 0x0010, 111 .pattern = nv50_pior_output_dp_pattern,
128 .base.base.ofuncs = &(struct nvkm_ofuncs) { 112 .lnk_pwr = nv50_pior_output_dp_lnk_pwr,
129 .ctor = nv50_pior_dp_ctor, 113 .lnk_ctl = nv50_pior_output_dp_lnk_ctl,
130 .dtor = _nvkm_output_dp_dtor,
131 .init = _nvkm_output_dp_init,
132 .fini = _nvkm_output_dp_fini,
133 },
134 .pattern = nv50_pior_dp_pattern,
135 .lnk_pwr = nv50_pior_dp_lnk_pwr,
136 .lnk_ctl = nv50_pior_dp_lnk_ctl,
137 .drv_ctl = nv50_pior_dp_drv_ctl,
138}; 114};
139 115
140/******************************************************************************
141 * General PIOR handling
142 *****************************************************************************/
143
144int 116int
145nv50_pior_power(NV50_DISP_MTHD_V1) 117nv50_pior_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
118 struct nvkm_output **poutp)
146{ 119{
147 const u32 soff = outp->or * 0x800; 120 struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
148 union { 121 struct nvkm_i2c_aux *aux =
149 struct nv50_disp_pior_pwr_v0 v0; 122 nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbE->extdev));
150 } *args = data; 123 struct nvkm_output_dp *outp;
151 u32 ctrl, type;
152 int ret;
153 124
154 nv_ioctl(object, "disp pior pwr size %d\n", size); 125 if (!(outp = kzalloc(sizeof(*outp), GFP_KERNEL)))
155 if (nvif_unpack(args->v0, 0, 0, false)) { 126 return -ENOMEM;
156 nv_ioctl(object, "disp pior pwr vers %d state %d type %x\n", 127 *poutp = &outp->base;
157 args->v0.version, args->v0.state, args->v0.type);
158 if (args->v0.type > 0x0f)
159 return -EINVAL;
160 ctrl = !!args->v0.state;
161 type = args->v0.type;
162 } else
163 return ret;
164 128
165 nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000); 129 return nvkm_output_dp_ctor(&nv50_pior_output_dp_func, disp,
166 nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | ctrl); 130 index, dcbE, aux, outp);
167 nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
168 priv->pior.type[outp->or] = type;
169 return 0;
170} 131}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
index 961ce8bb2135..c2452957fc57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
@@ -1,42 +1,52 @@
1#ifndef __NVKM_DISP_PRIV_H__ 1#ifndef __NVKM_DISP_PRIV_H__
2#define __NVKM_DISP_PRIV_H__ 2#define __NVKM_DISP_PRIV_H__
3#include <engine/disp.h> 3#include <engine/disp.h>
4#include "outp.h"
5#include "outpdp.h"
4 6
5struct nvkm_disp_impl { 7int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *,
6 struct nvkm_oclass base; 8 int index, int heads, struct nvkm_disp *);
7 struct nvkm_oclass **outp; 9int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *,
8 struct nvkm_oclass **conn; 10 int index, int heads, struct nvkm_disp **);
9 const struct nvkm_event_func *vblank; 11void nvkm_disp_vblank(struct nvkm_disp *, int head);
12
13struct nvkm_disp_func_outp {
14 int (* crt)(struct nvkm_disp *, int index, struct dcb_output *,
15 struct nvkm_output **);
16 int (* tv)(struct nvkm_disp *, int index, struct dcb_output *,
17 struct nvkm_output **);
18 int (*tmds)(struct nvkm_disp *, int index, struct dcb_output *,
19 struct nvkm_output **);
20 int (*lvds)(struct nvkm_disp *, int index, struct dcb_output *,
21 struct nvkm_output **);
22 int (* dp)(struct nvkm_disp *, int index, struct dcb_output *,
23 struct nvkm_output **);
24};
25
26struct nvkm_disp_func {
27 void *(*dtor)(struct nvkm_disp *);
28 void (*intr)(struct nvkm_disp *);
29
30 const struct nvkm_disp_oclass *(*root)(struct nvkm_disp *);
31
32 struct {
33 void (*vblank_init)(struct nvkm_disp *, int head);
34 void (*vblank_fini)(struct nvkm_disp *, int head);
35 } head;
36
37 struct {
38 const struct nvkm_disp_func_outp internal;
39 const struct nvkm_disp_func_outp external;
40 } outp;
10}; 41};
11 42
12#define nvkm_disp_create(p,e,c,h,i,x,d) \
13 nvkm_disp_create_((p), (e), (c), (h), (i), (x), \
14 sizeof(**d), (void **)d)
15#define nvkm_disp_destroy(d) ({ \
16 struct nvkm_disp *disp = (d); \
17 _nvkm_disp_dtor(nv_object(disp)); \
18})
19#define nvkm_disp_init(d) ({ \
20 struct nvkm_disp *disp = (d); \
21 _nvkm_disp_init(nv_object(disp)); \
22})
23#define nvkm_disp_fini(d,s) ({ \
24 struct nvkm_disp *disp = (d); \
25 _nvkm_disp_fini(nv_object(disp), (s)); \
26})
27
28int nvkm_disp_create_(struct nvkm_object *, struct nvkm_object *,
29 struct nvkm_oclass *, int heads,
30 const char *, const char *, int, void **);
31void _nvkm_disp_dtor(struct nvkm_object *);
32int _nvkm_disp_init(struct nvkm_object *);
33int _nvkm_disp_fini(struct nvkm_object *, bool);
34
35extern struct nvkm_oclass *nvkm_output_oclass;
36extern struct nvkm_oclass *nvkm_connector_oclass;
37
38int nvkm_disp_vblank_ctor(struct nvkm_object *, void *data, u32 size,
39 struct nvkm_notify *);
40void nvkm_disp_vblank(struct nvkm_disp *, int head);
41int nvkm_disp_ntfy(struct nvkm_object *, u32, struct nvkm_event **); 43int nvkm_disp_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
44
45extern const struct nvkm_disp_oclass nv04_disp_root_oclass;
46
47struct nvkm_disp_oclass {
48 int (*ctor)(struct nvkm_disp *, const struct nvkm_oclass *,
49 void *data, u32 size, struct nvkm_object **);
50 struct nvkm_sclass base;
51};
42#endif 52#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
index 6820176e5f78..721e4f74d1fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2013 Red Hat Inc. 2 * Copyright 2012 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -21,37 +21,38 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "gf100.h" 24#include "rootnv50.h"
25#include "dmacnv50.h"
25 26
26static int 27#include <nvif/class.h>
27gk110_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
28 struct nvkm_oclass *oclass, void *data, u32 size,
29 struct nvkm_object **pobject)
30{
31 struct gf100_pm_priv *priv;
32 int ret;
33
34 ret = nvkm_pm_create(parent, engine, oclass, &priv);
35 *pobject = nv_object(priv);
36 if (ret)
37 return ret;
38 28
39 ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gk104_pm_pwr); 29static const struct nv50_disp_root_func
40 if (ret) 30g84_disp_root = {
41 return ret; 31 .init = nv50_disp_root_init,
32 .fini = nv50_disp_root_fini,
33 .dmac = {
34 &g84_disp_core_oclass,
35 &g84_disp_base_oclass,
36 &g84_disp_ovly_oclass,
37 },
38 .pioc = {
39 &g84_disp_oimm_oclass,
40 &g84_disp_curs_oclass,
41 },
42};
42 43
43 nv_engine(priv)->cclass = &nvkm_pm_cclass; 44static int
44 nv_engine(priv)->sclass = nvkm_pm_sclass; 45g84_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
45 return 0; 46 void *data, u32 size, struct nvkm_object **pobject)
47{
48 return nv50_disp_root_new_(&g84_disp_root, disp, oclass,
49 data, size, pobject);
46} 50}
47 51
48struct nvkm_oclass 52const struct nvkm_disp_oclass
49gk110_pm_oclass = { 53g84_disp_root_oclass = {
50 .handle = NV_ENGINE(PM, 0xf0), 54 .base.oclass = G82_DISP,
51 .ofuncs = &(struct nvkm_ofuncs) { 55 .base.minver = -1,
52 .ctor = gk110_pm_ctor, 56 .base.maxver = -1,
53 .dtor = _nvkm_pm_dtor, 57 .ctor = g84_disp_root_new,
54 .init = _nvkm_pm_init,
55 .fini = gf100_pm_fini,
56 },
57}; 58};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
new file mode 100644
index 000000000000..9493f6edf62b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "rootnv50.h"
25#include "dmacnv50.h"
26
27#include <nvif/class.h>
28
/* Root function/channel table for G94 (NV94) display: nv50-style init/fini
 * with the channel classes this chipset generation exposes.
 */
static const struct nv50_disp_root_func
g94_disp_root = {
	.init = nv50_disp_root_init,
	.fini = nv50_disp_root_fini,
	/* DMA-driven channel classes: core, base, overlay. */
	.dmac = {
		&g94_disp_core_oclass,
		&gt200_disp_base_oclass,
		&gt200_disp_ovly_oclass,
	},
	/* PIO channel classes: overlay-immediate, cursor. */
	.pioc = {
		&g84_disp_oimm_oclass,
		&g84_disp_curs_oclass,
	},
};
43
44static int
45g94_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
46 void *data, u32 size, struct nvkm_object **pobject)
47{
48 return nv50_disp_root_new_(&g94_disp_root, disp, oclass,
49 data, size, pobject);
50}
51
/* Userspace-visible root class for G94 display (published as GT206_DISP). */
const struct nvkm_disp_oclass
g94_disp_root_oclass = {
	.base.oclass = GT206_DISP,
	.base.minver = -1,	/* -1/-1: no interface version restriction */
	.base.maxver = -1,
	.ctor = g94_disp_root_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
new file mode 100644
index 000000000000..8591726871ac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
@@ -0,0 +1,171 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "rootnv50.h"
25#include "dmacnv50.h"
26
27#include <core/client.h>
28#include <core/ramht.h>
29#include <subdev/timer.h>
30
31#include <nvif/class.h>
32#include <nvif/unpack.h>
33
/* NV50_DISP_SCANOUTPOS method handler for GF119-style display.
 * Reads per-head timing registers (0x6404xx, 0x300 stride) and samples the
 * current scanline position bracketed by two timestamps, so userspace can
 * correlate scanout position with system time.
 *
 * Returns 0 on success, or a negative error code from nvif_unpack().
 */
int
gf119_disp_root_scanoutpos(NV50_DISP_MTHD_V0)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	const u32 total  = nvkm_rd32(device, 0x640414 + (head * 0x300));
	const u32 blanke = nvkm_rd32(device, 0x64041c + (head * 0x300));
	const u32 blanks = nvkm_rd32(device, 0x640420 + (head * 0x300));
	union {
		struct nv04_disp_scanoutpos_v0 v0;
	} *args = data;
	int ret;	/* assigned inside the nvif_unpack() macro */

	nvif_ioctl(object, "disp scanoutpos size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object, "disp scanoutpos vers %d\n",
			   args->v0.version);
		/* High halves hold vertical values, low halves horizontal. */
		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
		args->v0.hblanke = (blanke & 0x0000ffff);
		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
		args->v0.hblanks = (blanks & 0x0000ffff);
		args->v0.vtotal = ( total & 0xffff0000) >> 16;
		args->v0.htotal = ( total & 0x0000ffff);
		args->v0.time[0] = ktime_to_ns(ktime_get());
		args->v0.vline = /* vline read locks hline */
			nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff;
		args->v0.time[1] = ktime_to_ns(ktime_get());
		args->v0.hline =
			nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff;
	} else
		return ret;

	return 0;
}
67
/* Shut down the GF119 display root: mask every display interrupt source. */
void
gf119_disp_root_fini(struct nv50_disp_root *root)
{
	struct nvkm_device *device = root->disp->base.engine.subdev.device;
	/* disable all interrupts */
	nvkm_wr32(device, 0x6100b0, 0x00000000);
}
75
/* Bring up the GF119-style display engine: mirror capability registers for
 * EVO, take display ownership from the VBIOS, point the engine at its
 * instance memory, and configure interrupts.
 *
 * Returns 0 on success, -EBUSY if the VBIOS handoff times out.
 */
int
gf119_disp_root_init(struct nv50_disp_root *root)
{
	struct nv50_disp *disp = root->disp;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	u32 tmp;
	int i;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar.
	 */

	/* ... CRTC caps */
	for (i = 0; i < disp->base.head.nr; i++) {
		tmp = nvkm_rd32(device, 0x616104 + (i * 0x800));
		nvkm_wr32(device, 0x6101b4 + (i * 0x800), tmp);
		tmp = nvkm_rd32(device, 0x616108 + (i * 0x800));
		nvkm_wr32(device, 0x6101b8 + (i * 0x800), tmp);
		tmp = nvkm_rd32(device, 0x61610c + (i * 0x800));
		nvkm_wr32(device, 0x6101bc + (i * 0x800), tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < disp->func->dac.nr; i++) {
		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
		nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < disp->func->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		/* NOTE(review): destination 0x6301c4 breaks the 0x6101xx
		 * pattern of the loops above — verify against hw docs.
		 */
		nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp);
	}

	/* steal display away from vbios, or something like that */
	if (nvkm_rd32(device, 0x6100ac) & 0x00000100) {
		nvkm_wr32(device, 0x6100ac, 0x00000100);
		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
		/* poll up to 2ms for the handoff-busy bit to clear */
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* point at display engine memory area (hash table, objects) */
	nvkm_wr32(device, 0x610010, (root->instmem->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nvkm_wr32(device, 0x610090, 0x00000000);
	nvkm_wr32(device, 0x6100a0, 0x00000000);
	nvkm_wr32(device, 0x6100b0, 0x00000307);

	/* disable underflow reporting, preventing an intermittent issue
	 * on some gk104 boards where the production vbios left this
	 * setting enabled by default.
	 *
	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
	 */
	for (i = 0; i < disp->base.head.nr; i++)
		nvkm_mask(device, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);

	return 0;
}
141
/* Root function/channel table for GF119 (Fermi display) chipsets. */
static const struct nv50_disp_root_func
gf119_disp_root = {
	.init = gf119_disp_root_init,
	.fini = gf119_disp_root_fini,
	/* DMA-driven channel classes: core, base, overlay. */
	.dmac = {
		&gf119_disp_core_oclass,
		&gf119_disp_base_oclass,
		&gf119_disp_ovly_oclass,
	},
	/* PIO channel classes: overlay-immediate, cursor. */
	.pioc = {
		&gf119_disp_oimm_oclass,
		&gf119_disp_curs_oclass,
	},
};
156
157static int
158gf119_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
159 void *data, u32 size, struct nvkm_object **pobject)
160{
161 return nv50_disp_root_new_(&gf119_disp_root, disp, oclass,
162 data, size, pobject);
163}
164
/* Userspace-visible root class for GF119 display (published as GF110_DISP). */
const struct nvkm_disp_oclass
gf119_disp_root_oclass = {
	.base.oclass = GF110_DISP,
	.base.minver = -1,	/* -1/-1: no interface version restriction */
	.base.maxver = -1,
	.ctor = gf119_disp_root_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
new file mode 100644
index 000000000000..0bfdb1d1c6ab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "rootnv50.h"
25#include "dmacnv50.h"
26
27#include <nvif/class.h>
28
/* Root function/channel table for GK104 (Kepler) display: reuses the GF119
 * init/fini paths with Kepler channel classes.
 */
static const struct nv50_disp_root_func
gk104_disp_root = {
	.init = gf119_disp_root_init,
	.fini = gf119_disp_root_fini,
	/* DMA-driven channel classes: core, base, overlay. */
	.dmac = {
		&gk104_disp_core_oclass,
		&gk104_disp_base_oclass,
		&gk104_disp_ovly_oclass,
	},
	/* PIO channel classes: overlay-immediate, cursor. */
	.pioc = {
		&gk104_disp_oimm_oclass,
		&gk104_disp_curs_oclass,
	},
};
43
44static int
45gk104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
46 void *data, u32 size, struct nvkm_object **pobject)
47{
48 return nv50_disp_root_new_(&gk104_disp_root, disp, oclass,
49 data, size, pobject);
50}
51
/* Userspace-visible root class for GK104 display (GK104_DISP). */
const struct nvkm_disp_oclass
gk104_disp_root_oclass = {
	.base.oclass = GK104_DISP,
	.base.minver = -1,	/* -1/-1: no interface version restriction */
	.base.maxver = -1,
	.ctor = gk104_disp_root_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
new file mode 100644
index 000000000000..1e8dbed8a67c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "rootnv50.h"
25#include "dmacnv50.h"
26
27#include <nvif/class.h>
28
/* Root function/channel table for GK110 display: GF119 init/fini with
 * GK110 core/base and GK104 overlay/PIO classes.
 */
static const struct nv50_disp_root_func
gk110_disp_root = {
	.init = gf119_disp_root_init,
	.fini = gf119_disp_root_fini,
	/* DMA-driven channel classes: core, base, overlay. */
	.dmac = {
		&gk110_disp_core_oclass,
		&gk110_disp_base_oclass,
		&gk104_disp_ovly_oclass,
	},
	/* PIO channel classes: overlay-immediate, cursor. */
	.pioc = {
		&gk104_disp_oimm_oclass,
		&gk104_disp_curs_oclass,
	},
};
43
44static int
45gk110_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
46 void *data, u32 size, struct nvkm_object **pobject)
47{
48 return nv50_disp_root_new_(&gk110_disp_root, disp, oclass,
49 data, size, pobject);
50}
51
/* Userspace-visible root class for GK110 display (GK110_DISP). */
const struct nvkm_disp_oclass
gk110_disp_root_oclass = {
	.base.oclass = GK110_DISP,
	.base.minver = -1,	/* -1/-1: no interface version restriction */
	.base.maxver = -1,
	.ctor = gk110_disp_root_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
new file mode 100644
index 000000000000..44c55be69e99
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "rootnv50.h"
25#include "dmacnv50.h"
26
27#include <nvif/class.h>
28
/* Root function/channel table for GM107 (first-gen Maxwell) display:
 * GF119 init/fini, GM107 core, Kepler base/overlay/PIO classes.
 */
static const struct nv50_disp_root_func
gm107_disp_root = {
	.init = gf119_disp_root_init,
	.fini = gf119_disp_root_fini,
	/* DMA-driven channel classes: core, base, overlay. */
	.dmac = {
		&gm107_disp_core_oclass,
		&gk110_disp_base_oclass,
		&gk104_disp_ovly_oclass,
	},
	/* PIO channel classes: overlay-immediate, cursor. */
	.pioc = {
		&gk104_disp_oimm_oclass,
		&gk104_disp_curs_oclass,
	},
};
43
44static int
45gm107_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
46 void *data, u32 size, struct nvkm_object **pobject)
47{
48 return nv50_disp_root_new_(&gm107_disp_root, disp, oclass,
49 data, size, pobject);
50}
51
/* Userspace-visible root class for GM107 display (GM107_DISP). */
const struct nvkm_disp_oclass
gm107_disp_root_oclass = {
	.base.oclass = GM107_DISP,
	.base.minver = -1,	/* -1/-1: no interface version restriction */
	.base.maxver = -1,
	.ctor = gm107_disp_root_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c
new file mode 100644
index 000000000000..168bffe0643c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "rootnv50.h"
25#include "dmacnv50.h"
26
27#include <nvif/class.h>
28
/* Root function/channel table for GM204 (second-gen Maxwell) display:
 * GF119 init/fini, GM204 core, Kepler base/overlay/PIO classes.
 */
static const struct nv50_disp_root_func
gm204_disp_root = {
	.init = gf119_disp_root_init,
	.fini = gf119_disp_root_fini,
	/* DMA-driven channel classes: core, base, overlay. */
	.dmac = {
		&gm204_disp_core_oclass,
		&gk110_disp_base_oclass,
		&gk104_disp_ovly_oclass,
	},
	/* PIO channel classes: overlay-immediate, cursor. */
	.pioc = {
		&gk104_disp_oimm_oclass,
		&gk104_disp_curs_oclass,
	},
};
43
44static int
45gm204_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
46 void *data, u32 size, struct nvkm_object **pobject)
47{
48 return nv50_disp_root_new_(&gm204_disp_root, disp, oclass,
49 data, size, pobject);
50}
51
/* Userspace-visible root class for GM204 display (GM204_DISP). */
const struct nvkm_disp_oclass
gm204_disp_root_oclass = {
	.base.oclass = GM204_DISP,
	.base.minver = -1,	/* -1/-1: no interface version restriction */
	.base.maxver = -1,
	.ctor = gm204_disp_root_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
new file mode 100644
index 000000000000..124a0c24f92c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "rootnv50.h"
25#include "dmacnv50.h"
26
27#include <nvif/class.h>
28
/* Root function/channel table for GT200 display: nv50-style init/fini with
 * GT200 DMA channels and G84 PIO channels.
 */
static const struct nv50_disp_root_func
gt200_disp_root = {
	.init = nv50_disp_root_init,
	.fini = nv50_disp_root_fini,
	/* DMA-driven channel classes: core, base, overlay. */
	.dmac = {
		&gt200_disp_core_oclass,
		&gt200_disp_base_oclass,
		&gt200_disp_ovly_oclass,
	},
	/* PIO channel classes: overlay-immediate, cursor. */
	.pioc = {
		&g84_disp_oimm_oclass,
		&g84_disp_curs_oclass,
	},
};
43
44static int
45gt200_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
46 void *data, u32 size, struct nvkm_object **pobject)
47{
48 return nv50_disp_root_new_(&gt200_disp_root, disp, oclass,
49 data, size, pobject);
50}
51
/* Userspace-visible root class for GT200 display (GT200_DISP). */
const struct nvkm_disp_oclass
gt200_disp_root_oclass = {
	.base.oclass = GT200_DISP,
	.base.minver = -1,	/* -1/-1: no interface version restriction */
	.base.maxver = -1,
	.ctor = gt200_disp_root_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
new file mode 100644
index 000000000000..dff52f30668b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "rootnv50.h"
25#include "dmacnv50.h"
26
27#include <nvif/class.h>
28
/* Root function/channel table for GT215 display: nv50-style init/fini with
 * GT215 channel classes throughout.
 */
static const struct nv50_disp_root_func
gt215_disp_root = {
	.init = nv50_disp_root_init,
	.fini = nv50_disp_root_fini,
	/* DMA-driven channel classes: core, base, overlay. */
	.dmac = {
		&gt215_disp_core_oclass,
		&gt215_disp_base_oclass,
		&gt215_disp_ovly_oclass,
	},
	/* PIO channel classes: overlay-immediate, cursor. */
	.pioc = {
		&gt215_disp_oimm_oclass,
		&gt215_disp_curs_oclass,
	},
};
43
44static int
45gt215_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
46 void *data, u32 size, struct nvkm_object **pobject)
47{
48 return nv50_disp_root_new_(&gt215_disp_root, disp, oclass,
49 data, size, pobject);
50}
51
/* Userspace-visible root class for GT215 display (published as GT214_DISP). */
const struct nvkm_disp_oclass
gt215_disp_root_oclass = {
	.base.oclass = GT214_DISP,
	.base.minver = -1,	/* -1/-1: no interface version restriction */
	.base.maxver = -1,
	.ctor = gt215_disp_root_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
new file mode 100644
index 000000000000..62d3fb66d0ec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
@@ -0,0 +1,139 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#define nv04_disp_root(p) container_of((p), struct nv04_disp_root, object)
25#include "priv.h"
26
27#include <core/client.h>
28
29#include <nvif/class.h>
30#include <nvif/unpack.h>
31
/* Root object for pre-nv50 display: just the base object plus a back
 * pointer to the owning display engine (no channels on these chips).
 */
struct nv04_disp_root {
	struct nvkm_object object;	/* must be first; container_of() in nv04_disp_root() */
	struct nvkm_disp *disp;
};
36
/* NV04_DISP_SCANOUTPOS handler: report CRTC timing and current scanline
 * position for @head (per-head register stride 0x2000).
 *
 * Returns 0 on success, -ENOTSUPP when timings are unreadable (VGA output),
 * or a negative error code from nvif_unpack().
 */
static int
nv04_disp_scanoutpos(struct nv04_disp_root *root,
		     void *data, u32 size, int head)
{
	struct nvkm_device *device = root->disp->engine.subdev.device;
	struct nvkm_object *object = &root->object;
	const u32 hoff = head * 0x2000;
	union {
		struct nv04_disp_scanoutpos_v0 v0;
	} *args = data;
	u32 line;
	int ret;	/* assigned inside the nvif_unpack() macro */

	nvif_ioctl(object, "disp scanoutpos size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object, "disp scanoutpos vers %d\n",
			   args->v0.version);
		args->v0.vblanks = nvkm_rd32(device, 0x680800 + hoff) & 0xffff;
		args->v0.vtotal = nvkm_rd32(device, 0x680804 + hoff) & 0xffff;
		args->v0.vblanke = args->v0.vtotal - 1;

		args->v0.hblanks = nvkm_rd32(device, 0x680820 + hoff) & 0xffff;
		args->v0.htotal = nvkm_rd32(device, 0x680824 + hoff) & 0xffff;
		args->v0.hblanke = args->v0.htotal - 1;

		/*
		 * If output is vga instead of digital then vtotal/htotal is
		 * invalid so we have to give up and trigger the timestamping
		 * fallback in the drm core.
		 */
		if (!args->v0.vtotal || !args->v0.htotal)
			return -ENOTSUPP;

		/* Bracket the position read with timestamps for userspace. */
		args->v0.time[0] = ktime_to_ns(ktime_get());
		line = nvkm_rd32(device, 0x600868 + hoff);
		args->v0.time[1] = ktime_to_ns(ktime_get());
		args->v0.hline = (line & 0xffff0000) >> 16;
		args->v0.vline = (line & 0x0000ffff);
	} else
		return ret;

	return 0;
}
80
/* Object method dispatcher for the nv04 display root: unpack the method
 * header, validate the head index, and route to the handler.
 *
 * Returns the handler's result, -ENXIO for a bad head, -EINVAL for an
 * unknown method, or a negative error code from nvif_unpack().
 */
static int
nv04_disp_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	struct nv04_disp_root *root = nv04_disp_root(object);
	union {
		struct nv04_disp_mthd_v0 v0;
	} *args = data;
	int head, ret;	/* ret assigned inside the nvif_unpack() macro */

	nvif_ioctl(object, "disp mthd size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, true)) {
		nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
			   args->v0.version, args->v0.method, args->v0.head);
		mthd = args->v0.method;
		head = args->v0.head;
	} else
		return ret;

	/* pre-nv50 hardware has at most two heads */
	if (head < 0 || head >= 2)
		return -ENXIO;

	switch (mthd) {
	case NV04_DISP_SCANOUTPOS:
		return nv04_disp_scanoutpos(root, data, size, head);
	default:
		break;
	}

	return -EINVAL;
}
111
/* Object ops for the nv04 display root: method dispatch and event
 * notification only.
 */
static struct nvkm_object_func
nv04_disp_root = {
	.mthd = nv04_disp_mthd,
	.ntfy = nvkm_disp_ntfy,
};
117
118static int
119nv04_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
120 void *data, u32 size, struct nvkm_object **pobject)
121{
122 struct nv04_disp_root *root;
123
124 if (!(root = kzalloc(sizeof(*root), GFP_KERNEL)))
125 return -ENOMEM;
126 root->disp = disp;
127 *pobject = &root->object;
128
129 nvkm_object_ctor(&nv04_disp_root, oclass, &root->object);
130 return 0;
131}
132
/* Userspace-visible root class for pre-nv50 display (NV04_DISP). */
const struct nvkm_disp_oclass
nv04_disp_root_oclass = {
	.base.oclass = NV04_DISP,
	.base.minver = -1,	/* -1/-1: no interface version restriction */
	.base.maxver = -1,
	.ctor = nv04_disp_root_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
new file mode 100644
index 000000000000..06fb24d88702
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -0,0 +1,399 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "rootnv50.h"
25#include "dmacnv50.h"
26
27#include <core/client.h>
28#include <core/ramht.h>
29#include <subdev/timer.h>
30
31#include <nvif/class.h>
32#include <nvif/unpack.h>
33
/* NV50_DISP_SCANOUTPOS method handler for nv50-style display.
 * Reads per-head timing registers (0x610axx, 0x540 stride) and samples the
 * current scanline position bracketed by two timestamps, so userspace can
 * correlate scanout position with system time.
 *
 * Returns 0 on success, or a negative error code from nvif_unpack().
 */
int
nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	const u32 blanke = nvkm_rd32(device, 0x610aec + (head * 0x540));
	const u32 blanks = nvkm_rd32(device, 0x610af4 + (head * 0x540));
	const u32 total  = nvkm_rd32(device, 0x610afc + (head * 0x540));
	union {
		struct nv04_disp_scanoutpos_v0 v0;
	} *args = data;
	int ret;	/* assigned inside the nvif_unpack() macro */

	nvif_ioctl(object, "disp scanoutpos size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object, "disp scanoutpos vers %d\n",
			   args->v0.version);
		/* High halves hold vertical values, low halves horizontal. */
		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
		args->v0.hblanke = (blanke & 0x0000ffff);
		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
		args->v0.hblanks = (blanks & 0x0000ffff);
		args->v0.vtotal = ( total & 0xffff0000) >> 16;
		args->v0.htotal = ( total & 0x0000ffff);
		args->v0.time[0] = ktime_to_ns(ktime_get());
		args->v0.vline = /* vline read locks hline */
			nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff;
		args->v0.time[1] = ktime_to_ns(ktime_get());
		args->v0.hline =
			nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff;
	} else
		return ret;

	return 0;
}
67
/* Method dispatcher shared by all nv50-family display roots.
 *
 * Accepts either a v0 (head-addressed) or v1 (output-addressed via
 * hasht/hashm) method header, resolves the target head and — for v1 —
 * the matching output encoder, then routes to the chipset's handler via
 * disp->func.
 *
 * Returns the handler's result, -ENXIO for a bad head or unmatched
 * output, -ENODEV when the chipset lacks the requested capability,
 * -EINVAL for unknown methods, or a negative error from nvif_unpack().
 */
int
nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	union {
		struct nv50_disp_mthd_v0 v0;
		struct nv50_disp_mthd_v1 v1;
	} *args = data;
	struct nv50_disp_root *root = nv50_disp_root(object);
	struct nv50_disp *disp = root->disp;
	const struct nv50_disp_func *func = disp->func;
	struct nvkm_output *outp = NULL;
	struct nvkm_output *temp;
	u16 type, mask = 0;	/* type is only read when mask != 0 (v1 path) */
	int head, ret;		/* ret assigned inside the nvif_unpack() macro */

	if (mthd != NV50_DISP_MTHD)
		return -EINVAL;

	nvif_ioctl(object, "disp mthd size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, true)) {
		nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
			   args->v0.version, args->v0.method, args->v0.head);
		mthd = args->v0.method;
		head = args->v0.head;
	} else
	if (nvif_unpack(args->v1, 1, 1, true)) {
		nvif_ioctl(object, "disp mthd vers %d mthd %02x "
				   "type %04x mask %04x\n",
			   args->v1.version, args->v1.method,
			   args->v1.hasht, args->v1.hashm);
		mthd = args->v1.method;
		type = args->v1.hasht;
		mask = args->v1.hashm;
		/* head index is encoded in bits 8..11 of the hash mask */
		head = ffs((mask >> 8) & 0x0f) - 1;
	} else
		return ret;

	if (head < 0 || head >= disp->base.head.nr)
		return -ENXIO;

	/* v1 path: locate the output encoder matching the hash/mask pair */
	if (mask) {
		list_for_each_entry(temp, &disp->base.outp, head) {
			if ((temp->info.hasht == type) &&
			    (temp->info.hashm & mask) == mask) {
				outp = temp;
				break;
			}
		}
		if (outp == NULL)
			return -ENXIO;
	}

	/* head-addressed methods */
	switch (mthd) {
	case NV50_DISP_SCANOUTPOS:
		return func->head.scanoutpos(object, disp, data, size, head);
	default:
		break;
	}

	/* output-addressed methods: "mthd * !!outp" forces the switch to 0
	 * (matching no case) when no output was resolved, so these handlers
	 * only run with a valid outp.
	 */
	switch (mthd * !!outp) {
	case NV50_DISP_MTHD_V1_DAC_PWR:
		return func->dac.power(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_DAC_LOAD:
		return func->dac.sense(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_SOR_PWR:
		return func->sor.power(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
		if (!func->sor.hda_eld)
			return -ENODEV;
		return func->sor.hda_eld(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
		if (!func->sor.hdmi)
			return -ENODEV;
		return func->sor.hdmi(object, disp, data, size, head, outp);
	case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
		union {
			struct nv50_disp_sor_lvds_script_v0 v0;
		} *args = data;
		nvif_ioctl(object, "disp sor lvds script size %d\n", size);
		if (nvif_unpack(args->v0, 0, 0, false)) {
			nvif_ioctl(object, "disp sor lvds script "
					   "vers %d name %04x\n",
				   args->v0.version, args->v0.script);
			/* stash script id; consumed later by LVDS link code */
			disp->sor.lvdsconf = args->v0.script;
			return 0;
		} else
			return ret;
	}
		break;
	case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
		struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
		union {
			struct nv50_disp_sor_dp_pwr_v0 v0;
		} *args = data;
		nvif_ioctl(object, "disp sor dp pwr size %d\n", size);
		if (nvif_unpack(args->v0, 0, 0, false)) {
			nvif_ioctl(object, "disp sor dp pwr vers %d state %d\n",
				   args->v0.version, args->v0.state);
			if (args->v0.state == 0) {
				/* power down: stop IRQ handling, drop link
				 * power, mark link training incomplete
				 */
				nvkm_notify_put(&outpdp->irq);
				outpdp->func->lnk_pwr(outpdp, 0);
				atomic_set(&outpdp->lt.done, 0);
				return 0;
			} else
			if (args->v0.state != 0) {
				nvkm_output_dp_train(&outpdp->base, 0, true);
				return 0;
			}
		} else
			return ret;
	}
		break;
	case NV50_DISP_MTHD_V1_PIOR_PWR:
		if (!func->pior.power)
			return -ENODEV;
		return func->pior.power(object, disp, data, size, head, outp);
	default:
		break;
	}

	return -EINVAL;
}
190
191static int
192nv50_disp_root_dmac_new_(const struct nvkm_oclass *oclass,
193 void *data, u32 size, struct nvkm_object **pobject)
194{
195 const struct nv50_disp_dmac_oclass *sclass = oclass->priv;
196 struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
197 return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
198 oclass, data, size, pobject);
199}
200
201static int
202nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass,
203 void *data, u32 size, struct nvkm_object **pobject)
204{
205 const struct nv50_disp_pioc_oclass *sclass = oclass->priv;
206 struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
207 return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
208 oclass, data, size, pobject);
209}
210
211static int
212nv50_disp_root_child_get_(struct nvkm_object *object, int index,
213 struct nvkm_oclass *sclass)
214{
215 struct nv50_disp_root *root = nv50_disp_root(object);
216
217 if (index < ARRAY_SIZE(root->func->dmac)) {
218 sclass->base = root->func->dmac[index]->base;
219 sclass->priv = root->func->dmac[index];
220 sclass->ctor = nv50_disp_root_dmac_new_;
221 return 0;
222 }
223
224 index -= ARRAY_SIZE(root->func->dmac);
225
226 if (index < ARRAY_SIZE(root->func->pioc)) {
227 sclass->base = root->func->pioc[index]->base;
228 sclass->priv = root->func->pioc[index];
229 sclass->ctor = nv50_disp_root_pioc_new_;
230 return 0;
231 }
232
233 return -EINVAL;
234}
235
236static int
237nv50_disp_root_fini_(struct nvkm_object *object, bool suspend)
238{
239 struct nv50_disp_root *root = nv50_disp_root(object);
240 root->func->fini(root);
241 return 0;
242}
243
244static int
245nv50_disp_root_init_(struct nvkm_object *object)
246{
247 struct nv50_disp_root *root = nv50_disp_root(object);
248 return root->func->init(root);
249}
250
251static void *
252nv50_disp_root_dtor_(struct nvkm_object *object)
253{
254 struct nv50_disp_root *root = nv50_disp_root(object);
255 nvkm_ramht_del(&root->ramht);
256 nvkm_gpuobj_del(&root->instmem);
257 return root;
258}
259
260static const struct nvkm_object_func
261nv50_disp_root_ = {
262 .dtor = nv50_disp_root_dtor_,
263 .init = nv50_disp_root_init_,
264 .fini = nv50_disp_root_fini_,
265 .mthd = nv50_disp_root_mthd_,
266 .ntfy = nvkm_disp_ntfy,
267 .sclass = nv50_disp_root_child_get_,
268};
269
270int
271nv50_disp_root_new_(const struct nv50_disp_root_func *func,
272 struct nvkm_disp *base, const struct nvkm_oclass *oclass,
273 void *data, u32 size, struct nvkm_object **pobject)
274{
275 struct nv50_disp *disp = nv50_disp(base);
276 struct nv50_disp_root *root;
277 struct nvkm_device *device = disp->base.engine.subdev.device;
278 int ret;
279
280 if (!(root = kzalloc(sizeof(*root), GFP_KERNEL)))
281 return -ENOMEM;
282 *pobject = &root->object;
283
284 nvkm_object_ctor(&nv50_disp_root_, oclass, &root->object);
285 root->func = func;
286 root->disp = disp;
287
288 ret = nvkm_gpuobj_new(disp->base.engine.subdev.device, 0x10000, 0x10000,
289 false, NULL, &root->instmem);
290 if (ret)
291 return ret;
292
293 return nvkm_ramht_new(device, 0x1000, 0, root->instmem, &root->ramht);
294}
295
296void
297nv50_disp_root_fini(struct nv50_disp_root *root)
298{
299 struct nvkm_device *device = root->disp->base.engine.subdev.device;
300 /* disable all interrupts */
301 nvkm_wr32(device, 0x610024, 0x00000000);
302 nvkm_wr32(device, 0x610020, 0x00000000);
303}
304
/* Bring up the NV50 display core: mirror per-unit capability registers
 * into the EVO-visible range, take ownership of the display away from
 * the VBIOS, point the hardware at our instance memory, and enable
 * supervisor interrupts.  Returns 0, or -EBUSY if the VBIOS handoff
 * times out.  The exact register write order below matters. */
int
nv50_disp_root_init(struct nv50_disp_root *root)
{
	struct nv50_disp *disp = root->disp;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	u32 tmp;
	int i;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar. NFI what the 0x614004 caps are for..
	 */
	tmp = nvkm_rd32(device, 0x614004);
	nvkm_wr32(device, 0x610184, tmp);

	/* ... CRTC caps (four registers per head, 0x800 stride source,
	 * 0x10 stride destination) */
	for (i = 0; i < disp->base.head.nr; i++) {
		tmp = nvkm_rd32(device, 0x616100 + (i * 0x800));
		nvkm_wr32(device, 0x610190 + (i * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616104 + (i * 0x800));
		nvkm_wr32(device, 0x610194 + (i * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616108 + (i * 0x800));
		nvkm_wr32(device, 0x610198 + (i * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x61610c + (i * 0x800));
		nvkm_wr32(device, 0x61019c + (i * 0x10), tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < disp->func->dac.nr; i++) {
		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
		nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < disp->func->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
	}

	/* ... PIOR caps */
	for (i = 0; i < disp->func->pior.nr; i++) {
		tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
		nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
	}

	/* steal display away from vbios, or something like that;
	 * wait up to 2s for the handoff bit to clear */
	if (nvkm_rd32(device, 0x610024) & 0x00000100) {
		nvkm_wr32(device, 0x610024, 0x00000100);
		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* point at display engine memory area (hash table, objects) */
	nvkm_wr32(device, 0x610010, (root->instmem->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nvkm_wr32(device, 0x61002c, 0x00000370);
	nvkm_wr32(device, 0x610028, 0x00000000);
	return 0;
}
369
370static const struct nv50_disp_root_func
371nv50_disp_root = {
372 .init = nv50_disp_root_init,
373 .fini = nv50_disp_root_fini,
374 .dmac = {
375 &nv50_disp_core_oclass,
376 &nv50_disp_base_oclass,
377 &nv50_disp_ovly_oclass,
378 },
379 .pioc = {
380 &nv50_disp_oimm_oclass,
381 &nv50_disp_curs_oclass,
382 },
383};
384
385static int
386nv50_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
387 void *data, u32 size, struct nvkm_object **pobject)
388{
389 return nv50_disp_root_new_(&nv50_disp_root, disp, oclass,
390 data, size, pobject);
391}
392
393const struct nvkm_disp_oclass
394nv50_disp_root_oclass = {
395 .base.oclass = NV50_DISP,
396 .base.minver = -1,
397 .base.maxver = -1,
398 .ctor = nv50_disp_root_new,
399};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
new file mode 100644
index 000000000000..5b2c903ce9ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -0,0 +1,43 @@
#ifndef __NV50_DISP_ROOT_H__
#define __NV50_DISP_ROOT_H__
/* Recover the nv50_disp_root container from an embedded nvkm_object. */
#define nv50_disp_root(p) container_of((p), struct nv50_disp_root, object)
#include "nv50.h"
#include "channv50.h"
#include "dmacnv50.h"

/* Root object of the NV50-family display engine: owns the instance
 * memory and hash table the hardware uses to locate display objects. */
struct nv50_disp_root {
	const struct nv50_disp_root_func *func;	/* chipset-specific ops */
	struct nv50_disp *disp;			/* owning display engine */
	struct nvkm_object object;		/* base object (embedded) */

	struct nvkm_gpuobj *instmem;	/* display engine instance memory */
	struct nvkm_ramht *ramht;	/* object hash table inside instmem */
};

/* Per-chipset hooks and channel class tables; the array element order
 * determines the channel id assigned to each class. */
struct nv50_disp_root_func {
	int (*init)(struct nv50_disp_root *);
	void (*fini)(struct nv50_disp_root *);
	const struct nv50_disp_dmac_oclass *dmac[3];
	const struct nv50_disp_pioc_oclass *pioc[2];
};

/* Shared constructor used by all chipset-specific root classes. */
int nv50_disp_root_new_(const struct nv50_disp_root_func *, struct nvkm_disp *,
			const struct nvkm_oclass *, void *data, u32 size,
			struct nvkm_object **);
int nv50_disp_root_init(struct nv50_disp_root *);
void nv50_disp_root_fini(struct nv50_disp_root *);

int gf119_disp_root_init(struct nv50_disp_root *);
void gf119_disp_root_fini(struct nv50_disp_root *);

/* Chipset-specific root class descriptions. */
extern const struct nvkm_disp_oclass nv50_disp_root_oclass;
extern const struct nvkm_disp_oclass g84_disp_root_oclass;
extern const struct nvkm_disp_oclass g94_disp_root_oclass;
extern const struct nvkm_disp_oclass gt200_disp_root_oclass;
extern const struct nvkm_disp_oclass gt215_disp_root_oclass;
extern const struct nvkm_disp_oclass gf119_disp_root_oclass;
extern const struct nvkm_disp_oclass gk104_disp_root_oclass;
extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
extern const struct nvkm_disp_oclass gm204_disp_root_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 8918da7ffdf2..1bb9d661e9b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -24,7 +24,6 @@
24#include "nv50.h" 24#include "nv50.h"
25#include "outpdp.h" 25#include "outpdp.h"
26 26
27#include <core/device.h>
28#include <subdev/timer.h> 27#include <subdev/timer.h>
29 28
30static inline u32 29static inline u32
@@ -39,12 +38,33 @@ g94_sor_loff(struct nvkm_output_dp *outp)
39 return g94_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80; 38 return g94_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
40} 39}
41 40
42static inline u32 41/*******************************************************************************
43g94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane) 42 * TMDS/LVDS
43 ******************************************************************************/
44static const struct nvkm_output_func
45g94_sor_output_func = {
46};
47
48int
49g94_sor_output_new(struct nvkm_disp *disp, int index,
50 struct dcb_output *dcbE, struct nvkm_output **poutp)
51{
52 return nvkm_output_new_(&g94_sor_output_func, disp,
53 index, dcbE, poutp);
54}
55
56/*******************************************************************************
57 * DisplayPort
58 ******************************************************************************/
59u32
60g94_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
44{ 61{
62 static const u8 gm100[] = { 0, 8, 16, 24 };
45 static const u8 mcp89[] = { 24, 16, 8, 0 }; /* thanks, apple.. */ 63 static const u8 mcp89[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
46 static const u8 g94[] = { 16, 8, 0, 24 }; 64 static const u8 g94[] = { 16, 8, 0, 24 };
47 if (nv_device(priv)->chipset == 0xaf) 65 if (device->chipset >= 0x110)
66 return gm100[lane];
67 if (device->chipset == 0xaf)
48 return mcp89[lane]; 68 return mcp89[lane];
49 return g94[lane]; 69 return g94[lane];
50} 70}
@@ -52,33 +72,36 @@ g94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
52static int 72static int
53g94_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) 73g94_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
54{ 74{
55 struct nv50_disp_priv *priv = (void *)nvkm_disp(outp); 75 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
56 const u32 loff = g94_sor_loff(outp); 76 const u32 loff = g94_sor_loff(outp);
57 nv_mask(priv, 0x61c10c + loff, 0x0f000000, pattern << 24); 77 nvkm_mask(device, 0x61c10c + loff, 0x0f000000, pattern << 24);
58 return 0; 78 return 0;
59} 79}
60 80
61int 81int
62g94_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr) 82g94_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
63{ 83{
64 struct nv50_disp_priv *priv = (void *)nvkm_disp(outp); 84 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
65 const u32 soff = g94_sor_soff(outp); 85 const u32 soff = g94_sor_soff(outp);
66 const u32 loff = g94_sor_loff(outp); 86 const u32 loff = g94_sor_loff(outp);
67 u32 mask = 0, i; 87 u32 mask = 0, i;
68 88
69 for (i = 0; i < nr; i++) 89 for (i = 0; i < nr; i++)
70 mask |= 1 << (g94_sor_dp_lane_map(priv, i) >> 3); 90 mask |= 1 << (g94_sor_dp_lane_map(device, i) >> 3);
71 91
72 nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask); 92 nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
73 nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000); 93 nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
74 nv_wait(priv, 0x61c034 + soff, 0x80000000, 0x00000000); 94 nvkm_msec(device, 2000,
95 if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
96 break;
97 );
75 return 0; 98 return 0;
76} 99}
77 100
78static int 101static int
79g94_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef) 102g94_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
80{ 103{
81 struct nv50_disp_priv *priv = (void *)nvkm_disp(outp); 104 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
82 const u32 soff = g94_sor_soff(outp); 105 const u32 soff = g94_sor_soff(outp);
83 const u32 loff = g94_sor_loff(outp); 106 const u32 loff = g94_sor_loff(outp);
84 u32 dpctrl = 0x00000000; 107 u32 dpctrl = 0x00000000;
@@ -90,17 +113,17 @@ g94_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
90 if (bw > 0x06) 113 if (bw > 0x06)
91 clksor |= 0x00040000; 114 clksor |= 0x00040000;
92 115
93 nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor); 116 nvkm_mask(device, 0x614300 + soff, 0x000c0000, clksor);
94 nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl); 117 nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
95 return 0; 118 return 0;
96} 119}
97 120
98static int 121static int
99g94_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc) 122g94_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
100{ 123{
101 struct nv50_disp_priv *priv = (void *)nvkm_disp(outp); 124 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
102 struct nvkm_bios *bios = nvkm_bios(priv); 125 struct nvkm_bios *bios = device->bios;
103 const u32 shift = g94_sor_dp_lane_map(priv, ln); 126 const u32 shift = g94_sor_dp_lane_map(device, ln);
104 const u32 loff = g94_sor_loff(outp); 127 const u32 loff = g94_sor_loff(outp);
105 u32 addr, data[3]; 128 u32 addr, data[3];
106 u8 ver, hdr, cnt, len; 129 u8 ver, hdr, cnt, len;
@@ -109,37 +132,37 @@ g94_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
109 132
110 addr = nvbios_dpout_match(bios, outp->base.info.hasht, 133 addr = nvbios_dpout_match(bios, outp->base.info.hasht,
111 outp->base.info.hashm, 134 outp->base.info.hashm,
112 &ver, &hdr, &cnt, &len, &info); 135 &ver, &hdr, &cnt, &len, &info);
113 if (!addr) 136 if (!addr)
114 return -ENODEV; 137 return -ENODEV;
115 138
116 addr = nvbios_dpcfg_match(bios, addr, 0, vs, pe, 139 addr = nvbios_dpcfg_match(bios, addr, 0, vs, pe,
117 &ver, &hdr, &cnt, &len, &ocfg); 140 &ver, &hdr, &cnt, &len, &ocfg);
118 if (!addr) 141 if (!addr)
119 return -EINVAL; 142 return -EINVAL;
120 143
121 data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift); 144 data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
122 data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift); 145 data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
123 data[2] = nv_rd32(priv, 0x61c130 + loff); 146 data[2] = nvkm_rd32(device, 0x61c130 + loff);
124 if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0) 147 if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
125 data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8); 148 data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
126 nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift)); 149 nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
127 nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift)); 150 nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
128 nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8)); 151 nvkm_wr32(device, 0x61c130 + loff, data[2]);
129 return 0; 152 return 0;
130} 153}
131 154
132struct nvkm_output_dp_impl 155static const struct nvkm_output_dp_func
133g94_sor_dp_impl = { 156g94_sor_dp_func = {
134 .base.base.handle = DCB_OUTPUT_DP,
135 .base.base.ofuncs = &(struct nvkm_ofuncs) {
136 .ctor = _nvkm_output_dp_ctor,
137 .dtor = _nvkm_output_dp_dtor,
138 .init = _nvkm_output_dp_init,
139 .fini = _nvkm_output_dp_fini,
140 },
141 .pattern = g94_sor_dp_pattern, 157 .pattern = g94_sor_dp_pattern,
142 .lnk_pwr = g94_sor_dp_lnk_pwr, 158 .lnk_pwr = g94_sor_dp_lnk_pwr,
143 .lnk_ctl = g94_sor_dp_lnk_ctl, 159 .lnk_ctl = g94_sor_dp_lnk_ctl,
144 .drv_ctl = g94_sor_dp_drv_ctl, 160 .drv_ctl = g94_sor_dp_drv_ctl,
145}; 161};
162
163int
164g94_sor_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
165 struct nvkm_output **poutp)
166{
167 return nvkm_output_dp_new_(&g94_sor_dp_func, disp, index, dcbE, poutp);
168}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index 52fbe4880e13..b4b41b135643 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -25,39 +25,32 @@
25#include "outpdp.h" 25#include "outpdp.h"
26 26
27static inline u32 27static inline u32
28gf110_sor_soff(struct nvkm_output_dp *outp) 28gf119_sor_soff(struct nvkm_output_dp *outp)
29{ 29{
30 return (ffs(outp->base.info.or) - 1) * 0x800; 30 return (ffs(outp->base.info.or) - 1) * 0x800;
31} 31}
32 32
33static inline u32 33static inline u32
34gf110_sor_loff(struct nvkm_output_dp *outp) 34gf119_sor_loff(struct nvkm_output_dp *outp)
35{ 35{
36 return gf110_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80; 36 return gf119_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
37}
38
39static inline u32
40gf110_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
41{
42 static const u8 gf110[] = { 16, 8, 0, 24 };
43 return gf110[lane];
44} 37}
45 38
46static int 39static int
47gf110_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) 40gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
48{ 41{
49 struct nv50_disp_priv *priv = (void *)nvkm_disp(outp); 42 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
50 const u32 loff = gf110_sor_loff(outp); 43 const u32 loff = gf119_sor_loff(outp);
51 nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern); 44 nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
52 return 0; 45 return 0;
53} 46}
54 47
55int 48int
56gf110_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef) 49gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
57{ 50{
58 struct nv50_disp_priv *priv = (void *)nvkm_disp(outp); 51 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
59 const u32 soff = gf110_sor_soff(outp); 52 const u32 soff = gf119_sor_soff(outp);
60 const u32 loff = gf110_sor_loff(outp); 53 const u32 loff = gf119_sor_loff(outp);
61 u32 dpctrl = 0x00000000; 54 u32 dpctrl = 0x00000000;
62 u32 clksor = 0x00000000; 55 u32 clksor = 0x00000000;
63 56
@@ -66,19 +59,19 @@ gf110_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
66 if (ef) 59 if (ef)
67 dpctrl |= 0x00004000; 60 dpctrl |= 0x00004000;
68 61
69 nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor); 62 nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
70 nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl); 63 nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
71 return 0; 64 return 0;
72} 65}
73 66
74static int 67static int
75gf110_sor_dp_drv_ctl(struct nvkm_output_dp *outp, 68gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
76 int ln, int vs, int pe, int pc) 69 int ln, int vs, int pe, int pc)
77{ 70{
78 struct nv50_disp_priv *priv = (void *)nvkm_disp(outp); 71 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
79 struct nvkm_bios *bios = nvkm_bios(priv); 72 struct nvkm_bios *bios = device->bios;
80 const u32 shift = gf110_sor_dp_lane_map(priv, ln); 73 const u32 shift = g94_sor_dp_lane_map(device, ln);
81 const u32 loff = gf110_sor_loff(outp); 74 const u32 loff = gf119_sor_loff(outp);
82 u32 addr, data[4]; 75 u32 addr, data[4];
83 u8 ver, hdr, cnt, len; 76 u8 ver, hdr, cnt, len;
84 struct nvbios_dpout info; 77 struct nvbios_dpout info;
@@ -95,30 +88,30 @@ gf110_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
95 if (!addr) 88 if (!addr)
96 return -EINVAL; 89 return -EINVAL;
97 90
98 data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift); 91 data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
99 data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift); 92 data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
100 data[2] = nv_rd32(priv, 0x61c130 + loff); 93 data[2] = nvkm_rd32(device, 0x61c130 + loff);
101 if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0) 94 if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
102 data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8); 95 data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
103 nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift)); 96 nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
104 nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift)); 97 nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
105 nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8)); 98 nvkm_wr32(device, 0x61c130 + loff, data[2]);
106 data[3] = nv_rd32(priv, 0x61c13c + loff) & ~(0x000000ff << shift); 99 data[3] = nvkm_rd32(device, 0x61c13c + loff) & ~(0x000000ff << shift);
107 nv_wr32(priv, 0x61c13c + loff, data[3] | (ocfg.pc << shift)); 100 nvkm_wr32(device, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
108 return 0; 101 return 0;
109} 102}
110 103
111struct nvkm_output_dp_impl 104static const struct nvkm_output_dp_func
112gf110_sor_dp_impl = { 105gf119_sor_dp_func = {
113 .base.base.handle = DCB_OUTPUT_DP, 106 .pattern = gf119_sor_dp_pattern,
114 .base.base.ofuncs = &(struct nvkm_ofuncs) {
115 .ctor = _nvkm_output_dp_ctor,
116 .dtor = _nvkm_output_dp_dtor,
117 .init = _nvkm_output_dp_init,
118 .fini = _nvkm_output_dp_fini,
119 },
120 .pattern = gf110_sor_dp_pattern,
121 .lnk_pwr = g94_sor_dp_lnk_pwr, 107 .lnk_pwr = g94_sor_dp_lnk_pwr,
122 .lnk_ctl = gf110_sor_dp_lnk_ctl, 108 .lnk_ctl = gf119_sor_dp_lnk_ctl,
123 .drv_ctl = gf110_sor_dp_drv_ctl, 109 .drv_ctl = gf119_sor_dp_drv_ctl,
124}; 110};
111
112int
113gf119_sor_dp_new(struct nvkm_disp *disp, int index,
114 struct dcb_output *dcbE, struct nvkm_output **poutp)
115{
116 return nvkm_output_dp_new_(&gf119_sor_dp_func, disp, index, dcbE, poutp);
117}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
index 1e40dfe11319..029e5f16c2a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
@@ -41,17 +41,17 @@ gm204_sor_loff(struct nvkm_output_dp *outp)
41void 41void
42gm204_sor_magic(struct nvkm_output *outp) 42gm204_sor_magic(struct nvkm_output *outp)
43{ 43{
44 struct nv50_disp_priv *priv = (void *)nvkm_disp(outp); 44 struct nvkm_device *device = outp->disp->engine.subdev.device;
45 const u32 soff = outp->or * 0x100; 45 const u32 soff = outp->or * 0x100;
46 const u32 data = outp->or + 1; 46 const u32 data = outp->or + 1;
47 if (outp->info.sorconf.link & 1) 47 if (outp->info.sorconf.link & 1)
48 nv_mask(priv, 0x612308 + soff, 0x0000001f, 0x00000000 | data); 48 nvkm_mask(device, 0x612308 + soff, 0x0000001f, 0x00000000 | data);
49 if (outp->info.sorconf.link & 2) 49 if (outp->info.sorconf.link & 2)
50 nv_mask(priv, 0x612388 + soff, 0x0000001f, 0x00000010 | data); 50 nvkm_mask(device, 0x612388 + soff, 0x0000001f, 0x00000010 | data);
51} 51}
52 52
53static inline u32 53static inline u32
54gm204_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane) 54gm204_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
55{ 55{
56 return lane * 0x08; 56 return lane * 0x08;
57} 57}
@@ -59,30 +59,33 @@ gm204_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
59static int 59static int
60gm204_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) 60gm204_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
61{ 61{
62 struct nv50_disp_priv *priv = (void *)nvkm_disp(outp); 62 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
63 const u32 soff = gm204_sor_soff(outp); 63 const u32 soff = gm204_sor_soff(outp);
64 const u32 data = 0x01010101 * pattern; 64 const u32 data = 0x01010101 * pattern;
65 if (outp->base.info.sorconf.link & 1) 65 if (outp->base.info.sorconf.link & 1)
66 nv_mask(priv, 0x61c110 + soff, 0x0f0f0f0f, data); 66 nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
67 else 67 else
68 nv_mask(priv, 0x61c12c + soff, 0x0f0f0f0f, data); 68 nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
69 return 0; 69 return 0;
70} 70}
71 71
72static int 72static int
73gm204_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr) 73gm204_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
74{ 74{
75 struct nv50_disp_priv *priv = (void *)nvkm_disp(outp); 75 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
76 const u32 soff = gm204_sor_soff(outp); 76 const u32 soff = gm204_sor_soff(outp);
77 const u32 loff = gm204_sor_loff(outp); 77 const u32 loff = gm204_sor_loff(outp);
78 u32 mask = 0, i; 78 u32 mask = 0, i;
79 79
80 for (i = 0; i < nr; i++) 80 for (i = 0; i < nr; i++)
81 mask |= 1 << (gm204_sor_dp_lane_map(priv, i) >> 3); 81 mask |= 1 << (gm204_sor_dp_lane_map(device, i) >> 3);
82 82
83 nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask); 83 nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
84 nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000); 84 nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
85 nv_wait(priv, 0x61c034 + soff, 0x80000000, 0x00000000); 85 nvkm_msec(device, 2000,
86 if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
87 break;
88 );
86 return 0; 89 return 0;
87} 90}
88 91
@@ -90,9 +93,9 @@ static int
90gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp, 93gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
91 int ln, int vs, int pe, int pc) 94 int ln, int vs, int pe, int pc)
92{ 95{
93 struct nv50_disp_priv *priv = (void *)nvkm_disp(outp); 96 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
94 struct nvkm_bios *bios = nvkm_bios(priv); 97 struct nvkm_bios *bios = device->bios;
95 const u32 shift = gm204_sor_dp_lane_map(priv, ln); 98 const u32 shift = gm204_sor_dp_lane_map(device, ln);
96 const u32 loff = gm204_sor_loff(outp); 99 const u32 loff = gm204_sor_loff(outp);
97 u32 addr, data[4]; 100 u32 addr, data[4];
98 u8 ver, hdr, cnt, len; 101 u8 ver, hdr, cnt, len;
@@ -109,31 +112,32 @@ gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
109 &ver, &hdr, &cnt, &len, &ocfg); 112 &ver, &hdr, &cnt, &len, &ocfg);
110 if (!addr) 113 if (!addr)
111 return -EINVAL; 114 return -EINVAL;
115 ocfg.tx_pu &= 0x0f;
112 116
113 data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift); 117 data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
114 data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift); 118 data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
115 data[2] = nv_rd32(priv, 0x61c130 + loff); 119 data[2] = nvkm_rd32(device, 0x61c130 + loff);
116 if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0) 120 if ((data[2] & 0x00000f00) < (ocfg.tx_pu << 8) || ln == 0)
117 data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8); 121 data[2] = (data[2] & ~0x00000f00) | (ocfg.tx_pu << 8);
118 nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift)); 122 nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
119 nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift)); 123 nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
120 nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8)); 124 nvkm_wr32(device, 0x61c130 + loff, data[2]);
121 data[3] = nv_rd32(priv, 0x61c13c + loff) & ~(0x000000ff << shift); 125 data[3] = nvkm_rd32(device, 0x61c13c + loff) & ~(0x000000ff << shift);
122 nv_wr32(priv, 0x61c13c + loff, data[3] | (ocfg.pc << shift)); 126 nvkm_wr32(device, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
123 return 0; 127 return 0;
124} 128}
125 129
126struct nvkm_output_dp_impl 130static const struct nvkm_output_dp_func
127gm204_sor_dp_impl = { 131gm204_sor_dp_func = {
128 .base.base.handle = DCB_OUTPUT_DP,
129 .base.base.ofuncs = &(struct nvkm_ofuncs) {
130 .ctor = _nvkm_output_dp_ctor,
131 .dtor = _nvkm_output_dp_dtor,
132 .init = _nvkm_output_dp_init,
133 .fini = _nvkm_output_dp_fini,
134 },
135 .pattern = gm204_sor_dp_pattern, 132 .pattern = gm204_sor_dp_pattern,
136 .lnk_pwr = gm204_sor_dp_lnk_pwr, 133 .lnk_pwr = gm204_sor_dp_lnk_pwr,
137 .lnk_ctl = gf110_sor_dp_lnk_ctl, 134 .lnk_ctl = gf119_sor_dp_lnk_ctl,
138 .drv_ctl = gm204_sor_dp_drv_ctl, 135 .drv_ctl = gm204_sor_dp_drv_ctl,
139}; 136};
137
138int
139gm204_sor_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
140 struct nvkm_output **poutp)
141{
142 return nvkm_output_dp_new_(&gm204_sor_dp_func, disp, index, dcbE, poutp);
143}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
index b229a311c78c..29e0d2a9a839 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
@@ -33,6 +33,7 @@
33int 33int
34nv50_sor_power(NV50_DISP_MTHD_V1) 34nv50_sor_power(NV50_DISP_MTHD_V1)
35{ 35{
36 struct nvkm_device *device = disp->base.engine.subdev.device;
36 union { 37 union {
37 struct nv50_disp_sor_pwr_v0 v0; 38 struct nv50_disp_sor_pwr_v0 v0;
38 } *args = data; 39 } *args = data;
@@ -40,17 +41,39 @@ nv50_sor_power(NV50_DISP_MTHD_V1)
40 u32 stat; 41 u32 stat;
41 int ret; 42 int ret;
42 43
43 nv_ioctl(object, "disp sor pwr size %d\n", size); 44 nvif_ioctl(object, "disp sor pwr size %d\n", size);
44 if (nvif_unpack(args->v0, 0, 0, false)) { 45 if (nvif_unpack(args->v0, 0, 0, false)) {
45 nv_ioctl(object, "disp sor pwr vers %d state %d\n", 46 nvif_ioctl(object, "disp sor pwr vers %d state %d\n",
46 args->v0.version, args->v0.state); 47 args->v0.version, args->v0.state);
47 stat = !!args->v0.state; 48 stat = !!args->v0.state;
48 } else 49 } else
49 return ret; 50 return ret;
50 51
51 nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000); 52
52 nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat); 53 nvkm_msec(device, 2000,
53 nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000); 54 if (!(nvkm_rd32(device, 0x61c004 + soff) & 0x80000000))
54 nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000); 55 break;
56 );
57 nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
58 nvkm_msec(device, 2000,
59 if (!(nvkm_rd32(device, 0x61c004 + soff) & 0x80000000))
60 break;
61 );
62 nvkm_msec(device, 2000,
63 if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
64 break;
65 );
55 return 0; 66 return 0;
56} 67}
68
69static const struct nvkm_output_func
70nv50_sor_output_func = {
71};
72
73int
74nv50_sor_output_new(struct nvkm_disp *disp, int index,
75 struct dcb_output *dcbE, struct nvkm_output **poutp)
76{
77 return nvkm_output_new_(&nv50_sor_output_func, disp,
78 index, dcbE, poutp);
79}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
index c4622c7388d0..8bff95c6343f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
@@ -23,131 +23,119 @@
23 */ 23 */
24#include <subdev/vga.h> 24#include <subdev/vga.h>
25 25
26#include <core/device.h>
27
28u8 26u8
29nv_rdport(void *obj, int head, u16 port) 27nvkm_rdport(struct nvkm_device *device, int head, u16 port)
30{ 28{
31 struct nvkm_device *device = nv_device(obj);
32
33 if (device->card_type >= NV_50) 29 if (device->card_type >= NV_50)
34 return nv_rd08(obj, 0x601000 + port); 30 return nvkm_rd08(device, 0x601000 + port);
35 31
36 if (port == 0x03c0 || port == 0x03c1 || /* AR */ 32 if (port == 0x03c0 || port == 0x03c1 || /* AR */
37 port == 0x03c2 || port == 0x03da || /* INP0 */ 33 port == 0x03c2 || port == 0x03da || /* INP0 */
38 port == 0x03d4 || port == 0x03d5) /* CR */ 34 port == 0x03d4 || port == 0x03d5) /* CR */
39 return nv_rd08(obj, 0x601000 + (head * 0x2000) + port); 35 return nvkm_rd08(device, 0x601000 + (head * 0x2000) + port);
40 36
41 if (port == 0x03c2 || port == 0x03cc || /* MISC */ 37 if (port == 0x03c2 || port == 0x03cc || /* MISC */
42 port == 0x03c4 || port == 0x03c5 || /* SR */ 38 port == 0x03c4 || port == 0x03c5 || /* SR */
43 port == 0x03ce || port == 0x03cf) { /* GR */ 39 port == 0x03ce || port == 0x03cf) { /* GR */
44 if (device->card_type < NV_40) 40 if (device->card_type < NV_40)
45 head = 0; /* CR44 selects head */ 41 head = 0; /* CR44 selects head */
46 return nv_rd08(obj, 0x0c0000 + (head * 0x2000) + port); 42 return nvkm_rd08(device, 0x0c0000 + (head * 0x2000) + port);
47 } 43 }
48 44
49 nv_error(obj, "unknown vga port 0x%04x\n", port);
50 return 0x00; 45 return 0x00;
51} 46}
52 47
53void 48void
54nv_wrport(void *obj, int head, u16 port, u8 data) 49nvkm_wrport(struct nvkm_device *device, int head, u16 port, u8 data)
55{ 50{
56 struct nvkm_device *device = nv_device(obj);
57
58 if (device->card_type >= NV_50) 51 if (device->card_type >= NV_50)
59 nv_wr08(obj, 0x601000 + port, data); 52 nvkm_wr08(device, 0x601000 + port, data);
60 else 53 else
61 if (port == 0x03c0 || port == 0x03c1 || /* AR */ 54 if (port == 0x03c0 || port == 0x03c1 || /* AR */
62 port == 0x03c2 || port == 0x03da || /* INP0 */ 55 port == 0x03c2 || port == 0x03da || /* INP0 */
63 port == 0x03d4 || port == 0x03d5) /* CR */ 56 port == 0x03d4 || port == 0x03d5) /* CR */
64 nv_wr08(obj, 0x601000 + (head * 0x2000) + port, data); 57 nvkm_wr08(device, 0x601000 + (head * 0x2000) + port, data);
65 else 58 else
66 if (port == 0x03c2 || port == 0x03cc || /* MISC */ 59 if (port == 0x03c2 || port == 0x03cc || /* MISC */
67 port == 0x03c4 || port == 0x03c5 || /* SR */ 60 port == 0x03c4 || port == 0x03c5 || /* SR */
68 port == 0x03ce || port == 0x03cf) { /* GR */ 61 port == 0x03ce || port == 0x03cf) { /* GR */
69 if (device->card_type < NV_40) 62 if (device->card_type < NV_40)
70 head = 0; /* CR44 selects head */ 63 head = 0; /* CR44 selects head */
71 nv_wr08(obj, 0x0c0000 + (head * 0x2000) + port, data); 64 nvkm_wr08(device, 0x0c0000 + (head * 0x2000) + port, data);
72 } else 65 }
73 nv_error(obj, "unknown vga port 0x%04x\n", port);
74} 66}
75 67
76u8 68u8
77nv_rdvgas(void *obj, int head, u8 index) 69nvkm_rdvgas(struct nvkm_device *device, int head, u8 index)
78{ 70{
79 nv_wrport(obj, head, 0x03c4, index); 71 nvkm_wrport(device, head, 0x03c4, index);
80 return nv_rdport(obj, head, 0x03c5); 72 return nvkm_rdport(device, head, 0x03c5);
81} 73}
82 74
83void 75void
84nv_wrvgas(void *obj, int head, u8 index, u8 value) 76nvkm_wrvgas(struct nvkm_device *device, int head, u8 index, u8 value)
85{ 77{
86 nv_wrport(obj, head, 0x03c4, index); 78 nvkm_wrport(device, head, 0x03c4, index);
87 nv_wrport(obj, head, 0x03c5, value); 79 nvkm_wrport(device, head, 0x03c5, value);
88} 80}
89 81
90u8 82u8
91nv_rdvgag(void *obj, int head, u8 index) 83nvkm_rdvgag(struct nvkm_device *device, int head, u8 index)
92{ 84{
93 nv_wrport(obj, head, 0x03ce, index); 85 nvkm_wrport(device, head, 0x03ce, index);
94 return nv_rdport(obj, head, 0x03cf); 86 return nvkm_rdport(device, head, 0x03cf);
95} 87}
96 88
97void 89void
98nv_wrvgag(void *obj, int head, u8 index, u8 value) 90nvkm_wrvgag(struct nvkm_device *device, int head, u8 index, u8 value)
99{ 91{
100 nv_wrport(obj, head, 0x03ce, index); 92 nvkm_wrport(device, head, 0x03ce, index);
101 nv_wrport(obj, head, 0x03cf, value); 93 nvkm_wrport(device, head, 0x03cf, value);
102} 94}
103 95
104u8 96u8
105nv_rdvgac(void *obj, int head, u8 index) 97nvkm_rdvgac(struct nvkm_device *device, int head, u8 index)
106{ 98{
107 nv_wrport(obj, head, 0x03d4, index); 99 nvkm_wrport(device, head, 0x03d4, index);
108 return nv_rdport(obj, head, 0x03d5); 100 return nvkm_rdport(device, head, 0x03d5);
109} 101}
110 102
111void 103void
112nv_wrvgac(void *obj, int head, u8 index, u8 value) 104nvkm_wrvgac(struct nvkm_device *device, int head, u8 index, u8 value)
113{ 105{
114 nv_wrport(obj, head, 0x03d4, index); 106 nvkm_wrport(device, head, 0x03d4, index);
115 nv_wrport(obj, head, 0x03d5, value); 107 nvkm_wrport(device, head, 0x03d5, value);
116} 108}
117 109
118u8 110u8
119nv_rdvgai(void *obj, int head, u16 port, u8 index) 111nvkm_rdvgai(struct nvkm_device *device, int head, u16 port, u8 index)
120{ 112{
121 if (port == 0x03c4) return nv_rdvgas(obj, head, index); 113 if (port == 0x03c4) return nvkm_rdvgas(device, head, index);
122 if (port == 0x03ce) return nv_rdvgag(obj, head, index); 114 if (port == 0x03ce) return nvkm_rdvgag(device, head, index);
123 if (port == 0x03d4) return nv_rdvgac(obj, head, index); 115 if (port == 0x03d4) return nvkm_rdvgac(device, head, index);
124 nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
125 return 0x00; 116 return 0x00;
126} 117}
127 118
128void 119void
129nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value) 120nvkm_wrvgai(struct nvkm_device *device, int head, u16 port, u8 index, u8 value)
130{ 121{
131 if (port == 0x03c4) nv_wrvgas(obj, head, index, value); 122 if (port == 0x03c4) nvkm_wrvgas(device, head, index, value);
132 else if (port == 0x03ce) nv_wrvgag(obj, head, index, value); 123 else if (port == 0x03ce) nvkm_wrvgag(device, head, index, value);
133 else if (port == 0x03d4) nv_wrvgac(obj, head, index, value); 124 else if (port == 0x03d4) nvkm_wrvgac(device, head, index, value);
134 else nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
135} 125}
136 126
137bool 127bool
138nv_lockvgac(void *obj, bool lock) 128nvkm_lockvgac(struct nvkm_device *device, bool lock)
139{ 129{
140 struct nvkm_device *dev = nv_device(obj); 130 bool locked = !nvkm_rdvgac(device, 0, 0x1f);
141
142 bool locked = !nv_rdvgac(obj, 0, 0x1f);
143 u8 data = lock ? 0x99 : 0x57; 131 u8 data = lock ? 0x99 : 0x57;
144 if (dev->card_type < NV_50) 132 if (device->card_type < NV_50)
145 nv_wrvgac(obj, 0, 0x1f, data); 133 nvkm_wrvgac(device, 0, 0x1f, data);
146 else 134 else
147 nv_wrvgac(obj, 0, 0x3f, data); 135 nvkm_wrvgac(device, 0, 0x3f, data);
148 if (dev->chipset == 0x11) { 136 if (device->chipset == 0x11) {
149 if (!(nv_rd32(obj, 0x001084) & 0x10000000)) 137 if (!(nvkm_rd32(device, 0x001084) & 0x10000000))
150 nv_wrvgac(obj, 1, 0x1f, data); 138 nvkm_wrvgac(device, 1, 0x1f, data);
151 } 139 }
152 return locked; 140 return locked;
153} 141}
@@ -171,16 +159,16 @@ nv_lockvgac(void *obj, bool lock)
171 * other values are treated as literal values to set 159 * other values are treated as literal values to set
172 */ 160 */
173u8 161u8
174nv_rdvgaowner(void *obj) 162nvkm_rdvgaowner(struct nvkm_device *device)
175{ 163{
176 if (nv_device(obj)->card_type < NV_50) { 164 if (device->card_type < NV_50) {
177 if (nv_device(obj)->chipset == 0x11) { 165 if (device->chipset == 0x11) {
178 u32 tied = nv_rd32(obj, 0x001084) & 0x10000000; 166 u32 tied = nvkm_rd32(device, 0x001084) & 0x10000000;
179 if (tied == 0) { 167 if (tied == 0) {
180 u8 slA = nv_rdvgac(obj, 0, 0x28) & 0x80; 168 u8 slA = nvkm_rdvgac(device, 0, 0x28) & 0x80;
181 u8 tvA = nv_rdvgac(obj, 0, 0x33) & 0x01; 169 u8 tvA = nvkm_rdvgac(device, 0, 0x33) & 0x01;
182 u8 slB = nv_rdvgac(obj, 1, 0x28) & 0x80; 170 u8 slB = nvkm_rdvgac(device, 1, 0x28) & 0x80;
183 u8 tvB = nv_rdvgac(obj, 1, 0x33) & 0x01; 171 u8 tvB = nvkm_rdvgac(device, 1, 0x33) & 0x01;
184 if (slA && !tvA) return 0x00; 172 if (slA && !tvA) return 0x00;
185 if (slB && !tvB) return 0x03; 173 if (slB && !tvB) return 0x03;
186 if (slA) return 0x00; 174 if (slA) return 0x00;
@@ -190,30 +178,28 @@ nv_rdvgaowner(void *obj)
190 return 0x04; 178 return 0x04;
191 } 179 }
192 180
193 return nv_rdvgac(obj, 0, 0x44); 181 return nvkm_rdvgac(device, 0, 0x44);
194 } 182 }
195 183
196 nv_error(obj, "rdvgaowner after nv4x\n");
197 return 0x00; 184 return 0x00;
198} 185}
199 186
200void 187void
201nv_wrvgaowner(void *obj, u8 select) 188nvkm_wrvgaowner(struct nvkm_device *device, u8 select)
202{ 189{
203 if (nv_device(obj)->card_type < NV_50) { 190 if (device->card_type < NV_50) {
204 u8 owner = (select == 1) ? 3 : select; 191 u8 owner = (select == 1) ? 3 : select;
205 if (nv_device(obj)->chipset == 0x11) { 192 if (device->chipset == 0x11) {
206 /* workaround hw lockup bug */ 193 /* workaround hw lockup bug */
207 nv_rdvgac(obj, 0, 0x1f); 194 nvkm_rdvgac(device, 0, 0x1f);
208 nv_rdvgac(obj, 1, 0x1f); 195 nvkm_rdvgac(device, 1, 0x1f);
209 } 196 }
210 197
211 nv_wrvgac(obj, 0, 0x44, owner); 198 nvkm_wrvgac(device, 0, 0x44, owner);
212 199
213 if (nv_device(obj)->chipset == 0x11) { 200 if (device->chipset == 0x11) {
214 nv_wrvgac(obj, 0, 0x2e, owner); 201 nvkm_wrvgac(device, 0, 0x2e, owner);
215 nv_wrvgac(obj, 0, 0x2e, owner); 202 nvkm_wrvgac(device, 0, 0x2e, owner);
216 } 203 }
217 } else 204 }
218 nv_error(obj, "wrvgaowner after nv4x\n");
219} 205}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
new file mode 100644
index 000000000000..c4a2ce9b0d71
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
@@ -0,0 +1,11 @@
1nvkm-y += nvkm/engine/dma/base.o
2nvkm-y += nvkm/engine/dma/nv04.o
3nvkm-y += nvkm/engine/dma/nv50.o
4nvkm-y += nvkm/engine/dma/gf100.o
5nvkm-y += nvkm/engine/dma/gf119.o
6
7nvkm-y += nvkm/engine/dma/user.o
8nvkm-y += nvkm/engine/dma/usernv04.o
9nvkm-y += nvkm/engine/dma/usernv50.o
10nvkm-y += nvkm/engine/dma/usergf100.o
11nvkm-y += nvkm/engine/dma/usergf119.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
new file mode 100644
index 000000000000..9769fc0d5351
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
@@ -0,0 +1,157 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26#include <core/client.h>
27#include <engine/fifo.h>
28
29#include <nvif/class.h>
30
31struct nvkm_dmaobj *
32nvkm_dma_search(struct nvkm_dma *dma, struct nvkm_client *client, u64 object)
33{
34 struct rb_node *node = client->dmaroot.rb_node;
35 while (node) {
36 struct nvkm_dmaobj *dmaobj =
37 container_of(node, typeof(*dmaobj), rb);
38 if (object < dmaobj->handle)
39 node = node->rb_left;
40 else
41 if (object > dmaobj->handle)
42 node = node->rb_right;
43 else
44 return dmaobj;
45 }
46 return NULL;
47}
48
49static int
50nvkm_dma_oclass_new(struct nvkm_device *device,
51 const struct nvkm_oclass *oclass, void *data, u32 size,
52 struct nvkm_object **pobject)
53{
54 struct nvkm_dma *dma = nvkm_dma(oclass->engine);
55 struct nvkm_dmaobj *dmaobj = NULL;
56 struct nvkm_client *client = oclass->client;
57 struct rb_node **ptr = &client->dmaroot.rb_node;
58 struct rb_node *parent = NULL;
59 int ret;
60
61 ret = dma->func->class_new(dma, oclass, data, size, &dmaobj);
62 if (dmaobj)
63 *pobject = &dmaobj->object;
64 if (ret)
65 return ret;
66
67 dmaobj->handle = oclass->object;
68
69 while (*ptr) {
70 struct nvkm_dmaobj *obj = container_of(*ptr, typeof(*obj), rb);
71 parent = *ptr;
72 if (dmaobj->handle < obj->handle)
73 ptr = &parent->rb_left;
74 else
75 if (dmaobj->handle > obj->handle)
76 ptr = &parent->rb_right;
77 else
78 return -EEXIST;
79 }
80
81 rb_link_node(&dmaobj->rb, parent, ptr);
82 rb_insert_color(&dmaobj->rb, &client->dmaroot);
83 return 0;
84}
85
86static const struct nvkm_device_oclass
87nvkm_dma_oclass_base = {
88 .ctor = nvkm_dma_oclass_new,
89};
90
91static int
92nvkm_dma_oclass_fifo_new(const struct nvkm_oclass *oclass, void *data, u32 size,
93 struct nvkm_object **pobject)
94{
95 return nvkm_dma_oclass_new(oclass->engine->subdev.device,
96 oclass, data, size, pobject);
97}
98
99static const struct nvkm_sclass
100nvkm_dma_sclass[] = {
101 { 0, 0, NV_DMA_FROM_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
102 { 0, 0, NV_DMA_TO_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
103 { 0, 0, NV_DMA_IN_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
104};
105
106static int
107nvkm_dma_oclass_base_get(struct nvkm_oclass *sclass, int index,
108 const struct nvkm_device_oclass **class)
109{
110 const int count = ARRAY_SIZE(nvkm_dma_sclass);
111 if (index < count) {
112 const struct nvkm_sclass *oclass = &nvkm_dma_sclass[index];
113 sclass->base = oclass[0];
114 sclass->engn = oclass;
115 *class = &nvkm_dma_oclass_base;
116 return index;
117 }
118 return count;
119}
120
121static int
122nvkm_dma_oclass_fifo_get(struct nvkm_oclass *oclass, int index)
123{
124 const int count = ARRAY_SIZE(nvkm_dma_sclass);
125 if (index < count) {
126 oclass->base = nvkm_dma_sclass[index];
127 return index;
128 }
129 return count;
130}
131
132static void *
133nvkm_dma_dtor(struct nvkm_engine *engine)
134{
135 return nvkm_dma(engine);
136}
137
138static const struct nvkm_engine_func
139nvkm_dma = {
140 .dtor = nvkm_dma_dtor,
141 .base.sclass = nvkm_dma_oclass_base_get,
142 .fifo.sclass = nvkm_dma_oclass_fifo_get,
143};
144
145int
146nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device,
147 int index, struct nvkm_dma **pdma)
148{
149 struct nvkm_dma *dma;
150
151 if (!(dma = *pdma = kzalloc(sizeof(*dma), GFP_KERNEL)))
152 return -ENOMEM;
153 dma->func = func;
154
155 return nvkm_engine_ctor(&nvkm_dma, device, index,
156 0, true, &dma->engine);
157}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
index b7613059da08..efec5d322179 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
@@ -21,24 +21,16 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "priv.h"
25#include "user.h"
25 26
26void 27static const struct nvkm_dma_func
27nv40_mc_msi_rearm(struct nvkm_mc *pmc) 28gf100_dma = {
29 .class_new = gf100_dmaobj_new,
30};
31
32int
33gf100_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
28{ 34{
29 struct nv04_mc_priv *priv = (void *)pmc; 35 return nvkm_dma_new_(&gf100_dma, device, index, pdma);
30 nv_wr08(priv, 0x088068, 0xff);
31} 36}
32
33struct nvkm_oclass *
34nv40_mc_oclass = &(struct nvkm_mc_oclass) {
35 .base.handle = NV_SUBDEV(MC, 0x40),
36 .base.ofuncs = &(struct nvkm_ofuncs) {
37 .ctor = nv04_mc_ctor,
38 .dtor = _nvkm_mc_dtor,
39 .init = nv04_mc_init,
40 .fini = _nvkm_mc_fini,
41 },
42 .intr = nv04_mc_intr,
43 .msi_rearm = nv40_mc_msi_rearm,
44}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
new file mode 100644
index 000000000000..34c766039aed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
@@ -0,0 +1,36 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25#include "user.h"
26
27static const struct nvkm_dma_func
28gf119_dma = {
29 .class_new = gf119_dmaobj_new,
30};
31
32int
33gf119_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
34{
35 return nvkm_dma_new_(&gf119_dma, device, index, pdma);
36}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
new file mode 100644
index 000000000000..30747a0ce488
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
@@ -0,0 +1,36 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25#include "user.h"
26
27static const struct nvkm_dma_func
28nv04_dma = {
29 .class_new = nv04_dmaobj_new,
30};
31
32int
33nv04_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
34{
35 return nvkm_dma_new_(&nv04_dma, device, index, pdma);
36}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
new file mode 100644
index 000000000000..77aca7b71c83
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
@@ -0,0 +1,36 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25#include "user.h"
26
27static const struct nvkm_dma_func
28nv50_dma = {
29 .class_new = nv50_dmaobj_new,
30};
31
32int
33nv50_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
34{
35 return nvkm_dma_new_(&nv50_dma, device, index, pdma);
36}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
new file mode 100644
index 000000000000..deb37ee55c0b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
@@ -0,0 +1,18 @@
1#ifndef __NVKM_DMA_PRIV_H__
2#define __NVKM_DMA_PRIV_H__
3#define nvkm_dma(p) container_of((p), struct nvkm_dma, engine)
4#include <engine/dma.h>
5
6struct nvkm_dmaobj_func {
7 int (*bind)(struct nvkm_dmaobj *, struct nvkm_gpuobj *, int align,
8 struct nvkm_gpuobj **);
9};
10
11int nvkm_dma_new_(const struct nvkm_dma_func *, struct nvkm_device *,
12 int index, struct nvkm_dma **);
13
14struct nvkm_dma_func {
15 int (*class_new)(struct nvkm_dma *, const struct nvkm_oclass *,
16 void *data, u32 size, struct nvkm_dmaobj **);
17};
18#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
index a2b60d86baba..45ab062661a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
@@ -21,10 +21,10 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "priv.h" 24#include "user.h"
25 25
26#include <core/client.h> 26#include <core/client.h>
27#include <core/device.h> 27#include <core/gpuobj.h>
28#include <subdev/fb.h> 28#include <subdev/fb.h>
29#include <subdev/instmem.h> 29#include <subdev/instmem.h>
30 30
@@ -32,56 +32,56 @@
32#include <nvif/unpack.h> 32#include <nvif/unpack.h>
33 33
34static int 34static int
35nvkm_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent, 35nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj,
36 struct nvkm_gpuobj **pgpuobj) 36 int align, struct nvkm_gpuobj **pgpuobj)
37{ 37{
38 const struct nvkm_dmaeng_impl *impl = (void *) 38 struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
39 nv_oclass(nv_object(dmaobj)->engine); 39 return dmaobj->func->bind(dmaobj, gpuobj, align, pgpuobj);
40 int ret = 0; 40}
41
42 if (nv_object(dmaobj) == parent) { /* ctor bind */
43 if (nv_mclass(parent->parent) == NV_DEVICE) {
44 /* delayed, or no, binding */
45 return 0;
46 }
47 ret = impl->bind(dmaobj, parent, pgpuobj);
48 if (ret == 0)
49 nvkm_object_ref(NULL, &parent);
50 return ret;
51 }
52 41
53 return impl->bind(dmaobj, parent, pgpuobj); 42static void *
43nvkm_dmaobj_dtor(struct nvkm_object *base)
44{
45 struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
46 if (!RB_EMPTY_NODE(&dmaobj->rb))
47 rb_erase(&dmaobj->rb, &dmaobj->object.client->dmaroot);
48 return dmaobj;
54} 49}
55 50
51static const struct nvkm_object_func
52nvkm_dmaobj_func = {
53 .dtor = nvkm_dmaobj_dtor,
54 .bind = nvkm_dmaobj_bind,
55};
56
56int 57int
57nvkm_dmaobj_create_(struct nvkm_object *parent, 58nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
58 struct nvkm_object *engine, 59 const struct nvkm_oclass *oclass, void **pdata, u32 *psize,
59 struct nvkm_oclass *oclass, void **pdata, u32 *psize, 60 struct nvkm_dmaobj *dmaobj)
60 int length, void **pobject)
61{ 61{
62 union { 62 union {
63 struct nv_dma_v0 v0; 63 struct nv_dma_v0 v0;
64 } *args = *pdata; 64 } *args = *pdata;
65 struct nvkm_instmem *instmem = nvkm_instmem(parent); 65 struct nvkm_device *device = dma->engine.subdev.device;
66 struct nvkm_client *client = nvkm_client(parent); 66 struct nvkm_client *client = oclass->client;
67 struct nvkm_device *device = nv_device(parent); 67 struct nvkm_object *parent = oclass->parent;
68 struct nvkm_fb *pfb = nvkm_fb(parent); 68 struct nvkm_instmem *instmem = device->imem;
69 struct nvkm_dmaobj *dmaobj; 69 struct nvkm_fb *fb = device->fb;
70 void *data = *pdata; 70 void *data = *pdata;
71 u32 size = *psize; 71 u32 size = *psize;
72 int ret; 72 int ret;
73 73
74 ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject); 74 nvkm_object_ctor(&nvkm_dmaobj_func, oclass, &dmaobj->object);
75 dmaobj = *pobject; 75 dmaobj->func = func;
76 if (ret) 76 dmaobj->dma = dma;
77 return ret; 77 RB_CLEAR_NODE(&dmaobj->rb);
78 78
79 nv_ioctl(parent, "create dma size %d\n", *psize); 79 nvif_ioctl(parent, "create dma size %d\n", *psize);
80 if (nvif_unpack(args->v0, 0, 0, true)) { 80 if (nvif_unpack(args->v0, 0, 0, true)) {
81 nv_ioctl(parent, "create dma vers %d target %d access %d " 81 nvif_ioctl(parent, "create dma vers %d target %d access %d "
82 "start %016llx limit %016llx\n", 82 "start %016llx limit %016llx\n",
83 args->v0.version, args->v0.target, args->v0.access, 83 args->v0.version, args->v0.target, args->v0.access,
84 args->v0.start, args->v0.limit); 84 args->v0.start, args->v0.limit);
85 dmaobj->target = args->v0.target; 85 dmaobj->target = args->v0.target;
86 dmaobj->access = args->v0.access; 86 dmaobj->access = args->v0.access;
87 dmaobj->start = args->v0.start; 87 dmaobj->start = args->v0.start;
@@ -101,7 +101,7 @@ nvkm_dmaobj_create_(struct nvkm_object *parent,
101 break; 101 break;
102 case NV_DMA_V0_TARGET_VRAM: 102 case NV_DMA_V0_TARGET_VRAM:
103 if (!client->super) { 103 if (!client->super) {
104 if (dmaobj->limit >= pfb->ram->size - instmem->reserved) 104 if (dmaobj->limit >= fb->ram->size - instmem->reserved)
105 return -EACCES; 105 return -EACCES;
106 if (device->card_type >= NV_50) 106 if (device->card_type >= NV_50)
107 return -EACCES; 107 return -EACCES;
@@ -142,23 +142,3 @@ nvkm_dmaobj_create_(struct nvkm_object *parent,
142 142
143 return ret; 143 return ret;
144} 144}
145
146int
147_nvkm_dmaeng_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
148 struct nvkm_oclass *oclass, void *data, u32 size,
149 struct nvkm_object **pobject)
150{
151 const struct nvkm_dmaeng_impl *impl = (void *)oclass;
152 struct nvkm_dmaeng *dmaeng;
153 int ret;
154
155 ret = nvkm_engine_create(parent, engine, oclass, true, "DMAOBJ",
156 "dmaobj", &dmaeng);
157 *pobject = nv_object(dmaeng);
158 if (ret)
159 return ret;
160
161 nv_engine(dmaeng)->sclass = impl->sclass;
162 dmaeng->bind = nvkm_dmaobj_bind;
163 return 0;
164}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
new file mode 100644
index 000000000000..69a7f1034024
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
@@ -0,0 +1,18 @@
1#ifndef __NVKM_DMA_USER_H__
2#define __NVKM_DMA_USER_H__
3#define nvkm_dmaobj(p) container_of((p), struct nvkm_dmaobj, object)
4#include "priv.h"
5
6int nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *, struct nvkm_dma *,
7 const struct nvkm_oclass *, void **data, u32 *size,
8 struct nvkm_dmaobj *);
9
10int nv04_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
11 struct nvkm_dmaobj **);
12int nv50_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
13 struct nvkm_dmaobj **);
14int gf100_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
15 struct nvkm_dmaobj **);
16int gf119_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
17 struct nvkm_dmaobj **);
18#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
new file mode 100644
index 000000000000..13e341cc4e32
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
@@ -0,0 +1,149 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#define gf100_dmaobj(p) container_of((p), struct gf100_dmaobj, base)
25#include "user.h"
26
27#include <core/client.h>
28#include <core/gpuobj.h>
29#include <subdev/fb.h>
30
31#include <nvif/class.h>
32#include <nvif/unpack.h>
33
34struct gf100_dmaobj {
35 struct nvkm_dmaobj base;
36 u32 flags0;
37 u32 flags5;
38};
39
40static int
41gf100_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
42 int align, struct nvkm_gpuobj **pgpuobj)
43{
44 struct gf100_dmaobj *dmaobj = gf100_dmaobj(base);
45 struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
46 int ret;
47
48 ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
49 if (ret == 0) {
50 nvkm_kmap(*pgpuobj);
51 nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
52 nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
53 nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
54 nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
55 upper_32_bits(dmaobj->base.start));
56 nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
57 nvkm_wo32(*pgpuobj, 0x14, dmaobj->flags5);
58 nvkm_done(*pgpuobj);
59 }
60
61 return ret;
62}
63
64static const struct nvkm_dmaobj_func
65gf100_dmaobj_func = {
66 .bind = gf100_dmaobj_bind,
67};
68
69int
70gf100_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
71 void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
72{
73 union {
74 struct gf100_dma_v0 v0;
75 } *args;
76 struct nvkm_object *parent = oclass->parent;
77 struct gf100_dmaobj *dmaobj;
78 u32 kind, user, unkn;
79 int ret;
80
81 if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
82 return -ENOMEM;
83 *pdmaobj = &dmaobj->base;
84
85 ret = nvkm_dmaobj_ctor(&gf100_dmaobj_func, dma, oclass,
86 &data, &size, &dmaobj->base);
87 if (ret)
88 return ret;
89
90 args = data;
91
92 nvif_ioctl(parent, "create gf100 dma size %d\n", size);
93 if (nvif_unpack(args->v0, 0, 0, false)) {
94 nvif_ioctl(parent,
95 "create gf100 dma vers %d priv %d kind %02x\n",
96 args->v0.version, args->v0.priv, args->v0.kind);
97 kind = args->v0.kind;
98 user = args->v0.priv;
99 unkn = 0;
100 } else
101 if (size == 0) {
102 if (dmaobj->base.target != NV_MEM_TARGET_VM) {
103 kind = GF100_DMA_V0_KIND_PITCH;
104 user = GF100_DMA_V0_PRIV_US;
105 unkn = 2;
106 } else {
107 kind = GF100_DMA_V0_KIND_VM;
108 user = GF100_DMA_V0_PRIV_VM;
109 unkn = 0;
110 }
111 } else
112 return ret;
113
114 if (user > 2)
115 return -EINVAL;
116 dmaobj->flags0 |= (kind << 22) | (user << 20) | oclass->base.oclass;
117 dmaobj->flags5 |= (unkn << 16);
118
119 switch (dmaobj->base.target) {
120 case NV_MEM_TARGET_VM:
121 dmaobj->flags0 |= 0x00000000;
122 break;
123 case NV_MEM_TARGET_VRAM:
124 dmaobj->flags0 |= 0x00010000;
125 break;
126 case NV_MEM_TARGET_PCI:
127 dmaobj->flags0 |= 0x00020000;
128 break;
129 case NV_MEM_TARGET_PCI_NOSNOOP:
130 dmaobj->flags0 |= 0x00030000;
131 break;
132 default:
133 return -EINVAL;
134 }
135
136 switch (dmaobj->base.access) {
137 case NV_MEM_ACCESS_VM:
138 break;
139 case NV_MEM_ACCESS_RO:
140 dmaobj->flags0 |= 0x00040000;
141 break;
142 case NV_MEM_ACCESS_WO:
143 case NV_MEM_ACCESS_RW:
144 dmaobj->flags0 |= 0x00080000;
145 break;
146 }
147
148 return 0;
149}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c
new file mode 100644
index 000000000000..0e1af8b4db84
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c
@@ -0,0 +1,131 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#define gf119_dmaobj(p) container_of((p), struct gf119_dmaobj, base)
25#include "user.h"
26
27#include <core/client.h>
28#include <core/gpuobj.h>
29#include <subdev/fb.h>
30
31#include <nvif/class.h>
32#include <nvif/unpack.h>
33
34struct gf119_dmaobj {
35 struct nvkm_dmaobj base;
36 u32 flags0;
37};
38
39static int
40gf119_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
41 int align, struct nvkm_gpuobj **pgpuobj)
42{
43 struct gf119_dmaobj *dmaobj = gf119_dmaobj(base);
44 struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
45 int ret;
46
47 ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
48 if (ret == 0) {
49 nvkm_kmap(*pgpuobj);
50 nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
51 nvkm_wo32(*pgpuobj, 0x04, dmaobj->base.start >> 8);
52 nvkm_wo32(*pgpuobj, 0x08, dmaobj->base.limit >> 8);
53 nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
54 nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
55 nvkm_wo32(*pgpuobj, 0x14, 0x00000000);
56 nvkm_done(*pgpuobj);
57 }
58
59 return ret;
60}
61
62static const struct nvkm_dmaobj_func
63gf119_dmaobj_func = {
64 .bind = gf119_dmaobj_bind,
65};
66
/* Constructor for a gf119 DMA object.  Unpacks the optional gf119_dma_v0
 * args (kind/page), falls back to target-dependent defaults when no args
 * were supplied, validates them, and precomputes flags0 for bind(). */
67int
68gf119_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
69 void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
70{
71 union {
72 struct gf119_dma_v0 v0;
73 } *args;
74 struct nvkm_object *parent = oclass->parent;
75 struct gf119_dmaobj *dmaobj;
76 u32 kind, page;
77 int ret;
78
79 if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
80 return -ENOMEM;
 /* Publish the object before ctor so caller can clean up on error. */
81 *pdmaobj = &dmaobj->base;
82
83 ret = nvkm_dmaobj_ctor(&gf119_dmaobj_func, dma, oclass,
84 &data, &size, &dmaobj->base);
85 if (ret)
86 return ret;
87
 /* nvkm_dmaobj_ctor consumed the common header; remaining bytes are ours. */
88 args = data;
89
90 nvif_ioctl(parent, "create gf119 dma size %d\n", size);
91 if (nvif_unpack(args->v0, 0, 0, false)) {
 /* NOTE(review): message says "gf100" in the gf119 path — looks like
  * a copy-paste leftover in the debug string; left untouched here. */
92 nvif_ioctl(parent,
93 "create gf100 dma vers %d page %d kind %02x\n",
94 args->v0.version, args->v0.page, args->v0.kind);
95 kind = args->v0.kind;
96 page = args->v0.page;
97 } else
98 if (size == 0) {
 /* No args: pick pitch/small-page for linear targets, VM/large-page
  * for virtual-memory targets. */
99 if (dmaobj->base.target != NV_MEM_TARGET_VM) {
100 kind = GF119_DMA_V0_KIND_PITCH;
101 page = GF119_DMA_V0_PAGE_SP;
102 } else {
103 kind = GF119_DMA_V0_KIND_VM;
104 page = GF119_DMA_V0_PAGE_LP;
105 }
106 } else
 /* ret holds nvif_unpack's error for trailing/invalid data. */
107 return ret;
108
109 if (page > 1)
110 return -EINVAL;
111 dmaobj->flags0 = (kind << 20) | (page << 6);
112
113 switch (dmaobj->base.target) {
114 case NV_MEM_TARGET_VRAM:
115 dmaobj->flags0 |= 0x00000009;
116 break;
117 case NV_MEM_TARGET_VM:
118 case NV_MEM_TARGET_PCI:
119 case NV_MEM_TARGET_PCI_NOSNOOP:
120 /* XXX: don't currently know how to construct a real one
121 * of these. we only use them to represent pushbufs
122 * on these chipsets, and the classes that use them
123 * deal with the target themselves.
124 */
125 break;
126 default:
127 return -EINVAL;
128 }
129
130 return 0;
131}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
new file mode 100644
index 000000000000..c95942ef8216
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
@@ -0,0 +1,133 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#define nv04_dmaobj(p) container_of((p), struct nv04_dmaobj, base)
25#include "user.h"
26
27#include <core/gpuobj.h>
28#include <subdev/fb.h>
29#include <subdev/mmu/nv04.h>
30
31#include <nvif/class.h>
32
/* nv04-generation DMA object: 'clone' marks objects that alias the MMU's
 * page table instead of describing their own range. */
33struct nv04_dmaobj {
34 struct nvkm_dmaobj base;
35 bool clone;
36 u32 flags0;
37 u32 flags2;
38};
39
/* Build the 16-byte nv04 DMA descriptor.  For clone objects the linear
 * offset is translated through the nv04 MMU page table first; a clone
 * starting at 0 simply wraps the page table itself. */
40static int
41nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
42 int align, struct nvkm_gpuobj **pgpuobj)
43{
44 struct nv04_dmaobj *dmaobj = nv04_dmaobj(base);
45 struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
46 u64 offset = dmaobj->base.start & 0xfffff000;
47 u64 adjust = dmaobj->base.start & 0x00000fff;
48 u32 length = dmaobj->base.limit - dmaobj->base.start;
49 int ret;
50
51 if (dmaobj->clone) {
52 struct nv04_mmu *mmu = nv04_mmu(device->mmu);
53 struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0];
54 if (!dmaobj->base.start)
55 return nvkm_gpuobj_wrap(pgt, pgpuobj);
56 nvkm_kmap(pgt);
 /* NOTE(review): 8 + (offset >> 10) indexes the PTE for this page —
  * exact PTE stride/base is MMU-specific; confirm against nv04 MMU docs. */
57 offset = nvkm_ro32(pgt, 8 + (offset >> 10));
58 offset &= 0xfffff000;
59 nvkm_done(pgt);
60 }
61
62 ret = nvkm_gpuobj_new(device, 16, align, false, parent, pgpuobj);
63 if (ret == 0) {
64 nvkm_kmap(*pgpuobj);
65 nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | (adjust << 20));
66 nvkm_wo32(*pgpuobj, 0x04, length);
67 nvkm_wo32(*pgpuobj, 0x08, dmaobj->flags2 | offset);
68 nvkm_wo32(*pgpuobj, 0x0c, dmaobj->flags2 | offset);
69 nvkm_done(*pgpuobj);
70 }
71
72 return ret;
73}
74
/* Method table: nv04 dmaobjs only implement bind. */
75static const struct nvkm_dmaobj_func
76nv04_dmaobj_func = {
77 .bind = nv04_dmaobj_bind,
78};
79
/* Constructor for an nv04 DMA object.  VM targets are rewritten to PCI
 * (optionally cloning the nv04 MMU's identity page table), then flags0/2
 * are precomputed from target and access mode for bind(). */
80int
81nv04_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
82 void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
83{
84 struct nvkm_device *device = dma->engine.subdev.device;
85 struct nv04_dmaobj *dmaobj;
86 int ret;
87
88 if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
89 return -ENOMEM;
90 *pdmaobj = &dmaobj->base;
91
92 ret = nvkm_dmaobj_ctor(&nv04_dmaobj_func, dma, oclass,
93 &data, &size, &dmaobj->base);
94 if (ret)
95 return ret;
96
97 if (dmaobj->base.target == NV_MEM_TARGET_VM) {
 /* Only the original nv04 MMU supports the page-table clone trick. */
98 if (device->mmu->func == &nv04_mmu)
99 dmaobj->clone = true;
100 dmaobj->base.target = NV_MEM_TARGET_PCI;
101 dmaobj->base.access = NV_MEM_ACCESS_RW;
102 }
103
104 dmaobj->flags0 = oclass->base.oclass;
105 switch (dmaobj->base.target) {
106 case NV_MEM_TARGET_VRAM:
107 dmaobj->flags0 |= 0x00003000;
108 break;
109 case NV_MEM_TARGET_PCI:
110 dmaobj->flags0 |= 0x00023000;
111 break;
112 case NV_MEM_TARGET_PCI_NOSNOOP:
113 dmaobj->flags0 |= 0x00033000;
114 break;
115 default:
116 return -EINVAL;
117 }
118
119 switch (dmaobj->base.access) {
120 case NV_MEM_ACCESS_RO:
121 dmaobj->flags0 |= 0x00004000;
122 break;
123 case NV_MEM_ACCESS_WO:
124 dmaobj->flags0 |= 0x00008000;
 /* fallthrough: WO also gets the flags2 write-enable bit, same as RW. */
125 case NV_MEM_ACCESS_RW:
126 dmaobj->flags2 |= 0x00000002;
127 break;
128 default:
129 return -EINVAL;
130 }
131
132 return 0;
133}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
new file mode 100644
index 000000000000..5b7ce313ea14
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#define nv50_dmaobj(p) container_of((p), struct nv50_dmaobj, base)
25#include "user.h"
26
27#include <core/client.h>
28#include <core/gpuobj.h>
29#include <subdev/fb.h>
30
31#include <nvif/class.h>
32#include <nvif/unpack.h>
33
/* nv50-generation DMA object: base plus the two precomputed descriptor
 * words (flags0: comp/kind/user/target/access; flags5: part field). */
34struct nv50_dmaobj {
35 struct nvkm_dmaobj base;
36 u32 flags0;
37 u32 flags5;
38};
39
/* Write the 24-byte nv50 DMA descriptor: flags0, 32-bit limit/start lows,
 * packed high bits of both, a zero word, then flags5. */
40static int
41nv50_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
42 int align, struct nvkm_gpuobj **pgpuobj)
43{
44 struct nv50_dmaobj *dmaobj = nv50_dmaobj(base);
45 struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
46 int ret;
47
48 ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
49 if (ret == 0) {
50 nvkm_kmap(*pgpuobj);
51 nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
52 nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
53 nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
54 nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
55 upper_32_bits(dmaobj->base.start));
56 nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
57 nvkm_wo32(*pgpuobj, 0x14, dmaobj->flags5);
58 nvkm_done(*pgpuobj);
59 }
60
61 return ret;
62}
63
/* Method table: nv50 dmaobjs only implement bind. */
64static const struct nvkm_dmaobj_func
65nv50_dmaobj_func = {
66 .bind = nv50_dmaobj_bind,
67};
68
/* Constructor for an nv50 DMA object.  Unpacks optional nv50_dma_v0 args
 * (priv/part/comp/kind), applies target-dependent defaults when no args
 * are supplied, range-checks each field, and precomputes flags0/flags5. */
69int
70nv50_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
71 void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
72{
73 union {
74 struct nv50_dma_v0 v0;
75 } *args;
76 struct nvkm_object *parent = oclass->parent;
77 struct nv50_dmaobj *dmaobj;
78 u32 user, part, comp, kind;
79 int ret;
80
81 if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
82 return -ENOMEM;
83 *pdmaobj = &dmaobj->base;
84
85 ret = nvkm_dmaobj_ctor(&nv50_dmaobj_func, dma, oclass,
86 &data, &size, &dmaobj->base);
87 if (ret)
88 return ret;
89
90 args = data;
91
92 nvif_ioctl(parent, "create nv50 dma size %d\n", size);
93 if (nvif_unpack(args->v0, 0, 0, false)) {
94 nvif_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
95 "comp %d kind %02x\n", args->v0.version,
96 args->v0.priv, args->v0.part, args->v0.comp,
97 args->v0.kind);
98 user = args->v0.priv;
99 part = args->v0.part;
100 comp = args->v0.comp;
101 kind = args->v0.kind;
102 } else
103 if (size == 0) {
 /* No args: pitch/user defaults for linear targets, VM settings
  * for virtual-memory targets. */
104 if (dmaobj->base.target != NV_MEM_TARGET_VM) {
105 user = NV50_DMA_V0_PRIV_US;
106 part = NV50_DMA_V0_PART_256;
107 comp = NV50_DMA_V0_COMP_NONE;
108 kind = NV50_DMA_V0_KIND_PITCH;
109 } else {
110 user = NV50_DMA_V0_PRIV_VM;
111 part = NV50_DMA_V0_PART_VM;
112 comp = NV50_DMA_V0_COMP_VM;
113 kind = NV50_DMA_V0_KIND_VM;
114 }
115 } else
 /* ret carries nvif_unpack's failure for malformed args. */
116 return ret;
117
118 if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
119 return -EINVAL;
120 dmaobj->flags0 = (comp << 29) | (kind << 22) | (user << 20) |
121 oclass->base.oclass;
122 dmaobj->flags5 = (part << 16);
123
124 switch (dmaobj->base.target) {
125 case NV_MEM_TARGET_VM:
126 dmaobj->flags0 |= 0x00000000;
127 break;
128 case NV_MEM_TARGET_VRAM:
129 dmaobj->flags0 |= 0x00010000;
130 break;
131 case NV_MEM_TARGET_PCI:
132 dmaobj->flags0 |= 0x00020000;
133 break;
134 case NV_MEM_TARGET_PCI_NOSNOOP:
135 dmaobj->flags0 |= 0x00030000;
136 break;
137 default:
138 return -EINVAL;
139 }
140
141 switch (dmaobj->base.access) {
142 case NV_MEM_ACCESS_VM:
143 break;
144 case NV_MEM_ACCESS_RO:
145 dmaobj->flags0 |= 0x00040000;
146 break;
147 case NV_MEM_ACCESS_WO:
148 case NV_MEM_ACCESS_RW:
149 dmaobj->flags0 |= 0x00080000;
150 break;
151 default:
152 return -EINVAL;
153 }
154
155 return 0;
156}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild
deleted file mode 100644
index 7529632dbedb..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild
+++ /dev/null
@@ -1,5 +0,0 @@
1nvkm-y += nvkm/engine/dmaobj/base.o
2nvkm-y += nvkm/engine/dmaobj/nv04.o
3nvkm-y += nvkm/engine/dmaobj/nv50.o
4nvkm-y += nvkm/engine/dmaobj/gf100.o
5nvkm-y += nvkm/engine/dmaobj/gf110.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c
deleted file mode 100644
index f880e5167e45..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c
+++ /dev/null
@@ -1,176 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26#include <core/client.h>
27#include <core/gpuobj.h>
28#include <subdev/fb.h>
29
30#include <nvif/class.h>
31#include <nvif/unpack.h>
32
/* Pre-rework gf100 implementation, DELETED by this commit (old-style
 * nvkm object model); retained byte-identical as diff context. */
33struct gf100_dmaobj_priv {
34 struct nvkm_dmaobj base;
35 u32 flags0;
36 u32 flags5;
37};
38
/* Old-style bind: only binds for recognised GT214 display channel parents;
 * engine-context parents get no descriptor at all (returns 0 early). */
39static int
40gf100_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
41 struct nvkm_gpuobj **pgpuobj)
42{
43 struct gf100_dmaobj_priv *priv = (void *)dmaobj;
44 int ret;
45
46 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
47 switch (nv_mclass(parent->parent)) {
48 case GT214_DISP_CORE_CHANNEL_DMA:
49 case GT214_DISP_BASE_CHANNEL_DMA:
50 case GT214_DISP_OVERLAY_CHANNEL_DMA:
51 break;
52 default:
53 return -EINVAL;
54 }
55 } else
56 return 0;
57
58 ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
59 if (ret == 0) {
60 nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
61 nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
62 nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
63 nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
64 upper_32_bits(priv->base.start));
65 nv_wo32(*pgpuobj, 0x10, 0x00000000);
66 nv_wo32(*pgpuobj, 0x14, priv->flags5);
67 }
68
69 return ret;
70}
71
/* Old-style gf100 constructor, DELETED by this commit; equivalent logic now
 * lives in usergf100.c's gf100_dmaobj_new.  Kept byte-identical. */
72static int
73gf100_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
74 struct nvkm_oclass *oclass, void *data, u32 size,
75 struct nvkm_object **pobject)
76{
77 struct nvkm_dmaeng *dmaeng = (void *)engine;
78 union {
79 struct gf100_dma_v0 v0;
80 } *args;
81 struct gf100_dmaobj_priv *priv;
82 u32 kind, user, unkn;
83 int ret;
84
85 ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
86 *pobject = nv_object(priv);
87 if (ret)
88 return ret;
89 args = data;
90
91 nv_ioctl(parent, "create gf100 dma size %d\n", size);
92 if (nvif_unpack(args->v0, 0, 0, false)) {
93 nv_ioctl(parent, "create gf100 dma vers %d priv %d kind %02x\n",
94 args->v0.version, args->v0.priv, args->v0.kind);
95 kind = args->v0.kind;
96 user = args->v0.priv;
97 unkn = 0;
98 } else
99 if (size == 0) {
100 if (priv->base.target != NV_MEM_TARGET_VM) {
101 kind = GF100_DMA_V0_KIND_PITCH;
102 user = GF100_DMA_V0_PRIV_US;
103 unkn = 2;
104 } else {
105 kind = GF100_DMA_V0_KIND_VM;
106 user = GF100_DMA_V0_PRIV_VM;
107 unkn = 0;
108 }
109 } else
110 return ret;
111
112 if (user > 2)
113 return -EINVAL;
114 priv->flags0 |= (kind << 22) | (user << 20);
115 priv->flags5 |= (unkn << 16);
116
117 switch (priv->base.target) {
118 case NV_MEM_TARGET_VM:
119 priv->flags0 |= 0x00000000;
120 break;
121 case NV_MEM_TARGET_VRAM:
122 priv->flags0 |= 0x00010000;
123 break;
124 case NV_MEM_TARGET_PCI:
125 priv->flags0 |= 0x00020000;
126 break;
127 case NV_MEM_TARGET_PCI_NOSNOOP:
128 priv->flags0 |= 0x00030000;
129 break;
130 default:
131 return -EINVAL;
132 }
133
134 switch (priv->base.access) {
135 case NV_MEM_ACCESS_VM:
136 break;
137 case NV_MEM_ACCESS_RO:
138 priv->flags0 |= 0x00040000;
139 break;
140 case NV_MEM_ACCESS_WO:
141 case NV_MEM_ACCESS_RW:
142 priv->flags0 |= 0x00080000;
143 break;
144 }
145
 /* Old model: object binds itself at ctor time via the engine's hook. */
146 return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
147}
148
/* Old-style class tables for the gf100 dmaobj engine, DELETED by this
 * commit (replaced by the nvkm_dmaobj_func model). */
149static struct nvkm_ofuncs
150gf100_dmaobj_ofuncs = {
151 .ctor = gf100_dmaobj_ctor,
152 .dtor = _nvkm_dmaobj_dtor,
153 .init = _nvkm_dmaobj_init,
154 .fini = _nvkm_dmaobj_fini,
155};
156
157static struct nvkm_oclass
158gf100_dmaeng_sclass[] = {
159 { NV_DMA_FROM_MEMORY, &gf100_dmaobj_ofuncs },
160 { NV_DMA_TO_MEMORY, &gf100_dmaobj_ofuncs },
161 { NV_DMA_IN_MEMORY, &gf100_dmaobj_ofuncs },
162 {}
163};
164
165struct nvkm_oclass *
166gf100_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
167 .base.handle = NV_ENGINE(DMAOBJ, 0xc0),
168 .base.ofuncs = &(struct nvkm_ofuncs) {
169 .ctor = _nvkm_dmaeng_ctor,
170 .dtor = _nvkm_dmaeng_dtor,
171 .init = _nvkm_dmaeng_init,
172 .fini = _nvkm_dmaeng_fini,
173 },
174 .sclass = gf100_dmaeng_sclass,
175 .bind = gf100_dmaobj_bind,
176}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c
deleted file mode 100644
index bf8f0f20976c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c
+++ /dev/null
@@ -1,165 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26#include <core/client.h>
27#include <core/gpuobj.h>
28#include <subdev/fb.h>
29
30#include <nvif/class.h>
31#include <nvif/unpack.h>
32
/* Pre-rework gf110 implementation, DELETED by this commit (renamed to
 * gf119 in the new code); retained byte-identical as diff context. */
33struct gf110_dmaobj_priv {
34 struct nvkm_dmaobj base;
35 u32 flags0;
36};
37
/* Old-style bind: whitelist of display channel parent classes, then write
 * the same 24-byte descriptor the new gf119_dmaobj_bind produces. */
38static int
39gf110_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
40 struct nvkm_gpuobj **pgpuobj)
41{
42 struct gf110_dmaobj_priv *priv = (void *)dmaobj;
43 int ret;
44
45 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
46 switch (nv_mclass(parent->parent)) {
47 case GF110_DISP_CORE_CHANNEL_DMA:
48 case GK104_DISP_CORE_CHANNEL_DMA:
49 case GK110_DISP_CORE_CHANNEL_DMA:
50 case GM107_DISP_CORE_CHANNEL_DMA:
51 case GM204_DISP_CORE_CHANNEL_DMA:
52 case GF110_DISP_BASE_CHANNEL_DMA:
53 case GK104_DISP_BASE_CHANNEL_DMA:
54 case GK110_DISP_BASE_CHANNEL_DMA:
55 case GF110_DISP_OVERLAY_CONTROL_DMA:
56 case GK104_DISP_OVERLAY_CONTROL_DMA:
57 break;
58 default:
59 return -EINVAL;
60 }
61 } else
62 return 0;
63
64 ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
65 if (ret == 0) {
66 nv_wo32(*pgpuobj, 0x00, priv->flags0);
67 nv_wo32(*pgpuobj, 0x04, priv->base.start >> 8);
68 nv_wo32(*pgpuobj, 0x08, priv->base.limit >> 8);
69 nv_wo32(*pgpuobj, 0x0c, 0x00000000);
70 nv_wo32(*pgpuobj, 0x10, 0x00000000);
71 nv_wo32(*pgpuobj, 0x14, 0x00000000);
72 }
73
74 return ret;
75}
76
/* Old-style gf110 constructor, DELETED by this commit; the equivalent
 * logic now lives in usergf119.c's gf119_dmaobj_new. */
77static int
78gf110_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
79 struct nvkm_oclass *oclass, void *data, u32 size,
80 struct nvkm_object **pobject)
81{
82 struct nvkm_dmaeng *dmaeng = (void *)engine;
83 union {
84 struct gf110_dma_v0 v0;
85 } *args;
86 struct gf110_dmaobj_priv *priv;
87 u32 kind, page;
88 int ret;
89
90 ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
91 *pobject = nv_object(priv);
92 if (ret)
93 return ret;
94 args = data;
95
96 nv_ioctl(parent, "create gf110 dma size %d\n", size);
97 if (nvif_unpack(args->v0, 0, 0, false)) {
 /* NOTE(review): "gf100" in a gf110 message — pre-existing copy-paste
  * in the deleted code; carried into the new gf119 file unchanged. */
98 nv_ioctl(parent, "create gf100 dma vers %d page %d kind %02x\n",
99 args->v0.version, args->v0.page, args->v0.kind);
100 kind = args->v0.kind;
101 page = args->v0.page;
102 } else
103 if (size == 0) {
104 if (priv->base.target != NV_MEM_TARGET_VM) {
105 kind = GF110_DMA_V0_KIND_PITCH;
106 page = GF110_DMA_V0_PAGE_SP;
107 } else {
108 kind = GF110_DMA_V0_KIND_VM;
109 page = GF110_DMA_V0_PAGE_LP;
110 }
111 } else
112 return ret;
113
114 if (page > 1)
115 return -EINVAL;
116 priv->flags0 = (kind << 20) | (page << 6);
117
118 switch (priv->base.target) {
119 case NV_MEM_TARGET_VRAM:
120 priv->flags0 |= 0x00000009;
121 break;
122 case NV_MEM_TARGET_VM:
123 case NV_MEM_TARGET_PCI:
124 case NV_MEM_TARGET_PCI_NOSNOOP:
125 /* XXX: don't currently know how to construct a real one
126 * of these. we only use them to represent pushbufs
127 * on these chipsets, and the classes that use them
128 * deal with the target themselves.
129 */
130 break;
131 default:
132 return -EINVAL;
133 }
134
135 return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
136}
137
/* Old-style class tables for the gf110 dmaobj engine, DELETED by this
 * commit along with the rest of the previous object model. */
138static struct nvkm_ofuncs
139gf110_dmaobj_ofuncs = {
140 .ctor = gf110_dmaobj_ctor,
141 .dtor = _nvkm_dmaobj_dtor,
142 .init = _nvkm_dmaobj_init,
143 .fini = _nvkm_dmaobj_fini,
144};
145
146static struct nvkm_oclass
147gf110_dmaeng_sclass[] = {
148 { NV_DMA_FROM_MEMORY, &gf110_dmaobj_ofuncs },
149 { NV_DMA_TO_MEMORY, &gf110_dmaobj_ofuncs },
150 { NV_DMA_IN_MEMORY, &gf110_dmaobj_ofuncs },
151 {}
152};
153
154struct nvkm_oclass *
155gf110_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
156 .base.handle = NV_ENGINE(DMAOBJ, 0xd0),
157 .base.ofuncs = &(struct nvkm_ofuncs) {
158 .ctor = _nvkm_dmaeng_ctor,
159 .dtor = _nvkm_dmaeng_dtor,
160 .init = _nvkm_dmaeng_init,
161 .fini = _nvkm_dmaeng_fini,
162 },
163 .sclass = gf110_dmaeng_sclass,
164 .bind = gf110_dmaobj_bind,
165}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c
deleted file mode 100644
index b4379c2a2fb5..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c
+++ /dev/null
@@ -1,163 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26#include <core/gpuobj.h>
27#include <subdev/fb.h>
28#include <subdev/mmu/nv04.h>
29
30#include <nvif/class.h>
31
/* Pre-rework nv04 implementation, DELETED by this commit; retained
 * byte-identical as diff context (new version is in usernv04.c). */
32struct nv04_dmaobj_priv {
33 struct nvkm_dmaobj base;
34 bool clone;
35 u32 flags0;
36 u32 flags2;
37};
38
/* Old-style bind: validates DMA-channel parents, resolves clone objects
 * through the nv04 MMU page table, then writes the 16-byte descriptor. */
39static int
40nv04_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
41 struct nvkm_gpuobj **pgpuobj)
42{
43 struct nv04_dmaobj_priv *priv = (void *)dmaobj;
44 struct nvkm_gpuobj *gpuobj;
45 u64 offset = priv->base.start & 0xfffff000;
46 u64 adjust = priv->base.start & 0x00000fff;
47 u32 length = priv->base.limit - priv->base.start;
48 int ret;
49
50 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
51 switch (nv_mclass(parent->parent)) {
52 case NV03_CHANNEL_DMA:
53 case NV10_CHANNEL_DMA:
54 case NV17_CHANNEL_DMA:
55 case NV40_CHANNEL_DMA:
56 break;
57 default:
58 return -EINVAL;
59 }
60 }
61
62 if (priv->clone) {
63 struct nv04_mmu_priv *mmu = nv04_mmu(dmaobj);
64 struct nvkm_gpuobj *pgt = mmu->vm->pgt[0].obj[0];
65 if (!dmaobj->start)
66 return nvkm_gpuobj_dup(parent, pgt, pgpuobj);
67 offset = nv_ro32(pgt, 8 + (offset >> 10));
68 offset &= 0xfffff000;
69 }
70
71 ret = nvkm_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
72 *pgpuobj = gpuobj;
73 if (ret == 0) {
74 nv_wo32(*pgpuobj, 0x00, priv->flags0 | (adjust << 20));
75 nv_wo32(*pgpuobj, 0x04, length);
76 nv_wo32(*pgpuobj, 0x08, priv->flags2 | offset);
77 nv_wo32(*pgpuobj, 0x0c, priv->flags2 | offset);
78 }
79
80 return ret;
81}
82
/* Old-style nv04 constructor, DELETED by this commit; equivalent logic is
 * now in usernv04.c's nv04_dmaobj_new. */
83static int
84nv04_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
85 struct nvkm_oclass *oclass, void *data, u32 size,
86 struct nvkm_object **pobject)
87{
88 struct nvkm_dmaeng *dmaeng = (void *)engine;
89 struct nv04_mmu_priv *mmu = nv04_mmu(engine);
90 struct nv04_dmaobj_priv *priv;
91 int ret;
92
93 ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
94 *pobject = nv_object(priv);
 /* nv04 takes no extra args: any trailing bytes are an error (-ENOSYS). */
95 if (ret || (ret = -ENOSYS, size))
96 return ret;
97
98 if (priv->base.target == NV_MEM_TARGET_VM) {
99 if (nv_object(mmu)->oclass == &nv04_mmu_oclass)
100 priv->clone = true;
101 priv->base.target = NV_MEM_TARGET_PCI;
102 priv->base.access = NV_MEM_ACCESS_RW;
103 }
104
105 priv->flags0 = nv_mclass(priv);
106 switch (priv->base.target) {
107 case NV_MEM_TARGET_VRAM:
108 priv->flags0 |= 0x00003000;
109 break;
110 case NV_MEM_TARGET_PCI:
111 priv->flags0 |= 0x00023000;
112 break;
113 case NV_MEM_TARGET_PCI_NOSNOOP:
114 priv->flags0 |= 0x00033000;
115 break;
116 default:
117 return -EINVAL;
118 }
119
120 switch (priv->base.access) {
121 case NV_MEM_ACCESS_RO:
122 priv->flags0 |= 0x00004000;
123 break;
124 case NV_MEM_ACCESS_WO:
125 priv->flags0 |= 0x00008000;
 /* fallthrough: WO also sets the flags2 bit shared with RW. */
126 case NV_MEM_ACCESS_RW:
127 priv->flags2 |= 0x00000002;
128 break;
129 default:
130 return -EINVAL;
131 }
132
133 return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
134}
135
/* Old-style class tables for the nv04 dmaobj engine, DELETED by this
 * commit along with the rest of the previous object model. */
136static struct nvkm_ofuncs
137nv04_dmaobj_ofuncs = {
138 .ctor = nv04_dmaobj_ctor,
139 .dtor = _nvkm_dmaobj_dtor,
140 .init = _nvkm_dmaobj_init,
141 .fini = _nvkm_dmaobj_fini,
142};
143
144static struct nvkm_oclass
145nv04_dmaeng_sclass[] = {
146 { NV_DMA_FROM_MEMORY, &nv04_dmaobj_ofuncs },
147 { NV_DMA_TO_MEMORY, &nv04_dmaobj_ofuncs },
148 { NV_DMA_IN_MEMORY, &nv04_dmaobj_ofuncs },
149 {}
150};
151
152struct nvkm_oclass *
153nv04_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
154 .base.handle = NV_ENGINE(DMAOBJ, 0x04),
155 .base.ofuncs = &(struct nvkm_ofuncs) {
156 .ctor = _nvkm_dmaeng_ctor,
157 .dtor = _nvkm_dmaeng_dtor,
158 .init = _nvkm_dmaeng_init,
159 .fini = _nvkm_dmaeng_fini,
160 },
161 .sclass = nv04_dmaeng_sclass,
162 .bind = nv04_dmaobj_bind,
163}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c
deleted file mode 100644
index 4d3c828fe0e6..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c
+++ /dev/null
@@ -1,195 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26#include <core/client.h>
27#include <core/gpuobj.h>
28#include <subdev/fb.h>
29
30#include <nvif/class.h>
31#include <nvif/unpack.h>
32
33struct nv50_dmaobj_priv {
34 struct nvkm_dmaobj base;
35 u32 flags0;
36 u32 flags5;
37};
38
39static int
40nv50_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
41 struct nvkm_gpuobj **pgpuobj)
42{
43 struct nv50_dmaobj_priv *priv = (void *)dmaobj;
44 int ret;
45
46 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
47 switch (nv_mclass(parent->parent)) {
48 case NV40_CHANNEL_DMA:
49 case NV50_CHANNEL_GPFIFO:
50 case G82_CHANNEL_GPFIFO:
51 case NV50_DISP_CORE_CHANNEL_DMA:
52 case G82_DISP_CORE_CHANNEL_DMA:
53 case GT206_DISP_CORE_CHANNEL_DMA:
54 case GT200_DISP_CORE_CHANNEL_DMA:
55 case GT214_DISP_CORE_CHANNEL_DMA:
56 case NV50_DISP_BASE_CHANNEL_DMA:
57 case G82_DISP_BASE_CHANNEL_DMA:
58 case GT200_DISP_BASE_CHANNEL_DMA:
59 case GT214_DISP_BASE_CHANNEL_DMA:
60 case NV50_DISP_OVERLAY_CHANNEL_DMA:
61 case G82_DISP_OVERLAY_CHANNEL_DMA:
62 case GT200_DISP_OVERLAY_CHANNEL_DMA:
63 case GT214_DISP_OVERLAY_CHANNEL_DMA:
64 break;
65 default:
66 return -EINVAL;
67 }
68 }
69
70 ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
71 if (ret == 0) {
72 nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
73 nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
74 nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
75 nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
76 upper_32_bits(priv->base.start));
77 nv_wo32(*pgpuobj, 0x10, 0x00000000);
78 nv_wo32(*pgpuobj, 0x14, priv->flags5);
79 }
80
81 return ret;
82}
83
84static int
85nv50_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
86 struct nvkm_oclass *oclass, void *data, u32 size,
87 struct nvkm_object **pobject)
88{
89 struct nvkm_dmaeng *dmaeng = (void *)engine;
90 union {
91 struct nv50_dma_v0 v0;
92 } *args;
93 struct nv50_dmaobj_priv *priv;
94 u32 user, part, comp, kind;
95 int ret;
96
97 ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
98 *pobject = nv_object(priv);
99 if (ret)
100 return ret;
101 args = data;
102
103 nv_ioctl(parent, "create nv50 dma size %d\n", size);
104 if (nvif_unpack(args->v0, 0, 0, false)) {
105 nv_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
106 "comp %d kind %02x\n", args->v0.version,
107 args->v0.priv, args->v0.part, args->v0.comp,
108 args->v0.kind);
109 user = args->v0.priv;
110 part = args->v0.part;
111 comp = args->v0.comp;
112 kind = args->v0.kind;
113 } else
114 if (size == 0) {
115 if (priv->base.target != NV_MEM_TARGET_VM) {
116 user = NV50_DMA_V0_PRIV_US;
117 part = NV50_DMA_V0_PART_256;
118 comp = NV50_DMA_V0_COMP_NONE;
119 kind = NV50_DMA_V0_KIND_PITCH;
120 } else {
121 user = NV50_DMA_V0_PRIV_VM;
122 part = NV50_DMA_V0_PART_VM;
123 comp = NV50_DMA_V0_COMP_VM;
124 kind = NV50_DMA_V0_KIND_VM;
125 }
126 } else
127 return ret;
128
129 if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
130 return -EINVAL;
131 priv->flags0 = (comp << 29) | (kind << 22) | (user << 20);
132 priv->flags5 = (part << 16);
133
134 switch (priv->base.target) {
135 case NV_MEM_TARGET_VM:
136 priv->flags0 |= 0x00000000;
137 break;
138 case NV_MEM_TARGET_VRAM:
139 priv->flags0 |= 0x00010000;
140 break;
141 case NV_MEM_TARGET_PCI:
142 priv->flags0 |= 0x00020000;
143 break;
144 case NV_MEM_TARGET_PCI_NOSNOOP:
145 priv->flags0 |= 0x00030000;
146 break;
147 default:
148 return -EINVAL;
149 }
150
151 switch (priv->base.access) {
152 case NV_MEM_ACCESS_VM:
153 break;
154 case NV_MEM_ACCESS_RO:
155 priv->flags0 |= 0x00040000;
156 break;
157 case NV_MEM_ACCESS_WO:
158 case NV_MEM_ACCESS_RW:
159 priv->flags0 |= 0x00080000;
160 break;
161 default:
162 return -EINVAL;
163 }
164
165 return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
166}
167
168static struct nvkm_ofuncs
169nv50_dmaobj_ofuncs = {
170 .ctor = nv50_dmaobj_ctor,
171 .dtor = _nvkm_dmaobj_dtor,
172 .init = _nvkm_dmaobj_init,
173 .fini = _nvkm_dmaobj_fini,
174};
175
176static struct nvkm_oclass
177nv50_dmaeng_sclass[] = {
178 { NV_DMA_FROM_MEMORY, &nv50_dmaobj_ofuncs },
179 { NV_DMA_TO_MEMORY, &nv50_dmaobj_ofuncs },
180 { NV_DMA_IN_MEMORY, &nv50_dmaobj_ofuncs },
181 {}
182};
183
184struct nvkm_oclass *
185nv50_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
186 .base.handle = NV_ENGINE(DMAOBJ, 0x50),
187 .base.ofuncs = &(struct nvkm_ofuncs) {
188 .ctor = _nvkm_dmaeng_ctor,
189 .dtor = _nvkm_dmaeng_dtor,
190 .init = _nvkm_dmaeng_init,
191 .fini = _nvkm_dmaeng_fini,
192 },
193 .sclass = nv50_dmaeng_sclass,
194 .bind = nv50_dmaobj_bind,
195}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h
deleted file mode 100644
index 44ae8a0ca65c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h
+++ /dev/null
@@ -1,28 +0,0 @@
1#ifndef __NVKM_DMAOBJ_PRIV_H__
2#define __NVKM_DMAOBJ_PRIV_H__
3#include <engine/dmaobj.h>
4
5#define nvkm_dmaobj_create(p,e,c,pa,sa,d) \
6 nvkm_dmaobj_create_((p), (e), (c), (pa), (sa), sizeof(**d), (void **)d)
7
8int nvkm_dmaobj_create_(struct nvkm_object *, struct nvkm_object *,
9 struct nvkm_oclass *, void **, u32 *,
10 int, void **);
11#define _nvkm_dmaobj_dtor nvkm_object_destroy
12#define _nvkm_dmaobj_init nvkm_object_init
13#define _nvkm_dmaobj_fini nvkm_object_fini
14
15int _nvkm_dmaeng_ctor(struct nvkm_object *, struct nvkm_object *,
16 struct nvkm_oclass *, void *, u32,
17 struct nvkm_object **);
18#define _nvkm_dmaeng_dtor _nvkm_engine_dtor
19#define _nvkm_dmaeng_init _nvkm_engine_init
20#define _nvkm_dmaeng_fini _nvkm_engine_fini
21
22struct nvkm_dmaeng_impl {
23 struct nvkm_oclass base;
24 struct nvkm_oclass *sclass;
25 int (*bind)(struct nvkm_dmaobj *, struct nvkm_object *,
26 struct nvkm_gpuobj **);
27};
28#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 30958c19e61d..74000602fbb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -21,40 +21,95 @@
21 */ 21 */
22#include <engine/falcon.h> 22#include <engine/falcon.h>
23 23
24#include <core/device.h> 24#include <core/gpuobj.h>
25#include <subdev/timer.h> 25#include <subdev/timer.h>
26#include <engine/fifo.h>
26 27
27void 28static int
28nvkm_falcon_intr(struct nvkm_subdev *subdev) 29nvkm_falcon_oclass_get(struct nvkm_oclass *oclass, int index)
29{ 30{
30 struct nvkm_falcon *falcon = (void *)subdev; 31 struct nvkm_falcon *falcon = nvkm_falcon(oclass->engine);
31 u32 dispatch = nv_ro32(falcon, 0x01c); 32 int c = 0;
32 u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16); 33
34 while (falcon->func->sclass[c].oclass) {
35 if (c++ == index) {
36 oclass->base = falcon->func->sclass[index];
37 return index;
38 }
39 }
40
41 return c;
42}
43
44static int
45nvkm_falcon_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
46 int align, struct nvkm_gpuobj **pgpuobj)
47{
48 return nvkm_gpuobj_new(object->engine->subdev.device, 256,
49 align, true, parent, pgpuobj);
50}
51
52static const struct nvkm_object_func
53nvkm_falcon_cclass = {
54 .bind = nvkm_falcon_cclass_bind,
55};
56
57static void
58nvkm_falcon_intr(struct nvkm_engine *engine)
59{
60 struct nvkm_falcon *falcon = nvkm_falcon(engine);
61 struct nvkm_subdev *subdev = &falcon->engine.subdev;
62 struct nvkm_device *device = subdev->device;
63 const u32 base = falcon->addr;
64 u32 dest = nvkm_rd32(device, base + 0x01c);
65 u32 intr = nvkm_rd32(device, base + 0x008) & dest & ~(dest >> 16);
66 u32 inst = nvkm_rd32(device, base + 0x050) & 0x3fffffff;
67 struct nvkm_fifo_chan *chan;
68 unsigned long flags;
69
70 chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
71
72 if (intr & 0x00000040) {
73 if (falcon->func->intr) {
74 falcon->func->intr(falcon, chan);
75 nvkm_wr32(device, base + 0x004, 0x00000040);
76 intr &= ~0x00000040;
77 }
78 }
33 79
34 if (intr & 0x00000010) { 80 if (intr & 0x00000010) {
35 nv_debug(falcon, "ucode halted\n"); 81 nvkm_debug(subdev, "ucode halted\n");
36 nv_wo32(falcon, 0x004, 0x00000010); 82 nvkm_wr32(device, base + 0x004, 0x00000010);
37 intr &= ~0x00000010; 83 intr &= ~0x00000010;
38 } 84 }
39 85
40 if (intr) { 86 if (intr) {
41 nv_error(falcon, "unhandled intr 0x%08x\n", intr); 87 nvkm_error(subdev, "intr %08x\n", intr);
42 nv_wo32(falcon, 0x004, intr); 88 nvkm_wr32(device, base + 0x004, intr);
43 } 89 }
44}
45 90
46u32 91 nvkm_fifo_chan_put(device->fifo, flags, &chan);
47_nvkm_falcon_rd32(struct nvkm_object *object, u64 addr)
48{
49 struct nvkm_falcon *falcon = (void *)object;
50 return nv_rd32(falcon, falcon->addr + addr);
51} 92}
52 93
53void 94static int
54_nvkm_falcon_wr32(struct nvkm_object *object, u64 addr, u32 data) 95nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
55{ 96{
56 struct nvkm_falcon *falcon = (void *)object; 97 struct nvkm_falcon *falcon = nvkm_falcon(engine);
57 nv_wr32(falcon, falcon->addr + addr, data); 98 struct nvkm_device *device = falcon->engine.subdev.device;
99 const u32 base = falcon->addr;
100
101 if (!suspend) {
102 nvkm_memory_del(&falcon->core);
103 if (falcon->external) {
104 vfree(falcon->data.data);
105 vfree(falcon->code.data);
106 falcon->code.data = NULL;
107 }
108 }
109
110 nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
111 nvkm_wr32(device, base + 0x014, 0xffffffff);
112 return 0;
58} 113}
59 114
60static void * 115static void *
@@ -67,51 +122,66 @@ vmemdup(const void *src, size_t len)
67 return p; 122 return p;
68} 123}
69 124
70int 125static int
71_nvkm_falcon_init(struct nvkm_object *object) 126nvkm_falcon_oneinit(struct nvkm_engine *engine)
72{ 127{
73 struct nvkm_device *device = nv_device(object); 128 struct nvkm_falcon *falcon = nvkm_falcon(engine);
74 struct nvkm_falcon *falcon = (void *)object; 129 struct nvkm_subdev *subdev = &falcon->engine.subdev;
75 const struct firmware *fw; 130 struct nvkm_device *device = subdev->device;
76 char name[32] = "internal"; 131 const u32 base = falcon->addr;
77 int ret, i;
78 u32 caps; 132 u32 caps;
79 133
80 /* enable engine, and determine its capabilities */ 134 /* determine falcon capabilities */
81 ret = nvkm_engine_init(&falcon->base);
82 if (ret)
83 return ret;
84
85 if (device->chipset < 0xa3 || 135 if (device->chipset < 0xa3 ||
86 device->chipset == 0xaa || device->chipset == 0xac) { 136 device->chipset == 0xaa || device->chipset == 0xac) {
87 falcon->version = 0; 137 falcon->version = 0;
88 falcon->secret = (falcon->addr == 0x087000) ? 1 : 0; 138 falcon->secret = (falcon->addr == 0x087000) ? 1 : 0;
89 } else { 139 } else {
90 caps = nv_ro32(falcon, 0x12c); 140 caps = nvkm_rd32(device, base + 0x12c);
91 falcon->version = (caps & 0x0000000f); 141 falcon->version = (caps & 0x0000000f);
92 falcon->secret = (caps & 0x00000030) >> 4; 142 falcon->secret = (caps & 0x00000030) >> 4;
93 } 143 }
94 144
95 caps = nv_ro32(falcon, 0x108); 145 caps = nvkm_rd32(device, base + 0x108);
96 falcon->code.limit = (caps & 0x000001ff) << 8; 146 falcon->code.limit = (caps & 0x000001ff) << 8;
97 falcon->data.limit = (caps & 0x0003fe00) >> 1; 147 falcon->data.limit = (caps & 0x0003fe00) >> 1;
98 148
99 nv_debug(falcon, "falcon version: %d\n", falcon->version); 149 nvkm_debug(subdev, "falcon version: %d\n", falcon->version);
100 nv_debug(falcon, "secret level: %d\n", falcon->secret); 150 nvkm_debug(subdev, "secret level: %d\n", falcon->secret);
101 nv_debug(falcon, "code limit: %d\n", falcon->code.limit); 151 nvkm_debug(subdev, "code limit: %d\n", falcon->code.limit);
102 nv_debug(falcon, "data limit: %d\n", falcon->data.limit); 152 nvkm_debug(subdev, "data limit: %d\n", falcon->data.limit);
153 return 0;
154}
155
156static int
157nvkm_falcon_init(struct nvkm_engine *engine)
158{
159 struct nvkm_falcon *falcon = nvkm_falcon(engine);
160 struct nvkm_subdev *subdev = &falcon->engine.subdev;
161 struct nvkm_device *device = subdev->device;
162 const struct firmware *fw;
163 char name[32] = "internal";
164 const u32 base = falcon->addr;
165 int ret, i;
103 166
104 /* wait for 'uc halted' to be signalled before continuing */ 167 /* wait for 'uc halted' to be signalled before continuing */
105 if (falcon->secret && falcon->version < 4) { 168 if (falcon->secret && falcon->version < 4) {
106 if (!falcon->version) 169 if (!falcon->version) {
107 nv_wait(falcon, 0x008, 0x00000010, 0x00000010); 170 nvkm_msec(device, 2000,
108 else 171 if (nvkm_rd32(device, base + 0x008) & 0x00000010)
109 nv_wait(falcon, 0x180, 0x80000000, 0); 172 break;
110 nv_wo32(falcon, 0x004, 0x00000010); 173 );
174 } else {
175 nvkm_msec(device, 2000,
176 if (!(nvkm_rd32(device, base + 0x180) & 0x80000000))
177 break;
178 );
179 }
180 nvkm_wr32(device, base + 0x004, 0x00000010);
111 } 181 }
112 182
113 /* disable all interrupts */ 183 /* disable all interrupts */
114 nv_wo32(falcon, 0x014, 0xffffffff); 184 nvkm_wr32(device, base + 0x014, 0xffffffff);
115 185
116 /* no default ucode provided by the engine implementation, try and 186 /* no default ucode provided by the engine implementation, try and
117 * locate a "self-bootstrapping" firmware image for the engine 187 * locate a "self-bootstrapping" firmware image for the engine
@@ -120,7 +190,7 @@ _nvkm_falcon_init(struct nvkm_object *object)
120 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x", 190 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
121 device->chipset, falcon->addr >> 12); 191 device->chipset, falcon->addr >> 12);
122 192
123 ret = request_firmware(&fw, name, nv_device_base(device)); 193 ret = request_firmware(&fw, name, device->dev);
124 if (ret == 0) { 194 if (ret == 0) {
125 falcon->code.data = vmemdup(fw->data, fw->size); 195 falcon->code.data = vmemdup(fw->data, fw->size);
126 falcon->code.size = fw->size; 196 falcon->code.size = fw->size;
@@ -139,10 +209,10 @@ _nvkm_falcon_init(struct nvkm_object *object)
139 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd", 209 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
140 device->chipset, falcon->addr >> 12); 210 device->chipset, falcon->addr >> 12);
141 211
142 ret = request_firmware(&fw, name, nv_device_base(device)); 212 ret = request_firmware(&fw, name, device->dev);
143 if (ret) { 213 if (ret) {
144 nv_error(falcon, "unable to load firmware data\n"); 214 nvkm_error(subdev, "unable to load firmware data\n");
145 return ret; 215 return -ENODEV;
146 } 216 }
147 217
148 falcon->data.data = vmemdup(fw->data, fw->size); 218 falcon->data.data = vmemdup(fw->data, fw->size);
@@ -154,10 +224,10 @@ _nvkm_falcon_init(struct nvkm_object *object)
154 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc", 224 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
155 device->chipset, falcon->addr >> 12); 225 device->chipset, falcon->addr >> 12);
156 226
157 ret = request_firmware(&fw, name, nv_device_base(device)); 227 ret = request_firmware(&fw, name, device->dev);
158 if (ret) { 228 if (ret) {
159 nv_error(falcon, "unable to load firmware code\n"); 229 nvkm_error(subdev, "unable to load firmware code\n");
160 return ret; 230 return -ENODEV;
161 } 231 }
162 232
163 falcon->code.data = vmemdup(fw->data, fw->size); 233 falcon->code.data = vmemdup(fw->data, fw->size);
@@ -167,111 +237,117 @@ _nvkm_falcon_init(struct nvkm_object *object)
167 return -ENOMEM; 237 return -ENOMEM;
168 } 238 }
169 239
170 nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ? 240 nvkm_debug(subdev, "firmware: %s (%s)\n", name, falcon->data.data ?
171 "static code/data segments" : "self-bootstrapping"); 241 "static code/data segments" : "self-bootstrapping");
172 242
173 /* ensure any "self-bootstrapping" firmware image is in vram */ 243 /* ensure any "self-bootstrapping" firmware image is in vram */
174 if (!falcon->data.data && !falcon->core) { 244 if (!falcon->data.data && !falcon->core) {
175 ret = nvkm_gpuobj_new(object->parent, NULL, falcon->code.size, 245 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
176 256, 0, &falcon->core); 246 falcon->code.size, 256, false,
247 &falcon->core);
177 if (ret) { 248 if (ret) {
178 nv_error(falcon, "core allocation failed, %d\n", ret); 249 nvkm_error(subdev, "core allocation failed, %d\n", ret);
179 return ret; 250 return ret;
180 } 251 }
181 252
253 nvkm_kmap(falcon->core);
182 for (i = 0; i < falcon->code.size; i += 4) 254 for (i = 0; i < falcon->code.size; i += 4)
183 nv_wo32(falcon->core, i, falcon->code.data[i / 4]); 255 nvkm_wo32(falcon->core, i, falcon->code.data[i / 4]);
256 nvkm_done(falcon->core);
184 } 257 }
185 258
186 /* upload firmware bootloader (or the full code segments) */ 259 /* upload firmware bootloader (or the full code segments) */
187 if (falcon->core) { 260 if (falcon->core) {
261 u64 addr = nvkm_memory_addr(falcon->core);
188 if (device->card_type < NV_C0) 262 if (device->card_type < NV_C0)
189 nv_wo32(falcon, 0x618, 0x04000000); 263 nvkm_wr32(device, base + 0x618, 0x04000000);
190 else 264 else
191 nv_wo32(falcon, 0x618, 0x00000114); 265 nvkm_wr32(device, base + 0x618, 0x00000114);
192 nv_wo32(falcon, 0x11c, 0); 266 nvkm_wr32(device, base + 0x11c, 0);
193 nv_wo32(falcon, 0x110, falcon->core->addr >> 8); 267 nvkm_wr32(device, base + 0x110, addr >> 8);
194 nv_wo32(falcon, 0x114, 0); 268 nvkm_wr32(device, base + 0x114, 0);
195 nv_wo32(falcon, 0x118, 0x00006610); 269 nvkm_wr32(device, base + 0x118, 0x00006610);
196 } else { 270 } else {
197 if (falcon->code.size > falcon->code.limit || 271 if (falcon->code.size > falcon->code.limit ||
198 falcon->data.size > falcon->data.limit) { 272 falcon->data.size > falcon->data.limit) {
199 nv_error(falcon, "ucode exceeds falcon limit(s)\n"); 273 nvkm_error(subdev, "ucode exceeds falcon limit(s)\n");
200 return -EINVAL; 274 return -EINVAL;
201 } 275 }
202 276
203 if (falcon->version < 3) { 277 if (falcon->version < 3) {
204 nv_wo32(falcon, 0xff8, 0x00100000); 278 nvkm_wr32(device, base + 0xff8, 0x00100000);
205 for (i = 0; i < falcon->code.size / 4; i++) 279 for (i = 0; i < falcon->code.size / 4; i++)
206 nv_wo32(falcon, 0xff4, falcon->code.data[i]); 280 nvkm_wr32(device, base + 0xff4, falcon->code.data[i]);
207 } else { 281 } else {
208 nv_wo32(falcon, 0x180, 0x01000000); 282 nvkm_wr32(device, base + 0x180, 0x01000000);
209 for (i = 0; i < falcon->code.size / 4; i++) { 283 for (i = 0; i < falcon->code.size / 4; i++) {
210 if ((i & 0x3f) == 0) 284 if ((i & 0x3f) == 0)
211 nv_wo32(falcon, 0x188, i >> 6); 285 nvkm_wr32(device, base + 0x188, i >> 6);
212 nv_wo32(falcon, 0x184, falcon->code.data[i]); 286 nvkm_wr32(device, base + 0x184, falcon->code.data[i]);
213 } 287 }
214 } 288 }
215 } 289 }
216 290
217 /* upload data segment (if necessary), zeroing the remainder */ 291 /* upload data segment (if necessary), zeroing the remainder */
218 if (falcon->version < 3) { 292 if (falcon->version < 3) {
219 nv_wo32(falcon, 0xff8, 0x00000000); 293 nvkm_wr32(device, base + 0xff8, 0x00000000);
220 for (i = 0; !falcon->core && i < falcon->data.size / 4; i++) 294 for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
221 nv_wo32(falcon, 0xff4, falcon->data.data[i]); 295 nvkm_wr32(device, base + 0xff4, falcon->data.data[i]);
222 for (; i < falcon->data.limit; i += 4) 296 for (; i < falcon->data.limit; i += 4)
223 nv_wo32(falcon, 0xff4, 0x00000000); 297 nvkm_wr32(device, base + 0xff4, 0x00000000);
224 } else { 298 } else {
225 nv_wo32(falcon, 0x1c0, 0x01000000); 299 nvkm_wr32(device, base + 0x1c0, 0x01000000);
226 for (i = 0; !falcon->core && i < falcon->data.size / 4; i++) 300 for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
227 nv_wo32(falcon, 0x1c4, falcon->data.data[i]); 301 nvkm_wr32(device, base + 0x1c4, falcon->data.data[i]);
228 for (; i < falcon->data.limit / 4; i++) 302 for (; i < falcon->data.limit / 4; i++)
229 nv_wo32(falcon, 0x1c4, 0x00000000); 303 nvkm_wr32(device, base + 0x1c4, 0x00000000);
230 } 304 }
231 305
232 /* start it running */ 306 /* start it running */
233 nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */ 307 nvkm_wr32(device, base + 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
234 nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */ 308 nvkm_wr32(device, base + 0x104, 0x00000000); /* ENTRY */
235 nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */ 309 nvkm_wr32(device, base + 0x100, 0x00000002); /* TRIGGER */
236 nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */ 310 nvkm_wr32(device, base + 0x048, 0x00000003); /* FIFO | CHSW */
311
312 if (falcon->func->init)
313 falcon->func->init(falcon);
237 return 0; 314 return 0;
238} 315}
239 316
240int 317static void *
241_nvkm_falcon_fini(struct nvkm_object *object, bool suspend) 318nvkm_falcon_dtor(struct nvkm_engine *engine)
242{ 319{
243 struct nvkm_falcon *falcon = (void *)object; 320 return nvkm_falcon(engine);
244
245 if (!suspend) {
246 nvkm_gpuobj_ref(NULL, &falcon->core);
247 if (falcon->external) {
248 vfree(falcon->data.data);
249 vfree(falcon->code.data);
250 falcon->code.data = NULL;
251 }
252 }
253
254 nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
255 nv_wo32(falcon, 0x014, 0xffffffff);
256
257 return nvkm_engine_fini(&falcon->base, suspend);
258} 321}
259 322
323static const struct nvkm_engine_func
324nvkm_falcon = {
325 .dtor = nvkm_falcon_dtor,
326 .oneinit = nvkm_falcon_oneinit,
327 .init = nvkm_falcon_init,
328 .fini = nvkm_falcon_fini,
329 .intr = nvkm_falcon_intr,
330 .fifo.sclass = nvkm_falcon_oclass_get,
331 .cclass = &nvkm_falcon_cclass,
332};
333
260int 334int
261nvkm_falcon_create_(struct nvkm_object *parent, struct nvkm_object *engine, 335nvkm_falcon_new_(const struct nvkm_falcon_func *func,
262 struct nvkm_oclass *oclass, u32 addr, bool enable, 336 struct nvkm_device *device, int index, bool enable,
263 const char *iname, const char *fname, 337 u32 addr, struct nvkm_engine **pengine)
264 int length, void **pobject)
265{ 338{
266 struct nvkm_falcon *falcon; 339 struct nvkm_falcon *falcon;
267 int ret;
268
269 ret = nvkm_engine_create_(parent, engine, oclass, enable, iname,
270 fname, length, pobject);
271 falcon = *pobject;
272 if (ret)
273 return ret;
274 340
341 if (!(falcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
342 return -ENOMEM;
343 falcon->func = func;
275 falcon->addr = addr; 344 falcon->addr = addr;
276 return 0; 345 falcon->code.data = func->code.data;
346 falcon->code.size = func->code.size;
347 falcon->data.data = func->data.data;
348 falcon->data.size = func->data.size;
349 *pengine = &falcon->engine;
350
351 return nvkm_engine_ctor(&nvkm_falcon, device, index, func->pmc_enable,
352 enable, &falcon->engine);
277} 353}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 42891cb71ea3..74993c144a84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -7,6 +7,24 @@ nvkm-y += nvkm/engine/fifo/nv50.o
7nvkm-y += nvkm/engine/fifo/g84.o 7nvkm-y += nvkm/engine/fifo/g84.o
8nvkm-y += nvkm/engine/fifo/gf100.o 8nvkm-y += nvkm/engine/fifo/gf100.o
9nvkm-y += nvkm/engine/fifo/gk104.o 9nvkm-y += nvkm/engine/fifo/gk104.o
10nvkm-y += nvkm/engine/fifo/gk20a.o
11nvkm-y += nvkm/engine/fifo/gk208.o 10nvkm-y += nvkm/engine/fifo/gk208.o
11nvkm-y += nvkm/engine/fifo/gk20a.o
12nvkm-y += nvkm/engine/fifo/gm204.o 12nvkm-y += nvkm/engine/fifo/gm204.o
13nvkm-y += nvkm/engine/fifo/gm20b.o
14
15nvkm-y += nvkm/engine/fifo/chan.o
16nvkm-y += nvkm/engine/fifo/channv50.o
17nvkm-y += nvkm/engine/fifo/chang84.o
18
19nvkm-y += nvkm/engine/fifo/dmanv04.o
20nvkm-y += nvkm/engine/fifo/dmanv10.o
21nvkm-y += nvkm/engine/fifo/dmanv17.o
22nvkm-y += nvkm/engine/fifo/dmanv40.o
23nvkm-y += nvkm/engine/fifo/dmanv50.o
24nvkm-y += nvkm/engine/fifo/dmag84.o
25
26nvkm-y += nvkm/engine/fifo/gpfifonv50.o
27nvkm-y += nvkm/engine/fifo/gpfifog84.o
28nvkm-y += nvkm/engine/fifo/gpfifogf100.o
29nvkm-y += nvkm/engine/fifo/gpfifogk104.o
30nvkm-y += nvkm/engine/fifo/gpfifogm204.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index fa223f88d25e..1fbbfbe6ca9c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -21,156 +21,108 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <engine/fifo.h> 24#include "priv.h"
25#include "chan.h"
25 26
26#include <core/client.h> 27#include <core/client.h>
27#include <core/device.h> 28#include <core/gpuobj.h>
28#include <core/handle.h>
29#include <core/notify.h> 29#include <core/notify.h>
30#include <engine/dmaobj.h>
31 30
32#include <nvif/class.h>
33#include <nvif/event.h> 31#include <nvif/event.h>
34#include <nvif/unpack.h> 32#include <nvif/unpack.h>
35 33
36static int 34void
37nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size, 35nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
38 struct nvkm_notify *notify)
39{ 36{
40 if (size == 0) { 37 return fifo->func->pause(fifo, flags);
41 notify->size = 0;
42 notify->types = 1;
43 notify->index = 0;
44 return 0;
45 }
46 return -ENOSYS;
47} 38}
48 39
49static const struct nvkm_event_func 40void
50nvkm_fifo_event_func = { 41nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
51 .ctor = nvkm_fifo_event_ctor,
52};
53
54int
55nvkm_fifo_channel_create_(struct nvkm_object *parent,
56 struct nvkm_object *engine,
57 struct nvkm_oclass *oclass,
58 int bar, u32 addr, u32 size, u32 pushbuf,
59 u64 engmask, int len, void **ptr)
60{ 42{
61 struct nvkm_device *device = nv_device(engine); 43 return fifo->func->start(fifo, flags);
62 struct nvkm_fifo *priv = (void *)engine; 44}
63 struct nvkm_fifo_chan *chan;
64 struct nvkm_dmaeng *dmaeng;
65 unsigned long flags;
66 int ret;
67
68 /* create base object class */
69 ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
70 engmask, len, ptr);
71 chan = *ptr;
72 if (ret)
73 return ret;
74 45
75 /* validate dma object representing push buffer */ 46void
76 chan->pushdma = (void *)nvkm_handle_ref(parent, pushbuf); 47nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
77 if (!chan->pushdma) 48 struct nvkm_fifo_chan **pchan)
78 return -ENOENT; 49{
79 50 struct nvkm_fifo_chan *chan = *pchan;
80 dmaeng = (void *)chan->pushdma->base.engine; 51 if (likely(chan)) {
81 switch (chan->pushdma->base.oclass->handle) { 52 *pchan = NULL;
82 case NV_DMA_FROM_MEMORY: 53 spin_unlock_irqrestore(&fifo->lock, flags);
83 case NV_DMA_IN_MEMORY:
84 break;
85 default:
86 return -EINVAL;
87 } 54 }
55}
88 56
89 ret = dmaeng->bind(chan->pushdma, parent, &chan->pushgpu); 57struct nvkm_fifo_chan *
90 if (ret) 58nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
91 return ret; 59{
92 60 struct nvkm_fifo_chan *chan;
93 /* find a free fifo channel */ 61 unsigned long flags;
94 spin_lock_irqsave(&priv->lock, flags); 62 spin_lock_irqsave(&fifo->lock, flags);
95 for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) { 63 list_for_each_entry(chan, &fifo->chan, head) {
96 if (!priv->channel[chan->chid]) { 64 if (chan->inst->addr == inst) {
97 priv->channel[chan->chid] = nv_object(chan); 65 list_del(&chan->head);
98 break; 66 list_add(&chan->head, &fifo->chan);
67 *rflags = flags;
68 return chan;
99 } 69 }
100 } 70 }
101 spin_unlock_irqrestore(&priv->lock, flags); 71 spin_unlock_irqrestore(&fifo->lock, flags);
102 72 return NULL;
103 if (chan->chid == priv->max) {
104 nv_error(priv, "no free channels\n");
105 return -ENOSPC;
106 }
107
108 chan->addr = nv_device_resource_start(device, bar) +
109 addr + size * chan->chid;
110 chan->size = size;
111 nvkm_event_send(&priv->cevent, 1, 0, NULL, 0);
112 return 0;
113} 73}
114 74
115void 75struct nvkm_fifo_chan *
116nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan) 76nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
117{ 77{
118 struct nvkm_fifo *priv = (void *)nv_object(chan)->engine; 78 struct nvkm_fifo_chan *chan;
119 unsigned long flags; 79 unsigned long flags;
120 80 spin_lock_irqsave(&fifo->lock, flags);
121 if (chan->user) 81 list_for_each_entry(chan, &fifo->chan, head) {
122 iounmap(chan->user); 82 if (chan->chid == chid) {
123 83 list_del(&chan->head);
124 spin_lock_irqsave(&priv->lock, flags); 84 list_add(&chan->head, &fifo->chan);
125 priv->channel[chan->chid] = NULL; 85 *rflags = flags;
126 spin_unlock_irqrestore(&priv->lock, flags); 86 return chan;
127 87 }
128 nvkm_gpuobj_ref(NULL, &chan->pushgpu); 88 }
129 nvkm_object_ref(NULL, (struct nvkm_object **)&chan->pushdma); 89 spin_unlock_irqrestore(&fifo->lock, flags);
130 nvkm_namedb_destroy(&chan->namedb); 90 return NULL;
131} 91}
132 92
133void 93static int
134_nvkm_fifo_channel_dtor(struct nvkm_object *object) 94nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
95 struct nvkm_notify *notify)
135{ 96{
136 struct nvkm_fifo_chan *chan = (void *)object; 97 if (size == 0) {
137 nvkm_fifo_channel_destroy(chan); 98 notify->size = 0;
99 notify->types = 1;
100 notify->index = 0;
101 return 0;
102 }
103 return -ENOSYS;
138} 104}
139 105
140int 106static const struct nvkm_event_func
141_nvkm_fifo_channel_map(struct nvkm_object *object, u64 *addr, u32 *size) 107nvkm_fifo_event_func = {
142{ 108 .ctor = nvkm_fifo_event_ctor,
143 struct nvkm_fifo_chan *chan = (void *)object; 109};
144 *addr = chan->addr;
145 *size = chan->size;
146 return 0;
147}
148 110
149u32 111static void
150_nvkm_fifo_channel_rd32(struct nvkm_object *object, u64 addr) 112nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
151{ 113{
152 struct nvkm_fifo_chan *chan = (void *)object; 114 struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
153 if (unlikely(!chan->user)) { 115 fifo->func->uevent_fini(fifo);
154 chan->user = ioremap(chan->addr, chan->size);
155 if (WARN_ON_ONCE(chan->user == NULL))
156 return 0;
157 }
158 return ioread32_native(chan->user + addr);
159} 116}
160 117
161void 118static void
162_nvkm_fifo_channel_wr32(struct nvkm_object *object, u64 addr, u32 data) 119nvkm_fifo_uevent_init(struct nvkm_event *event, int type, int index)
163{ 120{
164 struct nvkm_fifo_chan *chan = (void *)object; 121 struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
165 if (unlikely(!chan->user)) { 122 fifo->func->uevent_init(fifo);
166 chan->user = ioremap(chan->addr, chan->size);
167 if (WARN_ON_ONCE(chan->user == NULL))
168 return;
169 }
170 iowrite32_native(data, chan->user + addr);
171} 123}
172 124
173int 125static int
174nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size, 126nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
175 struct nvkm_notify *notify) 127 struct nvkm_notify *notify)
176{ 128{
@@ -188,6 +140,13 @@ nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
188 return ret; 140 return ret;
189} 141}
190 142
143static const struct nvkm_event_func
144nvkm_fifo_uevent_func = {
145 .ctor = nvkm_fifo_uevent_ctor,
146 .init = nvkm_fifo_uevent_init,
147 .fini = nvkm_fifo_uevent_fini,
148};
149
191void 150void
192nvkm_fifo_uevent(struct nvkm_fifo *fifo) 151nvkm_fifo_uevent(struct nvkm_fifo *fifo)
193{ 152{
@@ -196,87 +155,123 @@ nvkm_fifo_uevent(struct nvkm_fifo *fifo)
196 nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep)); 155 nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
197} 156}
198 157
199int 158static int
200_nvkm_fifo_channel_ntfy(struct nvkm_object *object, u32 type, 159nvkm_fifo_class_new(struct nvkm_device *device,
201 struct nvkm_event **event) 160 const struct nvkm_oclass *oclass, void *data, u32 size,
161 struct nvkm_object **pobject)
202{ 162{
203 struct nvkm_fifo *fifo = (void *)object->engine; 163 const struct nvkm_fifo_chan_oclass *sclass = oclass->engn;
204 switch (type) { 164 struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
205 case G82_CHANNEL_DMA_V0_NTFY_UEVENT: 165 return sclass->ctor(fifo, oclass, data, size, pobject);
206 if (nv_mclass(object) >= G82_CHANNEL_DMA) {
207 *event = &fifo->uevent;
208 return 0;
209 }
210 break;
211 default:
212 break;
213 }
214 return -EINVAL;
215} 166}
216 167
168static const struct nvkm_device_oclass
169nvkm_fifo_class = {
170 .ctor = nvkm_fifo_class_new,
171};
172
217static int 173static int
218nvkm_fifo_chid(struct nvkm_fifo *priv, struct nvkm_object *object) 174nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index,
175 const struct nvkm_device_oclass **class)
219{ 176{
220 int engidx = nv_hclass(priv) & 0xff; 177 struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
221 178 const struct nvkm_fifo_chan_oclass *sclass;
222 while (object && object->parent) { 179 int c = 0;
223 if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) && 180
224 (nv_hclass(object->parent) & 0xff) == engidx) 181 while ((sclass = fifo->func->chan[c])) {
225 return nvkm_fifo_chan(object)->chid; 182 if (c++ == index) {
226 object = object->parent; 183 oclass->base = sclass->base;
184 oclass->engn = sclass;
185 *class = &nvkm_fifo_class;
186 return 0;
187 }
227 } 188 }
228 189
229 return -1; 190 return c;
230} 191}
231 192
232const char * 193static void
233nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid) 194nvkm_fifo_intr(struct nvkm_engine *engine)
234{ 195{
235 struct nvkm_fifo_chan *chan = NULL; 196 struct nvkm_fifo *fifo = nvkm_fifo(engine);
236 unsigned long flags; 197 fifo->func->intr(fifo);
198}
237 199
238 spin_lock_irqsave(&fifo->lock, flags); 200static int
239 if (chid >= fifo->min && chid <= fifo->max) 201nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
240 chan = (void *)fifo->channel[chid]; 202{
241 spin_unlock_irqrestore(&fifo->lock, flags); 203 struct nvkm_fifo *fifo = nvkm_fifo(engine);
204 if (fifo->func->fini)
205 fifo->func->fini(fifo);
206 return 0;
207}
242 208
243 return nvkm_client_name(chan); 209static int
210nvkm_fifo_oneinit(struct nvkm_engine *engine)
211{
212 struct nvkm_fifo *fifo = nvkm_fifo(engine);
213 if (fifo->func->oneinit)
214 return fifo->func->oneinit(fifo);
215 return 0;
244} 216}
245 217
246void 218static int
247nvkm_fifo_destroy(struct nvkm_fifo *priv) 219nvkm_fifo_init(struct nvkm_engine *engine)
248{ 220{
249 kfree(priv->channel); 221 struct nvkm_fifo *fifo = nvkm_fifo(engine);
250 nvkm_event_fini(&priv->uevent); 222 fifo->func->init(fifo);
251 nvkm_event_fini(&priv->cevent); 223 return 0;
252 nvkm_engine_destroy(&priv->base);
253} 224}
254 225
226static void *
227nvkm_fifo_dtor(struct nvkm_engine *engine)
228{
229 struct nvkm_fifo *fifo = nvkm_fifo(engine);
230 void *data = fifo;
231 if (fifo->func->dtor)
232 data = fifo->func->dtor(fifo);
233 nvkm_event_fini(&fifo->cevent);
234 nvkm_event_fini(&fifo->uevent);
235 return data;
236}
237
238static const struct nvkm_engine_func
239nvkm_fifo = {
240 .dtor = nvkm_fifo_dtor,
241 .oneinit = nvkm_fifo_oneinit,
242 .init = nvkm_fifo_init,
243 .fini = nvkm_fifo_fini,
244 .intr = nvkm_fifo_intr,
245 .base.sclass = nvkm_fifo_class_get,
246};
247
255int 248int
256nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine, 249nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
257 struct nvkm_oclass *oclass, 250 int index, int nr, struct nvkm_fifo *fifo)
258 int min, int max, int length, void **pobject)
259{ 251{
260 struct nvkm_fifo *priv;
261 int ret; 252 int ret;
262 253
263 ret = nvkm_engine_create_(parent, engine, oclass, true, "PFIFO", 254 fifo->func = func;
264 "fifo", length, pobject); 255 INIT_LIST_HEAD(&fifo->chan);
265 priv = *pobject; 256 spin_lock_init(&fifo->lock);
266 if (ret)
267 return ret;
268 257
269 priv->min = min; 258 if (WARN_ON(fifo->nr > NVKM_FIFO_CHID_NR))
270 priv->max = max; 259 fifo->nr = NVKM_FIFO_CHID_NR;
271 priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL); 260 else
272 if (!priv->channel) 261 fifo->nr = nr;
273 return -ENOMEM; 262 bitmap_clear(fifo->mask, 0, fifo->nr);
274 263
275 ret = nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &priv->cevent); 264 ret = nvkm_engine_ctor(&nvkm_fifo, device, index, 0x00000100,
265 true, &fifo->engine);
276 if (ret) 266 if (ret)
277 return ret; 267 return ret;
278 268
279 priv->chid = nvkm_fifo_chid; 269 if (func->uevent_init) {
280 spin_lock_init(&priv->lock); 270 ret = nvkm_event_init(&nvkm_fifo_uevent_func, 1, 1,
281 return 0; 271 &fifo->uevent);
272 if (ret)
273 return ret;
274 }
275
276 return nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &fifo->cevent);
282} 277}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
new file mode 100644
index 000000000000..dc6d4678f228
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -0,0 +1,415 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "chan.h"
25
26#include <core/client.h>
27#include <core/gpuobj.h>
28#include <core/oproxy.h>
29#include <subdev/mmu.h>
30#include <engine/dma.h>
31
32struct nvkm_fifo_chan_object {
33 struct nvkm_oproxy oproxy;
34 struct nvkm_fifo_chan *chan;
35 int hash;
36};
37
38static int
39nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
40{
41 struct nvkm_fifo_chan_object *object =
42 container_of(base, typeof(*object), oproxy);
43 struct nvkm_engine *engine = object->oproxy.object->engine;
44 struct nvkm_fifo_chan *chan = object->chan;
45 struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
46 const char *name = nvkm_subdev_name[engine->subdev.index];
47 int ret = 0;
48
49 if (--engn->usecount)
50 return 0;
51
52 if (chan->func->engine_fini) {
53 ret = chan->func->engine_fini(chan, engine, suspend);
54 if (ret) {
55 nvif_error(&chan->object,
56 "detach %s failed, %d\n", name, ret);
57 return ret;
58 }
59 }
60
61 if (engn->object) {
62 ret = nvkm_object_fini(engn->object, suspend);
63 if (ret && suspend)
64 return ret;
65 }
66
67 nvif_trace(&chan->object, "detached %s\n", name);
68 return ret;
69}
70
71static int
72nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
73{
74 struct nvkm_fifo_chan_object *object =
75 container_of(base, typeof(*object), oproxy);
76 struct nvkm_engine *engine = object->oproxy.object->engine;
77 struct nvkm_fifo_chan *chan = object->chan;
78 struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
79 const char *name = nvkm_subdev_name[engine->subdev.index];
80 int ret;
81
82 if (engn->usecount++)
83 return 0;
84
85 if (engn->object) {
86 ret = nvkm_object_init(engn->object);
87 if (ret)
88 return ret;
89 }
90
91 if (chan->func->engine_init) {
92 ret = chan->func->engine_init(chan, engine);
93 if (ret) {
94 nvif_error(&chan->object,
95 "attach %s failed, %d\n", name, ret);
96 return ret;
97 }
98 }
99
100 nvif_trace(&chan->object, "attached %s\n", name);
101 return 0;
102}
103
104static void
105nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
106{
107 struct nvkm_fifo_chan_object *object =
108 container_of(base, typeof(*object), oproxy);
109 struct nvkm_engine *engine = object->oproxy.base.engine;
110 struct nvkm_fifo_chan *chan = object->chan;
111 struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
112
113 if (chan->func->object_dtor)
114 chan->func->object_dtor(chan, object->hash);
115
116 if (!--engn->refcount) {
117 if (chan->func->engine_dtor)
118 chan->func->engine_dtor(chan, engine);
119 nvkm_object_del(&engn->object);
120 if (chan->vm)
121 atomic_dec(&chan->vm->engref[engine->subdev.index]);
122 }
123}
124
125static const struct nvkm_oproxy_func
126nvkm_fifo_chan_child_func = {
127 .dtor[0] = nvkm_fifo_chan_child_del,
128 .init[0] = nvkm_fifo_chan_child_init,
129 .fini[0] = nvkm_fifo_chan_child_fini,
130};
131
132static int
133nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
134 struct nvkm_object **pobject)
135{
136 struct nvkm_engine *engine = oclass->engine;
137 struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
138 struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
139 struct nvkm_fifo_chan_object *object;
140 int ret = 0;
141
142 if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
143 return -ENOMEM;
144 nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
145 object->chan = chan;
146 *pobject = &object->oproxy.base;
147
148 if (!engn->refcount++) {
149 struct nvkm_oclass cclass = {
150 .client = oclass->client,
151 .engine = oclass->engine,
152 };
153
154 if (chan->vm)
155 atomic_inc(&chan->vm->engref[engine->subdev.index]);
156
157 if (engine->func->fifo.cclass) {
158 ret = engine->func->fifo.cclass(chan, &cclass,
159 &engn->object);
160 } else
161 if (engine->func->cclass) {
162 ret = nvkm_object_new_(engine->func->cclass, &cclass,
163 NULL, 0, &engn->object);
164 }
165 if (ret)
166 return ret;
167
168 if (chan->func->engine_ctor) {
169 ret = chan->func->engine_ctor(chan, oclass->engine,
170 engn->object);
171 if (ret)
172 return ret;
173 }
174 }
175
176 ret = oclass->base.ctor(&(const struct nvkm_oclass) {
177 .base = oclass->base,
178 .engn = oclass->engn,
179 .handle = oclass->handle,
180 .object = oclass->object,
181 .client = oclass->client,
182 .parent = engn->object ?
183 engn->object :
184 oclass->parent,
185 .engine = engine,
186 }, data, size, &object->oproxy.object);
187 if (ret)
188 return ret;
189
190 if (chan->func->object_ctor) {
191 object->hash =
192 chan->func->object_ctor(chan, object->oproxy.object);
193 if (object->hash < 0)
194 return object->hash;
195 }
196
197 return 0;
198}
199
200static int
201nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
202 struct nvkm_oclass *oclass)
203{
204 struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
205 struct nvkm_fifo *fifo = chan->fifo;
206 struct nvkm_device *device = fifo->engine.subdev.device;
207 struct nvkm_engine *engine;
208 u64 mask = chan->engines;
209 int ret, i, c;
210
211 for (; c = 0, i = __ffs64(mask), mask; mask &= ~(1ULL << i)) {
212 if (!(engine = nvkm_device_engine(device, i)))
213 continue;
214 oclass->engine = engine;
215 oclass->base.oclass = 0;
216
217 if (engine->func->fifo.sclass) {
218 ret = engine->func->fifo.sclass(oclass, index);
219 if (oclass->base.oclass) {
220 if (!oclass->base.ctor)
221 oclass->base.ctor = nvkm_object_new;
222 oclass->ctor = nvkm_fifo_chan_child_new;
223 return 0;
224 }
225
226 index -= ret;
227 continue;
228 }
229
230 while (engine->func->sclass[c].oclass) {
231 if (c++ == index) {
232 oclass->base = engine->func->sclass[index];
233 if (!oclass->base.ctor)
234 oclass->base.ctor = nvkm_object_new;
235 oclass->ctor = nvkm_fifo_chan_child_new;
236 return 0;
237 }
238 }
239 index -= c;
240 }
241
242 return -EINVAL;
243}
244
245static int
246nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
247 struct nvkm_event **pevent)
248{
249 struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
250 if (chan->func->ntfy)
251 return chan->func->ntfy(chan, type, pevent);
252 return -ENODEV;
253}
254
255static int
256nvkm_fifo_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
257{
258 struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
259 *addr = chan->addr;
260 *size = chan->size;
261 return 0;
262}
263
264static int
265nvkm_fifo_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
266{
267 struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
268 if (unlikely(!chan->user)) {
269 chan->user = ioremap(chan->addr, chan->size);
270 if (!chan->user)
271 return -ENOMEM;
272 }
273 if (unlikely(addr + 4 > chan->size))
274 return -EINVAL;
275 *data = ioread32_native(chan->user + addr);
276 return 0;
277}
278
279static int
280nvkm_fifo_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
281{
282 struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
283 if (unlikely(!chan->user)) {
284 chan->user = ioremap(chan->addr, chan->size);
285 if (!chan->user)
286 return -ENOMEM;
287 }
288 if (unlikely(addr + 4 > chan->size))
289 return -EINVAL;
290 iowrite32_native(data, chan->user + addr);
291 return 0;
292}
293
294static int
295nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
296{
297 struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
298 chan->func->fini(chan);
299 return 0;
300}
301
302static int
303nvkm_fifo_chan_init(struct nvkm_object *object)
304{
305 struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
306 chan->func->init(chan);
307 return 0;
308}
309
310static void *
311nvkm_fifo_chan_dtor(struct nvkm_object *object)
312{
313 struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
314 struct nvkm_fifo *fifo = chan->fifo;
315 void *data = chan->func->dtor(chan);
316 unsigned long flags;
317
318 spin_lock_irqsave(&fifo->lock, flags);
319 if (!list_empty(&chan->head)) {
320 __clear_bit(chan->chid, fifo->mask);
321 list_del(&chan->head);
322 }
323 spin_unlock_irqrestore(&fifo->lock, flags);
324
325 if (chan->user)
326 iounmap(chan->user);
327
328 nvkm_vm_ref(NULL, &chan->vm, NULL);
329
330 nvkm_gpuobj_del(&chan->push);
331 nvkm_gpuobj_del(&chan->inst);
332 return data;
333}
334
335static const struct nvkm_object_func
336nvkm_fifo_chan_func = {
337 .dtor = nvkm_fifo_chan_dtor,
338 .init = nvkm_fifo_chan_init,
339 .fini = nvkm_fifo_chan_fini,
340 .ntfy = nvkm_fifo_chan_ntfy,
341 .map = nvkm_fifo_chan_map,
342 .rd32 = nvkm_fifo_chan_rd32,
343 .wr32 = nvkm_fifo_chan_wr32,
344 .sclass = nvkm_fifo_chan_child_get,
345};
346
347int
348nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
349 struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
350 u64 vm, u64 push, u64 engines, int bar, u32 base, u32 user,
351 const struct nvkm_oclass *oclass,
352 struct nvkm_fifo_chan *chan)
353{
354 struct nvkm_client *client = oclass->client;
355 struct nvkm_device *device = fifo->engine.subdev.device;
356 struct nvkm_mmu *mmu = device->mmu;
357 struct nvkm_dmaobj *dmaobj;
358 unsigned long flags;
359 int ret;
360
361 nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
362 chan->func = func;
363 chan->fifo = fifo;
364 chan->engines = engines;
365 INIT_LIST_HEAD(&chan->head);
366
367 /* instance memory */
368 ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
369 if (ret)
370 return ret;
371
372 /* allocate push buffer ctxdma instance */
373 if (push) {
374 dmaobj = nvkm_dma_search(device->dma, oclass->client, push);
375 if (!dmaobj)
376 return -ENOENT;
377
378 ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
379 &chan->push);
380 if (ret)
381 return ret;
382 }
383
384 /* channel address space */
385 if (!vm && mmu) {
386 if (!client->vm || client->vm->mmu == mmu) {
387 ret = nvkm_vm_ref(client->vm, &chan->vm, NULL);
388 if (ret)
389 return ret;
390 } else {
391 return -EINVAL;
392 }
393 } else {
394 return -ENOENT;
395 }
396
397 /* allocate channel id */
398 spin_lock_irqsave(&fifo->lock, flags);
399 chan->chid = find_first_zero_bit(fifo->mask, NVKM_FIFO_CHID_NR);
400 if (chan->chid >= NVKM_FIFO_CHID_NR) {
401 spin_unlock_irqrestore(&fifo->lock, flags);
402 return -ENOSPC;
403 }
404 list_add(&chan->head, &fifo->chan);
405 __set_bit(chan->chid, fifo->mask);
406 spin_unlock_irqrestore(&fifo->lock, flags);
407
408 /* determine address of this channel's user registers */
409 chan->addr = device->func->resource_addr(device, bar) +
410 base + user * chan->chid;
411 chan->size = user;
412
413 nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
414 return 0;
415}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
new file mode 100644
index 000000000000..55dc415c5c08
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -0,0 +1,33 @@
1#ifndef __NVKM_FIFO_CHAN_H__
2#define __NVKM_FIFO_CHAN_H__
3#define nvkm_fifo_chan(p) container_of((p), struct nvkm_fifo_chan, object)
4#include "priv.h"
5
6struct nvkm_fifo_chan_func {
7 void *(*dtor)(struct nvkm_fifo_chan *);
8 void (*init)(struct nvkm_fifo_chan *);
9 void (*fini)(struct nvkm_fifo_chan *);
10 int (*ntfy)(struct nvkm_fifo_chan *, u32 type, struct nvkm_event **);
11 int (*engine_ctor)(struct nvkm_fifo_chan *, struct nvkm_engine *,
12 struct nvkm_object *);
13 void (*engine_dtor)(struct nvkm_fifo_chan *, struct nvkm_engine *);
14 int (*engine_init)(struct nvkm_fifo_chan *, struct nvkm_engine *);
15 int (*engine_fini)(struct nvkm_fifo_chan *, struct nvkm_engine *,
16 bool suspend);
17 int (*object_ctor)(struct nvkm_fifo_chan *, struct nvkm_object *);
18 void (*object_dtor)(struct nvkm_fifo_chan *, int);
19};
20
21int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *,
22 u32 size, u32 align, bool zero, u64 vm, u64 push,
23 u64 engines, int bar, u32 base, u32 user,
24 const struct nvkm_oclass *, struct nvkm_fifo_chan *);
25
26struct nvkm_fifo_chan_oclass {
27 int (*ctor)(struct nvkm_fifo *, const struct nvkm_oclass *,
28 void *data, u32 size, struct nvkm_object **);
29 struct nvkm_sclass base;
30};
31
32int g84_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **);
33#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
new file mode 100644
index 000000000000..04305241ceed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -0,0 +1,285 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25
26#include <core/client.h>
27#include <core/ramht.h>
28#include <subdev/mmu.h>
29#include <subdev/timer.h>
30
31#include <nvif/class.h>
32
33int
34g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
35 struct nvkm_event **pevent)
36{
37 switch (type) {
38 case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
39 *pevent = &chan->fifo->uevent;
40 return 0;
41 default:
42 break;
43 }
44 return -EINVAL;
45}
46
47static int
48g84_fifo_chan_engine(struct nvkm_engine *engine)
49{
50 switch (engine->subdev.index) {
51 case NVKM_ENGINE_GR : return 0;
52 case NVKM_ENGINE_MPEG :
53 case NVKM_ENGINE_MSPPP : return 1;
54 case NVKM_ENGINE_CE0 : return 2;
55 case NVKM_ENGINE_VP :
56 case NVKM_ENGINE_MSPDEC: return 3;
57 case NVKM_ENGINE_CIPHER:
58 case NVKM_ENGINE_SEC : return 4;
59 case NVKM_ENGINE_BSP :
60 case NVKM_ENGINE_MSVLD : return 5;
61 default:
62 WARN_ON(1);
63 return 0;
64 }
65}
66
67static int
68g84_fifo_chan_engine_addr(struct nvkm_engine *engine)
69{
70 switch (engine->subdev.index) {
71 case NVKM_ENGINE_DMAOBJ:
72 case NVKM_ENGINE_SW : return -1;
73 case NVKM_ENGINE_GR : return 0x0020;
74 case NVKM_ENGINE_VP :
75 case NVKM_ENGINE_MSPDEC: return 0x0040;
76 case NVKM_ENGINE_MPEG :
77 case NVKM_ENGINE_MSPPP : return 0x0060;
78 case NVKM_ENGINE_BSP :
79 case NVKM_ENGINE_MSVLD : return 0x0080;
80 case NVKM_ENGINE_CIPHER:
81 case NVKM_ENGINE_SEC : return 0x00a0;
82 case NVKM_ENGINE_CE0 : return 0x00c0;
83 default:
84 WARN_ON(1);
85 return -1;
86 }
87}
88
89static int
90g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
91 struct nvkm_engine *engine, bool suspend)
92{
93 struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
94 struct nv50_fifo *fifo = chan->fifo;
95 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
96 struct nvkm_device *device = subdev->device;
97 u32 engn, save;
98 int offset;
99 bool done;
100
101 offset = g84_fifo_chan_engine_addr(engine);
102 if (offset < 0)
103 return 0;
104
105 engn = g84_fifo_chan_engine(engine);
106 save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
107 nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
108 done = nvkm_msec(device, 2000,
109 if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
110 break;
111 ) >= 0;
112 nvkm_wr32(device, 0x002520, save);
113 if (!done) {
114 nvkm_error(subdev, "channel %d [%s] unload timeout\n",
115 chan->base.chid, chan->base.object.client->name);
116 if (suspend)
117 return -EBUSY;
118 }
119
120 nvkm_kmap(chan->eng);
121 nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
122 nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
123 nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
124 nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
125 nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
126 nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
127 nvkm_done(chan->eng);
128 return 0;
129}
130
131
132int
133g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
134 struct nvkm_engine *engine)
135{
136 struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
137 struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
138 u64 limit, start;
139 int offset;
140
141 offset = g84_fifo_chan_engine_addr(engine);
142 if (offset < 0)
143 return 0;
144 limit = engn->addr + engn->size - 1;
145 start = engn->addr;
146
147 nvkm_kmap(chan->eng);
148 nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
149 nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
150 nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
151 nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
152 upper_32_bits(start));
153 nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
154 nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
155 nvkm_done(chan->eng);
156 return 0;
157}
158
159static int
160g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
161 struct nvkm_engine *engine,
162 struct nvkm_object *object)
163{
164 struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
165 int engn = engine->subdev.index;
166
167 if (g84_fifo_chan_engine_addr(engine) < 0)
168 return 0;
169
170 return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
171}
172
173int
174g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
175 struct nvkm_object *object)
176{
177 struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
178 u32 handle = object->handle;
179 u32 context;
180
181 switch (object->engine->subdev.index) {
182 case NVKM_ENGINE_DMAOBJ:
183 case NVKM_ENGINE_SW : context = 0x00000000; break;
184 case NVKM_ENGINE_GR : context = 0x00100000; break;
185 case NVKM_ENGINE_MPEG :
186 case NVKM_ENGINE_MSPPP : context = 0x00200000; break;
187 case NVKM_ENGINE_ME :
188 case NVKM_ENGINE_CE0 : context = 0x00300000; break;
189 case NVKM_ENGINE_VP :
190 case NVKM_ENGINE_MSPDEC: context = 0x00400000; break;
191 case NVKM_ENGINE_CIPHER:
192 case NVKM_ENGINE_SEC :
193 case NVKM_ENGINE_VIC : context = 0x00500000; break;
194 case NVKM_ENGINE_BSP :
195 case NVKM_ENGINE_MSVLD : context = 0x00600000; break;
196 default:
197 WARN_ON(1);
198 return -EINVAL;
199 }
200
201 return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
202}
203
204static void
205g84_fifo_chan_init(struct nvkm_fifo_chan *base)
206{
207 struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
208 struct nv50_fifo *fifo = chan->fifo;
209 struct nvkm_device *device = fifo->base.engine.subdev.device;
210 u64 addr = chan->ramfc->addr >> 8;
211 u32 chid = chan->base.chid;
212
213 nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
214 nv50_fifo_runlist_update(fifo);
215}
216
217static const struct nvkm_fifo_chan_func
218g84_fifo_chan_func = {
219 .dtor = nv50_fifo_chan_dtor,
220 .init = g84_fifo_chan_init,
221 .fini = nv50_fifo_chan_fini,
222 .ntfy = g84_fifo_chan_ntfy,
223 .engine_ctor = g84_fifo_chan_engine_ctor,
224 .engine_dtor = nv50_fifo_chan_engine_dtor,
225 .engine_init = g84_fifo_chan_engine_init,
226 .engine_fini = g84_fifo_chan_engine_fini,
227 .object_ctor = g84_fifo_chan_object_ctor,
228 .object_dtor = nv50_fifo_chan_object_dtor,
229};
230
231int
232g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
233 const struct nvkm_oclass *oclass,
234 struct nv50_fifo_chan *chan)
235{
236 struct nvkm_device *device = fifo->base.engine.subdev.device;
237 int ret;
238
239 ret = nvkm_fifo_chan_ctor(&g84_fifo_chan_func, &fifo->base,
240 0x10000, 0x1000, false, vm, push,
241 (1ULL << NVKM_ENGINE_BSP) |
242 (1ULL << NVKM_ENGINE_CE0) |
243 (1ULL << NVKM_ENGINE_CIPHER) |
244 (1ULL << NVKM_ENGINE_DMAOBJ) |
245 (1ULL << NVKM_ENGINE_GR) |
246 (1ULL << NVKM_ENGINE_ME) |
247 (1ULL << NVKM_ENGINE_MPEG) |
248 (1ULL << NVKM_ENGINE_MSPDEC) |
249 (1ULL << NVKM_ENGINE_MSPPP) |
250 (1ULL << NVKM_ENGINE_MSVLD) |
251 (1ULL << NVKM_ENGINE_SEC) |
252 (1ULL << NVKM_ENGINE_SW) |
253 (1ULL << NVKM_ENGINE_VIC) |
254 (1ULL << NVKM_ENGINE_VP),
255 0, 0xc00000, 0x2000, oclass, &chan->base);
256 chan->fifo = fifo;
257 if (ret)
258 return ret;
259
260 ret = nvkm_gpuobj_new(device, 0x0200, 0, true, chan->base.inst,
261 &chan->eng);
262 if (ret)
263 return ret;
264
265 ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
266 &chan->pgd);
267 if (ret)
268 return ret;
269
270 ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, chan->base.inst,
271 &chan->cache);
272 if (ret)
273 return ret;
274
275 ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, chan->base.inst,
276 &chan->ramfc);
277 if (ret)
278 return ret;
279
280 ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
281 if (ret)
282 return ret;
283
284 return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
285}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
new file mode 100644
index 000000000000..7d697e2dce1a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
@@ -0,0 +1,24 @@
1#ifndef __GF100_FIFO_CHAN_H__
2#define __GF100_FIFO_CHAN_H__
3#define gf100_fifo_chan(p) container_of((p), struct gf100_fifo_chan, base)
4#include "chan.h"
5#include "gf100.h"
6
7struct gf100_fifo_chan {
8 struct nvkm_fifo_chan base;
9 struct gf100_fifo *fifo;
10
11 struct list_head head;
12 bool killed;
13
14 struct nvkm_gpuobj *pgd;
15 struct nvkm_vm *vm;
16
17 struct {
18 struct nvkm_gpuobj *inst;
19 struct nvkm_vma vma;
20 } engn[NVKM_SUBDEV_NR];
21};
22
23extern const struct nvkm_fifo_chan_oclass gf100_fifo_gpfifo_oclass;
24#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
new file mode 100644
index 000000000000..97bdddb7644a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -0,0 +1,29 @@
1#ifndef __GK104_FIFO_CHAN_H__
2#define __GK104_FIFO_CHAN_H__
3#define gk104_fifo_chan(p) container_of((p), struct gk104_fifo_chan, base)
4#include "chan.h"
5#include "gk104.h"
6
7struct gk104_fifo_chan {
8 struct nvkm_fifo_chan base;
9 struct gk104_fifo *fifo;
10 int engine;
11
12 struct list_head head;
13 bool killed;
14
15 struct nvkm_gpuobj *pgd;
16 struct nvkm_vm *vm;
17
18 struct {
19 struct nvkm_gpuobj *inst;
20 struct nvkm_vma vma;
21 } engn[NVKM_SUBDEV_NR];
22};
23
24int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *,
25 void *data, u32 size, struct nvkm_object **);
26
27extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
28extern const struct nvkm_fifo_chan_oclass gm204_fifo_gpfifo_oclass;
29#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
new file mode 100644
index 000000000000..3361a1fd0343
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
@@ -0,0 +1,24 @@
1#ifndef __NV04_FIFO_CHAN_H__
2#define __NV04_FIFO_CHAN_H__
3#define nv04_fifo_chan(p) container_of((p), struct nv04_fifo_chan, base)
4#include "chan.h"
5#include "nv04.h"
6
7struct nv04_fifo_chan {
8 struct nvkm_fifo_chan base;
9 struct nv04_fifo *fifo;
10 u32 ramfc;
11 struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
12};
13
14extern const struct nvkm_fifo_chan_func nv04_fifo_dma_func;
15void *nv04_fifo_dma_dtor(struct nvkm_fifo_chan *);
16void nv04_fifo_dma_init(struct nvkm_fifo_chan *);
17void nv04_fifo_dma_fini(struct nvkm_fifo_chan *);
18void nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *, int);
19
20extern const struct nvkm_fifo_chan_oclass nv04_fifo_dma_oclass;
21extern const struct nvkm_fifo_chan_oclass nv10_fifo_dma_oclass;
22extern const struct nvkm_fifo_chan_oclass nv17_fifo_dma_oclass;
23extern const struct nvkm_fifo_chan_oclass nv40_fifo_dma_oclass;
24#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
new file mode 100644
index 000000000000..25b60aff40e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
@@ -0,0 +1,270 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25
26#include <core/client.h>
27#include <core/ramht.h>
28#include <subdev/mmu.h>
29#include <subdev/timer.h>
30
/* Map an engine to its entry offset inside the channel's engine-context
 * gpuobj (chan->eng).  Returns -1 for engines that have no context entry
 * (DMAOBJ/SW) and, with a WARN, for any engine this code doesn't know.
 */
31static int
32nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
33{
34 switch (engine->subdev.index) {
35 case NVKM_ENGINE_DMAOBJ:
36 case NVKM_ENGINE_SW : return -1;
37 case NVKM_ENGINE_GR : return 0x0000;
38 case NVKM_ENGINE_MPEG : return 0x0060;
39 default:
40 WARN_ON(1);
41 return -1;
42 }
43}
44
/* Tear down an engine's context binding for this channel: kick the context
 * off the hardware (reg 0x0032fc, polled for up to 2ms), then zero the
 * engine's entry in chan->eng.  On timeout during suspend, -EBUSY is
 * returned and the entry is left intact; outside suspend the timeout is
 * logged but treated as success.
 */
45static int
46nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
47 struct nvkm_engine *engine, bool suspend)
48{
49 struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
50 struct nv50_fifo *fifo = chan->fifo;
51 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
52 struct nvkm_device *device = subdev->device;
53 int offset, ret = 0;
54 u32 me;
55
56 offset = nv50_fifo_chan_engine_addr(engine);
57 if (offset < 0)
58 return 0;
59
60 /* HW bug workaround:
61 *
62 * PFIFO will hang forever if the connected engines don't report
63 * that they've processed the context switch request.
64 *
65 * In order for the kickoff to work, we need to ensure all the
66 * connected engines are in a state where they can answer.
67 *
68 * Newer chipsets don't seem to suffer from this issue, and well,
69 * there's also a "ignore these engines" bitmask reg we can use
70 * if we hit the issue there..
71 */
72 me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);
73
74 /* do the kickoff... */
75 nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
76 if (nvkm_msec(device, 2000,
77 if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
78 break;
79 ) < 0) {
80 nvkm_error(subdev, "channel %d [%s] unload timeout\n",
81 chan->base.chid, chan->base.object.client->name);
82 if (suspend)
83 ret = -EBUSY;
84 }
/* restore the saved 0x00b860 state from the workaround above */
85 nvkm_wr32(device, 0x00b860, me);
86
87 if (ret == 0) {
88 nvkm_kmap(chan->eng);
89 nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
90 nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
91 nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
92 nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
93 nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
94 nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
95 nvkm_done(chan->eng);
96 }
97
98 return ret;
99}
100
/* Program the engine's entry in chan->eng with the bound context object's
 * address range (start/limit split across 32-bit words, upper bits packed
 * into the +0x0c word).  A no-op for engines with no context entry.
 * NOTE(review): 0x00190000 at +0x00 is a hardware control word — meaning
 * not derivable from this file.
 */
101static int
102nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
103 struct nvkm_engine *engine)
104{
105 struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
106 struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
107 u64 limit, start;
108 int offset;
109
110 offset = nv50_fifo_chan_engine_addr(engine);
111 if (offset < 0)
112 return 0;
113 limit = engn->addr + engn->size - 1;
114 start = engn->addr;
115
116 nvkm_kmap(chan->eng);
117 nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
118 nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
119 nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
120 nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
121 upper_32_bits(start));
122 nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
123 nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
124 nvkm_done(chan->eng);
125 return 0;
126}
127
/* Release the per-engine context object created by engine_ctor. */
128void
129nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base,
130 struct nvkm_engine *engine)
131{
132 struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
133 nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
134}
135
/* Bind an engine context object into chan->engn[] for engines that have a
 * context entry; engines without one (addr < 0) need no binding.
 */
136static int
137nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
138 struct nvkm_engine *engine,
139 struct nvkm_object *object)
140{
141 struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
142 int engn = engine->subdev.index;
143
144 if (nv50_fifo_chan_engine_addr(engine) < 0)
145 return 0;
146
147 return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
148}
149
/* Remove an object from the channel's RAMHT; cookie is the hash slot
 * returned by nvkm_ramht_insert() in object_ctor.
 */
150void
151nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *base, int cookie)
152{
153 struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
154 nvkm_ramht_remove(chan->ramht, cookie);
155}
156
/* Insert an object handle into the channel's RAMHT with an engine-specific
 * context word; returns the hash slot (the dtor "cookie") or a negative
 * error.  Unknown engines are rejected with -EINVAL after a WARN.
 */
157static int
158nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
159 struct nvkm_object *object)
160{
161 struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
162 u32 handle = object->handle;
163 u32 context;
164
165 switch (object->engine->subdev.index) {
166 case NVKM_ENGINE_DMAOBJ:
167 case NVKM_ENGINE_SW : context = 0x00000000; break;
168 case NVKM_ENGINE_GR : context = 0x00100000; break;
169 case NVKM_ENGINE_MPEG : context = 0x00200000; break;
170 default:
171 WARN_ON(1);
172 return -EINVAL;
173 }
174
175 return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
176}
177
/* Deactivate the channel: clear its enable bit in the per-channel context
 * register (0x002600 + chid*4), rebuild the runlist so PFIFO unloads the
 * context, then zero the register entirely.
 */
178void
179nv50_fifo_chan_fini(struct nvkm_fifo_chan *base)
180{
181 struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
182 struct nv50_fifo *fifo = chan->fifo;
183 struct nvkm_device *device = fifo->base.engine.subdev.device;
184 u32 chid = chan->base.chid;
185
186 /* remove channel from runlist, fifo will unload context */
187 nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
188 nv50_fifo_runlist_update(fifo);
189 nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);
190}
191
/* Activate the channel: point its 0x002600 slot at the RAMFC (enable bit
 * 0x80000000 | ramfc address >> 12) and rebuild the runlist.
 */
192static void
193nv50_fifo_chan_init(struct nvkm_fifo_chan *base)
194{
195 struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
196 struct nv50_fifo *fifo = chan->fifo;
197 struct nvkm_device *device = fifo->base.engine.subdev.device;
198 u64 addr = chan->ramfc->addr >> 12;
199 u32 chid = chan->base.chid;
200
201 nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
202 nv50_fifo_runlist_update(fifo);
203}
204
/* Destroy the channel's GPU objects in reverse order of construction
 * (VM ref, RAMHT, PGD, engine ctx, cache, RAMFC); returns the chan
 * pointer so the caller can kfree() the embedding allocation.
 */
205void *
206nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
207{
208 struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
209 nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
210 nvkm_ramht_del(&chan->ramht);
211 nvkm_gpuobj_del(&chan->pgd);
212 nvkm_gpuobj_del(&chan->eng);
213 nvkm_gpuobj_del(&chan->cache);
214 nvkm_gpuobj_del(&chan->ramfc);
215 return chan;
216}
217
/* NV50 channel method table wired into the common fifo channel layer. */
218static const struct nvkm_fifo_chan_func
219nv50_fifo_chan_func = {
220 .dtor = nv50_fifo_chan_dtor,
221 .init = nv50_fifo_chan_init,
222 .fini = nv50_fifo_chan_fini,
223 .engine_ctor = nv50_fifo_chan_engine_ctor,
224 .engine_dtor = nv50_fifo_chan_engine_dtor,
225 .engine_init = nv50_fifo_chan_engine_init,
226 .engine_fini = nv50_fifo_chan_engine_fini,
227 .object_ctor = nv50_fifo_chan_object_ctor,
228 .object_dtor = nv50_fifo_chan_object_dtor,
229};
230
/* Common NV50 channel constructor: register with the base fifo layer
 * (engine mask DMAOBJ|SW|GR|MPEG), then allocate the per-channel RAMFC,
 * engine-context object, page directory and RAMHT inside the channel's
 * instance block, and finally reference the channel VM through the PGD.
 * Partially-constructed state is cleaned up by nv50_fifo_chan_dtor via
 * the caller.  Note chan->fifo is set before the ret check so the dtor
 * can always reach the fifo even when the base ctor failed.
 */
231int
232nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
233 const struct nvkm_oclass *oclass,
234 struct nv50_fifo_chan *chan)
235{
236 struct nvkm_device *device = fifo->base.engine.subdev.device;
237 int ret;
238
239 ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
240 0x10000, 0x1000, false, vm, push,
241 (1ULL << NVKM_ENGINE_DMAOBJ) |
242 (1ULL << NVKM_ENGINE_SW) |
243 (1ULL << NVKM_ENGINE_GR) |
244 (1ULL << NVKM_ENGINE_MPEG),
245 0, 0xc00000, 0x2000, oclass, &chan->base);
246 chan->fifo = fifo;
247 if (ret)
248 return ret;
249
250 ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, chan->base.inst,
251 &chan->ramfc);
252 if (ret)
253 return ret;
254
255 ret = nvkm_gpuobj_new(device, 0x1200, 0, true, chan->base.inst,
256 &chan->eng);
257 if (ret)
258 return ret;
259
260 ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
261 &chan->pgd);
262 if (ret)
263 return ret;
264
265 ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
266 if (ret)
267 return ret;
268
269 return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
270}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
new file mode 100644
index 000000000000..4b9da469b704
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -0,0 +1,35 @@
1#ifndef __NV50_FIFO_CHAN_H__
2#define __NV50_FIFO_CHAN_H__
3#define nv50_fifo_chan(p) container_of((p), struct nv50_fifo_chan, base)
4#include "chan.h"
5#include "nv50.h"
6
/* Per-channel state for the NV50-era FIFO: RAMFC, cache, engine-context
 * object, page directory, hash table, channel VM reference, and the bound
 * per-engine context objects (see channv50.c / chang84.c).
 */
7struct nv50_fifo_chan {
8 struct nv50_fifo *fifo;
9 struct nvkm_fifo_chan base;
10
11 struct nvkm_gpuobj *ramfc;
12 struct nvkm_gpuobj *cache;
13 struct nvkm_gpuobj *eng;
14 struct nvkm_gpuobj *pgd;
15 struct nvkm_ramht *ramht;
16 struct nvkm_vm *vm;
17
18 struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
19};
20
21int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
22 const struct nvkm_oclass *, struct nv50_fifo_chan *);
23void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *);
24void nv50_fifo_chan_fini(struct nvkm_fifo_chan *);
25void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *);
26void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
27
28int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
29 const struct nvkm_oclass *, struct nv50_fifo_chan *);
30
31extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
32extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass;
33extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass;
34extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass;
35#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
new file mode 100644
index 000000000000..a5ca52c7b74f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
@@ -0,0 +1,93 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25
26#include <core/client.h>
27#include <core/ramht.h>
28
29#include <nvif/class.h>
30#include <nvif/unpack.h>
31
/* G84 DMA-channel constructor (G82_CHANNEL_DMA class): unpack the v0 args,
 * build the channel via g84_fifo_chan_ctor, report the chid back to the
 * client, and fill in the channel's RAMFC (get/put at the requested offset,
 * pushbuf dmaobj, RAMHT size/offset, cache and instance addresses).
 * NOTE(review): the bare "return ret" in the unpack else-branch relies on
 * the nvif_unpack() macro assigning ret internally — confirm against
 * nvif/unpack.h before flagging as uninitialized.
 */
32static int
33g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
34 void *data, u32 size, struct nvkm_object **pobject)
35{
36 struct nvkm_object *parent = oclass->parent;
37 union {
38 struct nv50_channel_dma_v0 v0;
39 } *args = data;
40 struct nv50_fifo *fifo = nv50_fifo(base);
41 struct nv50_fifo_chan *chan;
42 int ret;
43
44 nvif_ioctl(parent, "create channel dma size %d\n", size);
45 if (nvif_unpack(args->v0, 0, 0, false)) {
46 nvif_ioctl(parent, "create channel dma vers %d vm %llx "
47 "pushbuf %llx offset %016llx\n",
48 args->v0.version, args->v0.vm, args->v0.pushbuf,
49 args->v0.offset);
50 if (!args->v0.pushbuf)
51 return -EINVAL;
52 } else
53 return ret;
54
55 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
56 return -ENOMEM;
57 *pobject = &chan->base.object;
58
59 ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
60 oclass, chan);
61 if (ret)
62 return ret;
63
64 args->v0.chid = chan->base.chid;
65
66 nvkm_kmap(chan->ramfc);
67 nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
68 nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
69 nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset))
70 nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
71 nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
72 nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
73 nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
74 nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
75 nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
76 nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
77 nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
78 nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
79 (4 << 24) /* SEARCH_FULL */ |
80 (chan->ramht->gpuobj->node->offset >> 4));
81 nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
82 nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
83 nvkm_done(chan->ramfc);
84 return 0;
85}
86
/* Object class binding G82_CHANNEL_DMA (v0 only) to g84_fifo_dma_new. */
87const struct nvkm_fifo_chan_oclass
88g84_fifo_dma_oclass = {
89 .base.oclass = G82_CHANNEL_DMA,
90 .base.minver = 0,
91 .base.maxver = 0,
92 .ctor = g84_fifo_dma_new,
93};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
new file mode 100644
index 000000000000..bfcc6408a772
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -0,0 +1,220 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv04.h"
25#include "regsnv04.h"
26
27#include <core/client.h>
28#include <core/ramht.h>
29#include <subdev/instmem.h>
30
31#include <nvif/class.h>
32#include <nvif/unpack.h>
33
/* Remove an object from the global instmem RAMHT (NV04 has a single
 * device-wide hash table rather than a per-channel one).
 */
34void
35nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
36{
37 struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
38 struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
39 nvkm_ramht_remove(imem->ramht, cookie);
40}
41
/* Insert an object handle into the shared instmem RAMHT with a context
 * word encoding validity (bit 31), the chid (bits 24+) and the engine.
 * The subdev mutex serializes access to the shared table.  Returns the
 * hash slot (the dtor cookie) or a negative error.
 */
42static int
43nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
44 struct nvkm_object *object)
45{
46 struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
47 struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
48 u32 context = 0x80000000 | chan->base.chid << 24;
49 u32 handle = object->handle;
50 int hash;
51
52 switch (object->engine->subdev.index) {
53 case NVKM_ENGINE_DMAOBJ:
54 case NVKM_ENGINE_SW : context |= 0x00000000; break;
55 case NVKM_ENGINE_GR : context |= 0x00010000; break;
56 case NVKM_ENGINE_MPEG : context |= 0x00020000; break;
57 default:
58 WARN_ON(1);
59 return -EINVAL;
60 }
61
62 mutex_lock(&chan->fifo->base.engine.subdev.mutex);
63 hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
64 handle, context);
65 mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
66 return hash;
67}
68
/* Deactivate an NV04 DMA channel.  With context switches frozen
 * (PFIFO_CACHES = 0), if this channel is the one currently loaded in
 * CACHE1, spill each CACHE1 register described by fifo->ramfc back into
 * this channel's RAMFC slot in instmem, zero the registers, and load the
 * null channel (chid == mask).  Finally clear the channel's DMA-mode bit
 * and re-enable caches.  Runs under the fifo lock with IRQs disabled.
 */
69void
70nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
71{
72 struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
73 struct nv04_fifo *fifo = chan->fifo;
74 struct nvkm_device *device = fifo->base.engine.subdev.device;
75 struct nvkm_memory *fctx = device->imem->ramfc;
76 const struct nv04_fifo_ramfc *c;
77 unsigned long flags;
78 u32 mask = fifo->base.nr - 1;
79 u32 data = chan->ramfc;
80 u32 chid;
81
82 /* prevent fifo context switches */
83 spin_lock_irqsave(&fifo->base.lock, flags);
84 nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
85
86 /* if this channel is active, replace it with a null context */
87 chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & mask;
88 if (chid == chan->base.chid) {
89 nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
90 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
91 nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
92
/* save register state into this channel's RAMFC entry */
93 c = fifo->ramfc;
94 do {
95 u32 rm = ((1ULL << c->bits) - 1) << c->regs;
96 u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
97 u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
98 u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
99 nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
100 } while ((++c)->bits);
101
/* then clear the live registers */
102 c = fifo->ramfc;
103 do {
104 nvkm_wr32(device, c->regp, 0x00000000);
105 } while ((++c)->bits);
106
107 nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
108 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
109 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, mask);
110 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
111 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
112 }
113
114 /* restore normal operation, after disabling dma mode */
115 nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
116 nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
117 spin_unlock_irqrestore(&fifo->base.lock, flags);
118}
119
/* Activate the channel by setting its bit in NV04_PFIFO_MODE (DMA mode),
 * under the fifo lock with IRQs disabled.
 */
120void
121nv04_fifo_dma_init(struct nvkm_fifo_chan *base)
122{
123 struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
124 struct nv04_fifo *fifo = chan->fifo;
125 struct nvkm_device *device = fifo->base.engine.subdev.device;
126 u32 mask = 1 << chan->base.chid;
127 unsigned long flags;
128 spin_lock_irqsave(&fifo->base.lock, flags);
129 nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
130 spin_unlock_irqrestore(&fifo->base.lock, flags);
131}
132
/* Zero this channel's RAMFC context slots in instmem and return the chan
 * pointer for the base layer to free.
 */
133void *
134nv04_fifo_dma_dtor(struct nvkm_fifo_chan *base)
135{
136 struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
137 struct nv04_fifo *fifo = chan->fifo;
138 struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
139 const struct nv04_fifo_ramfc *c = fifo->ramfc;
140
141 nvkm_kmap(imem->ramfc);
142 do {
143 nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000);
144 } while ((++c)->bits);
145 nvkm_done(imem->ramfc);
146 return chan;
147}
148
/* NV04 DMA-channel method table; shared by the nv10/nv17/nv40 variants. */
149const struct nvkm_fifo_chan_func
150nv04_fifo_dma_func = {
151 .dtor = nv04_fifo_dma_dtor,
152 .init = nv04_fifo_dma_init,
153 .fini = nv04_fifo_dma_fini,
154 .object_ctor = nv04_fifo_dma_object_ctor,
155 .object_dtor = nv04_fifo_dma_object_dtor,
156};
157
/* NV03_CHANNEL_DMA constructor: unpack v0 args, register the channel with
 * the base layer (no VM on NV04), pick the RAMFC slot (chid * 32) and
 * program get/put, pushbuf address and the DMA fetch config into instmem.
 * NOTE(review): the bare "return ret" in the unpack else-branch relies on
 * nvif_unpack() assigning ret internally (see nvif/unpack.h).
 */
158static int
159nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
160 void *data, u32 size, struct nvkm_object **pobject)
161{
162 struct nvkm_object *parent = oclass->parent;
163 union {
164 struct nv03_channel_dma_v0 v0;
165 } *args = data;
166 struct nv04_fifo *fifo = nv04_fifo(base);
167 struct nv04_fifo_chan *chan = NULL;
168 struct nvkm_device *device = fifo->base.engine.subdev.device;
169 struct nvkm_instmem *imem = device->imem;
170 int ret;
171
172 nvif_ioctl(parent, "create channel dma size %d\n", size);
173 if (nvif_unpack(args->v0, 0, 0, false)) {
174 nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
175 "offset %08x\n", args->v0.version,
176 args->v0.pushbuf, args->v0.offset);
177 if (!args->v0.pushbuf)
178 return -EINVAL;
179 } else
180 return ret;
181
182 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
183 return -ENOMEM;
184 *pobject = &chan->base.object;
185
186 ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
187 0x1000, 0x1000, false, 0, args->v0.pushbuf,
188 (1ULL << NVKM_ENGINE_DMAOBJ) |
189 (1ULL << NVKM_ENGINE_GR) |
190 (1ULL << NVKM_ENGINE_SW),
191 0, 0x800000, 0x10000, oclass, &chan->base);
192 chan->fifo = fifo;
193 if (ret)
194 return ret;
195
196 args->v0.chid = chan->base.chid;
197 chan->ramfc = chan->base.chid * 32;
198
199 nvkm_kmap(imem->ramfc);
200 nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
201 nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
202 nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.push->addr >> 4);
203 nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
204 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
205 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
206#ifdef __BIG_ENDIAN
207 NV_PFIFO_CACHE1_BIG_ENDIAN |
208#endif
209 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
210 nvkm_done(imem->ramfc);
211 return 0;
212}
213
/* Object class binding NV03_CHANNEL_DMA (v0 only) to nv04_fifo_dma_new. */
214const struct nvkm_fifo_chan_oclass
215nv04_fifo_dma_oclass = {
216 .base.oclass = NV03_CHANNEL_DMA,
217 .base.minver = 0,
218 .base.maxver = 0,
219 .ctor = nv04_fifo_dma_new,
220};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
new file mode 100644
index 000000000000..34f68e5bd040
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
@@ -0,0 +1,96 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv04.h"
25#include "regsnv04.h"
26
27#include <core/client.h>
28#include <core/gpuobj.h>
29#include <subdev/instmem.h>
30
31#include <nvif/class.h>
32#include <nvif/unpack.h>
33
/* NV10_CHANNEL_DMA constructor: same flow as nv04_fifo_dma_new but with
 * the NV10 RAMFC layout (pushbuf address at +0x0c, fetch config at +0x14).
 * NOTE(review): the bare "return ret" in the unpack else-branch relies on
 * nvif_unpack() assigning ret internally (see nvif/unpack.h).
 */
34static int
35nv10_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
36 void *data, u32 size, struct nvkm_object **pobject)
37{
38 struct nvkm_object *parent = oclass->parent;
39 union {
40 struct nv03_channel_dma_v0 v0;
41 } *args = data;
42 struct nv04_fifo *fifo = nv04_fifo(base);
43 struct nv04_fifo_chan *chan = NULL;
44 struct nvkm_device *device = fifo->base.engine.subdev.device;
45 struct nvkm_instmem *imem = device->imem;
46 int ret;
47
48 nvif_ioctl(parent, "create channel dma size %d\n", size);
49 if (nvif_unpack(args->v0, 0, 0, false)) {
50 nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
51 "offset %08x\n", args->v0.version,
52 args->v0.pushbuf, args->v0.offset);
53 if (!args->v0.pushbuf)
54 return -EINVAL;
55 } else
56 return ret;
57
58 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
59 return -ENOMEM;
60 *pobject = &chan->base.object;
61
62 ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
63 0x1000, 0x1000, false, 0, args->v0.pushbuf,
64 (1ULL << NVKM_ENGINE_DMAOBJ) |
65 (1ULL << NVKM_ENGINE_GR) |
66 (1ULL << NVKM_ENGINE_SW),
67 0, 0x800000, 0x10000, oclass, &chan->base);
68 chan->fifo = fifo;
69 if (ret)
70 return ret;
71
72 args->v0.chid = chan->base.chid;
73 chan->ramfc = chan->base.chid * 32;
74
75 nvkm_kmap(imem->ramfc);
76 nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
77 nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
78 nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
79 nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
80 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
81 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
82#ifdef __BIG_ENDIAN
83 NV_PFIFO_CACHE1_BIG_ENDIAN |
84#endif
85 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
86 nvkm_done(imem->ramfc);
87 return 0;
88}
89
/* Object class binding NV10_CHANNEL_DMA (v0 only) to nv10_fifo_dma_new. */
90const struct nvkm_fifo_chan_oclass
91nv10_fifo_dma_oclass = {
92 .base.oclass = NV10_CHANNEL_DMA,
93 .base.minver = 0,
94 .base.maxver = 0,
95 .ctor = nv10_fifo_dma_new,
96};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
new file mode 100644
index 000000000000..ed7cc9f2b540
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
@@ -0,0 +1,97 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv04.h"
25#include "regsnv04.h"
26
27#include <core/client.h>
28#include <core/gpuobj.h>
29#include <subdev/instmem.h>
30
31#include <nvif/class.h>
32#include <nvif/unpack.h>
33
/* NV17_CHANNEL_DMA constructor: like nv10 but adds MPEG to the engine
 * mask (NV31+) and uses the larger 64-byte RAMFC slots.
 * NOTE(review): the bare "return ret" in the unpack else-branch relies on
 * nvif_unpack() assigning ret internally (see nvif/unpack.h).
 */
34static int
35nv17_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
36 void *data, u32 size, struct nvkm_object **pobject)
37{
38 struct nvkm_object *parent = oclass->parent;
39 union {
40 struct nv03_channel_dma_v0 v0;
41 } *args = data;
42 struct nv04_fifo *fifo = nv04_fifo(base);
43 struct nv04_fifo_chan *chan = NULL;
44 struct nvkm_device *device = fifo->base.engine.subdev.device;
45 struct nvkm_instmem *imem = device->imem;
46 int ret;
47
48 nvif_ioctl(parent, "create channel dma size %d\n", size);
49 if (nvif_unpack(args->v0, 0, 0, false)) {
50 nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
51 "offset %08x\n", args->v0.version,
52 args->v0.pushbuf, args->v0.offset);
53 if (!args->v0.pushbuf)
54 return -EINVAL;
55 } else
56 return ret;
57
58 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
59 return -ENOMEM;
60 *pobject = &chan->base.object;
61
62 ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
63 0x1000, 0x1000, false, 0, args->v0.pushbuf,
64 (1ULL << NVKM_ENGINE_DMAOBJ) |
65 (1ULL << NVKM_ENGINE_GR) |
66 (1ULL << NVKM_ENGINE_MPEG) | /* NV31- */
67 (1ULL << NVKM_ENGINE_SW),
68 0, 0x800000, 0x10000, oclass, &chan->base);
69 chan->fifo = fifo;
70 if (ret)
71 return ret;
72
73 args->v0.chid = chan->base.chid;
74 chan->ramfc = chan->base.chid * 64;
75
76 nvkm_kmap(imem->ramfc);
77 nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
78 nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
79 nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
80 nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
81 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
82 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
83#ifdef __BIG_ENDIAN
84 NV_PFIFO_CACHE1_BIG_ENDIAN |
85#endif
86 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
87 nvkm_done(imem->ramfc);
88 return 0;
89}
90
/* Object class binding NV17_CHANNEL_DMA (v0 only) to nv17_fifo_dma_new. */
91const struct nvkm_fifo_chan_oclass
92nv17_fifo_dma_oclass = {
93 .base.oclass = NV17_CHANNEL_DMA,
94 .base.minver = 0,
95 .base.maxver = 0,
96 .ctor = nv17_fifo_dma_new,
97};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
new file mode 100644
index 000000000000..043b6c325949
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
@@ -0,0 +1,243 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv04.h"
25#include "regsnv04.h"
26
27#include <core/client.h>
28#include <core/ramht.h>
29#include <subdev/instmem.h>
30
31#include <nvif/class.h>
32#include <nvif/unpack.h>
33
34static bool
35nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
36{
37 switch (engine->subdev.index) {
38 case NVKM_ENGINE_DMAOBJ:
39 case NVKM_ENGINE_SW:
40 return false;
41 case NVKM_ENGINE_GR:
42 *reg = 0x0032e0;
43 *ctx = 0x38;
44 return true;
45 case NVKM_ENGINE_MPEG:
46 *reg = 0x00330c;
47 *ctx = 0x54;
48 return true;
49 default:
50 WARN_ON(1);
51 return false;
52 }
53}
54
/* Tear down an engine's context binding for this channel.
 *
 * Clears the engine's context pointer both in the live PFIFO register
 * (only if this channel is the one currently resident) and in the
 * channel's RAMFC slot, so a later channel switch cannot reload it.
 */
static int
nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine, bool suspend)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	unsigned long flags;
	u32 reg, ctx;
	int chid;

	/* Engines without PFIFO-tracked context need nothing here. */
	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
		return 0;

	spin_lock_irqsave(&fifo->base.lock, flags);
	/* NOTE(review): bit 0 of 0x002500 appears to gate PFIFO channel
	 * switching; it is cleared here so the resident-channel check
	 * below stays valid — confirm against NV40 PFIFO docs. */
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);

	/* Current resident channel id, masked to the channel count. */
	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
	if (chid == chan->base.chid)
		nvkm_wr32(device, reg, 0x00000000);
	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000);
	nvkm_done(imem->ramfc);

	/* Re-enable channel switching before dropping the lock. */
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}
84
/* Activate an engine's context binding for this channel.
 *
 * Mirror of nv40_fifo_dma_engine_fini(): the instance address of the
 * context object bound by nv40_fifo_dma_engine_ctor() is written to the
 * live PFIFO register (if this channel is currently resident) and to
 * the channel's RAMFC slot (so it is loaded on future channel switches).
 */
static int
nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	unsigned long flags;
	u32 inst, reg, ctx;
	int chid;

	/* Engines without PFIFO-tracked context need nothing here. */
	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
		return 0;
	/* Context instance address, in 16-byte units as PFIFO expects. */
	inst = chan->engn[engine->subdev.index]->addr >> 4;

	spin_lock_irqsave(&fifo->base.lock, flags);
	/* NOTE(review): bit 0 of 0x002500 appears to gate PFIFO channel
	 * switching; cleared so the resident-channel check below cannot
	 * race a switch — confirm against NV40 PFIFO docs. */
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);

	/* Current resident channel id, masked to the channel count. */
	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
	if (chid == chan->base.chid)
		nvkm_wr32(device, reg, inst);
	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst);
	nvkm_done(imem->ramfc);

	/* Re-enable channel switching before dropping the lock. */
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}
115
/* Drop the per-engine context object created by
 * nv40_fifo_dma_engine_ctor() for this channel.
 * Presumably nvkm_gpuobj_del() is NULL-safe for engines that were
 * never bound — confirm against its implementation.
 */
static void
nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
}
123
124static int
125nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base,
126 struct nvkm_engine *engine,
127 struct nvkm_object *object)
128{
129 struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
130 const int engn = engine->subdev.index;
131 u32 reg, ctx;
132
133 if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
134 return 0;
135
136 return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
137}
138
/* Insert a newly-created object into the device-wide RAMHT hash table.
 *
 * The context word encodes the owning channel id (bits 23+) and the
 * target engine (bits 20-22); PFIFO uses it to route methods on this
 * handle. Returns the RAMHT hash slot on success, negative errno on
 * failure (propagated from nvkm_ramht_insert()).
 */
static int
nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
			  struct nvkm_object *object)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
	u32 context = chan->base.chid << 23;
	u32 handle = object->handle;
	int hash;

	switch (object->engine->subdev.index) {
	case NVKM_ENGINE_DMAOBJ:
	case NVKM_ENGINE_SW : context |= 0x00000000; break;
	case NVKM_ENGINE_GR : context |= 0x00100000; break;
	case NVKM_ENGINE_MPEG : context |= 0x00200000; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	/* RAMHT is shared by all channels; serialise insertions. */
	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
	hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
				 handle, context);
	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
	return hash;
}
165
/* Channel method table for NV40 DMA channels. Engine-context handling
 * is NV40-specific (per-engine context registers plus RAMFC slots);
 * lifecycle and object-destroy hooks are shared with NV04.
 */
static const struct nvkm_fifo_chan_func
nv40_fifo_dma_func = {
	.dtor = nv04_fifo_dma_dtor,
	.init = nv04_fifo_dma_init,
	.fini = nv04_fifo_dma_fini,
	.engine_ctor = nv40_fifo_dma_engine_ctor,
	.engine_dtor = nv40_fifo_dma_engine_dtor,
	.engine_init = nv40_fifo_dma_engine_init,
	.engine_fini = nv40_fifo_dma_engine_fini,
	.object_ctor = nv40_fifo_dma_object_ctor,
	.object_dtor = nv04_fifo_dma_object_dtor,
};
178
/* NV40_CHANNEL_DMA constructor.
 *
 * Unpacks the ioctl arguments, allocates the channel, then programs
 * the channel's 128-byte RAMFC entry with the initial pushbuf offsets
 * and DMA fetch configuration. Returns 0 on success, negative errno
 * otherwise; on failure after *pobject is set, the caller tears the
 * partially-built channel down through the object dtor.
 */
static int
nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		  void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_object *parent = oclass->parent;
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nv04_fifo_chan *chan = NULL;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	int ret;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
				   "offset %08x\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
		if (!args->v0.pushbuf)
			return -EINVAL;
	} else
		/* NOTE(review): ret looks uninitialized here, but the
		 * nvif_unpack() macro assigns it internally — do not
		 * "fix" this without checking the macro. */
		return ret;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	/* Publish before construction so failure paths can destroy it. */
	*pobject = &chan->base.object;

	ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base,
				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
				  (1ULL << NVKM_ENGINE_DMAOBJ) |
				  (1ULL << NVKM_ENGINE_GR) |
				  (1ULL << NVKM_ENGINE_MPEG) |
				  (1ULL << NVKM_ENGINE_SW),
				  0, 0xc00000, 0x1000, oclass, &chan->base);
	/* NOTE(review): assigned before the error check — appears
	 * deliberate so teardown can reach the fifo; confirm. */
	chan->fifo = fifo;
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;
	/* 128 bytes of RAMFC per channel on NV40 (64 on NV17, above). */
	chan->ramfc = chan->base.chid * 128;

	nvkm_kmap(imem->ramfc);
	/* 0x00/0x04: initial pushbuf offset (assumed PUT/GET — confirm). */
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
	/* Pushbuf instance address, in 16-byte units. */
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 |
			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			       NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
	nvkm_done(imem->ramfc);
	return 0;
}
236
/* Class definition exposing NV40_CHANNEL_DMA to userspace (version 0 only). */
const struct nvkm_fifo_chan_oclass
nv40_fifo_dma_oclass = {
	.base.oclass = NV40_CHANNEL_DMA,
	.base.minver = 0,
	.base.maxver = 0,
	.ctor = nv40_fifo_dma_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
new file mode 100644
index 000000000000..6b3b15f12c39
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
@@ -0,0 +1,91 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25
26#include <core/client.h>
27#include <core/ramht.h>
28
29#include <nvif/class.h>
30#include <nvif/unpack.h>
31
/* NV50_CHANNEL_DMA constructor.
 *
 * Unlike NV04/NV40 (shared instmem RAMFC), the NV50 channel carries its
 * own RAMFC gpuobj (chan->ramfc), created by nv50_fifo_chan_ctor() and
 * programmed directly here. Returns 0 on success, negative errno
 * otherwise.
 */
static int
nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		  void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_object *parent = oclass->parent;
	union {
		struct nv50_channel_dma_v0 v0;
	} *args = data;
	struct nv50_fifo *fifo = nv50_fifo(base);
	struct nv50_fifo_chan *chan;
	int ret;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel dma vers %d vm %llx "
				   "pushbuf %llx offset %016llx\n",
			   args->v0.version, args->v0.vm, args->v0.pushbuf,
			   args->v0.offset);
		if (!args->v0.pushbuf)
			return -EINVAL;
	} else
		/* NOTE(review): ret looks uninitialized here, but the
		 * nvif_unpack() macro assigns it internally — do not
		 * "fix" this without checking the macro. */
		return ret;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	/* Publish before construction so failure paths can destroy it. */
	*pobject = &chan->base.object;

	ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
				  oclass, chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nvkm_kmap(chan->ramfc);
	/* 0x08-0x14: 64-bit pushbuf offset, written twice (assumed
	 * PUT then GET — confirm against NV50 RAMFC layout). */
	nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
	nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
	nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
	nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
	nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
	/* Pushbuf instance offset, in 16-byte units. */
	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
	nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
	/* RAMHT config: hash-table size, search mode, and base offset. */
	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
				     (4 << 24) /* SEARCH_FULL */ |
				     (chan->ramht->gpuobj->node->offset >> 4));
	nvkm_done(chan->ramfc);
	return 0;
}
84
/* Class definition exposing NV50_CHANNEL_DMA to userspace (version 0 only). */
const struct nvkm_fifo_chan_oclass
nv50_fifo_dma_oclass = {
	.base.oclass = NV50_CHANNEL_DMA,
	.base.minver = 0,
	.base.maxver = 0,
	.ctor = nv50_fifo_dma_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
index a04920b3cf84..ff7b529764fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
@@ -22,466 +22,41 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25#include "nv04.h" 25#include "channv50.h"
26
27#include <core/client.h>
28#include <core/engctx.h>
29#include <core/ramht.h>
30#include <subdev/bar.h>
31#include <subdev/mmu.h>
32#include <subdev/timer.h>
33
34#include <nvif/class.h>
35#include <nvif/unpack.h>
36
37/*******************************************************************************
38 * FIFO channel objects
39 ******************************************************************************/
40
41static int
42g84_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
43{
44 struct nvkm_bar *bar = nvkm_bar(parent);
45 struct nv50_fifo_base *base = (void *)parent->parent;
46 struct nvkm_gpuobj *ectx = (void *)object;
47 u64 limit = ectx->addr + ectx->size - 1;
48 u64 start = ectx->addr;
49 u32 addr;
50
51 switch (nv_engidx(object->engine)) {
52 case NVDEV_ENGINE_SW : return 0;
53 case NVDEV_ENGINE_GR : addr = 0x0020; break;
54 case NVDEV_ENGINE_VP :
55 case NVDEV_ENGINE_MSPDEC: addr = 0x0040; break;
56 case NVDEV_ENGINE_MSPPP :
57 case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
58 case NVDEV_ENGINE_BSP :
59 case NVDEV_ENGINE_MSVLD : addr = 0x0080; break;
60 case NVDEV_ENGINE_CIPHER:
61 case NVDEV_ENGINE_SEC : addr = 0x00a0; break;
62 case NVDEV_ENGINE_CE0 : addr = 0x00c0; break;
63 default:
64 return -EINVAL;
65 }
66
67 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
68 nv_wo32(base->eng, addr + 0x00, 0x00190000);
69 nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
70 nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
71 nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
72 upper_32_bits(start));
73 nv_wo32(base->eng, addr + 0x10, 0x00000000);
74 nv_wo32(base->eng, addr + 0x14, 0x00000000);
75 bar->flush(bar);
76 return 0;
77}
78
79static int
80g84_fifo_context_detach(struct nvkm_object *parent, bool suspend,
81 struct nvkm_object *object)
82{
83 struct nvkm_bar *bar = nvkm_bar(parent);
84 struct nv50_fifo_priv *priv = (void *)parent->engine;
85 struct nv50_fifo_base *base = (void *)parent->parent;
86 struct nv50_fifo_chan *chan = (void *)parent;
87 u32 addr, save, engn;
88 bool done;
89
90 switch (nv_engidx(object->engine)) {
91 case NVDEV_ENGINE_SW : return 0;
92 case NVDEV_ENGINE_GR : engn = 0; addr = 0x0020; break;
93 case NVDEV_ENGINE_VP :
94 case NVDEV_ENGINE_MSPDEC: engn = 3; addr = 0x0040; break;
95 case NVDEV_ENGINE_MSPPP :
96 case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break;
97 case NVDEV_ENGINE_BSP :
98 case NVDEV_ENGINE_MSVLD : engn = 5; addr = 0x0080; break;
99 case NVDEV_ENGINE_CIPHER:
100 case NVDEV_ENGINE_SEC : engn = 4; addr = 0x00a0; break;
101 case NVDEV_ENGINE_CE0 : engn = 2; addr = 0x00c0; break;
102 default:
103 return -EINVAL;
104 }
105
106 save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
107 nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
108 done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
109 nv_wr32(priv, 0x002520, save);
110 if (!done) {
111 nv_error(priv, "channel %d [%s] unload timeout\n",
112 chan->base.chid, nvkm_client_name(chan));
113 if (suspend)
114 return -EBUSY;
115 }
116
117 nv_wo32(base->eng, addr + 0x00, 0x00000000);
118 nv_wo32(base->eng, addr + 0x04, 0x00000000);
119 nv_wo32(base->eng, addr + 0x08, 0x00000000);
120 nv_wo32(base->eng, addr + 0x0c, 0x00000000);
121 nv_wo32(base->eng, addr + 0x10, 0x00000000);
122 nv_wo32(base->eng, addr + 0x14, 0x00000000);
123 bar->flush(bar);
124 return 0;
125}
126
127static int
128g84_fifo_object_attach(struct nvkm_object *parent,
129 struct nvkm_object *object, u32 handle)
130{
131 struct nv50_fifo_chan *chan = (void *)parent;
132 u32 context;
133
134 if (nv_iclass(object, NV_GPUOBJ_CLASS))
135 context = nv_gpuobj(object)->node->offset >> 4;
136 else
137 context = 0x00000004; /* just non-zero */
138
139 switch (nv_engidx(object->engine)) {
140 case NVDEV_ENGINE_DMAOBJ:
141 case NVDEV_ENGINE_SW : context |= 0x00000000; break;
142 case NVDEV_ENGINE_GR : context |= 0x00100000; break;
143 case NVDEV_ENGINE_MPEG :
144 case NVDEV_ENGINE_MSPPP : context |= 0x00200000; break;
145 case NVDEV_ENGINE_ME :
146 case NVDEV_ENGINE_CE0 : context |= 0x00300000; break;
147 case NVDEV_ENGINE_VP :
148 case NVDEV_ENGINE_MSPDEC: context |= 0x00400000; break;
149 case NVDEV_ENGINE_CIPHER:
150 case NVDEV_ENGINE_SEC :
151 case NVDEV_ENGINE_VIC : context |= 0x00500000; break;
152 case NVDEV_ENGINE_BSP :
153 case NVDEV_ENGINE_MSVLD : context |= 0x00600000; break;
154 default:
155 return -EINVAL;
156 }
157
158 return nvkm_ramht_insert(chan->ramht, 0, handle, context);
159}
160
161static int
162g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
163 struct nvkm_oclass *oclass, void *data, u32 size,
164 struct nvkm_object **pobject)
165{
166 union {
167 struct nv03_channel_dma_v0 v0;
168 } *args = data;
169 struct nvkm_bar *bar = nvkm_bar(parent);
170 struct nv50_fifo_base *base = (void *)parent;
171 struct nv50_fifo_chan *chan;
172 int ret;
173
174 nv_ioctl(parent, "create channel dma size %d\n", size);
175 if (nvif_unpack(args->v0, 0, 0, false)) {
176 nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
177 "offset %016llx\n", args->v0.version,
178 args->v0.pushbuf, args->v0.offset);
179 } else
180 return ret;
181
182 ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
183 0x2000, args->v0.pushbuf,
184 (1ULL << NVDEV_ENGINE_DMAOBJ) |
185 (1ULL << NVDEV_ENGINE_SW) |
186 (1ULL << NVDEV_ENGINE_GR) |
187 (1ULL << NVDEV_ENGINE_MPEG) |
188 (1ULL << NVDEV_ENGINE_ME) |
189 (1ULL << NVDEV_ENGINE_VP) |
190 (1ULL << NVDEV_ENGINE_CIPHER) |
191 (1ULL << NVDEV_ENGINE_SEC) |
192 (1ULL << NVDEV_ENGINE_BSP) |
193 (1ULL << NVDEV_ENGINE_MSVLD) |
194 (1ULL << NVDEV_ENGINE_MSPDEC) |
195 (1ULL << NVDEV_ENGINE_MSPPP) |
196 (1ULL << NVDEV_ENGINE_CE0) |
197 (1ULL << NVDEV_ENGINE_VIC), &chan);
198 *pobject = nv_object(chan);
199 if (ret)
200 return ret;
201
202 args->v0.chid = chan->base.chid;
203
204 ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
205 &chan->ramht);
206 if (ret)
207 return ret;
208
209 nv_parent(chan)->context_attach = g84_fifo_context_attach;
210 nv_parent(chan)->context_detach = g84_fifo_context_detach;
211 nv_parent(chan)->object_attach = g84_fifo_object_attach;
212 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
213
214 nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
215 nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
216 nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
217 nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
218 nv_wo32(base->ramfc, 0x3c, 0x003f6078);
219 nv_wo32(base->ramfc, 0x44, 0x01003fff);
220 nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
221 nv_wo32(base->ramfc, 0x4c, 0xffffffff);
222 nv_wo32(base->ramfc, 0x60, 0x7fffffff);
223 nv_wo32(base->ramfc, 0x78, 0x00000000);
224 nv_wo32(base->ramfc, 0x7c, 0x30000001);
225 nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
226 (4 << 24) /* SEARCH_FULL */ |
227 (chan->ramht->gpuobj.node->offset >> 4));
228 nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
229 nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
230 bar->flush(bar);
231 return 0;
232}
233
234static int
235g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
236 struct nvkm_oclass *oclass, void *data, u32 size,
237 struct nvkm_object **pobject)
238{
239 union {
240 struct nv50_channel_gpfifo_v0 v0;
241 } *args = data;
242 struct nvkm_bar *bar = nvkm_bar(parent);
243 struct nv50_fifo_base *base = (void *)parent;
244 struct nv50_fifo_chan *chan;
245 u64 ioffset, ilength;
246 int ret;
247
248 nv_ioctl(parent, "create channel gpfifo size %d\n", size);
249 if (nvif_unpack(args->v0, 0, 0, false)) {
250 nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
251 "ioffset %016llx ilength %08x\n",
252 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
253 args->v0.ilength);
254 } else
255 return ret;
256
257 ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
258 0x2000, args->v0.pushbuf,
259 (1ULL << NVDEV_ENGINE_DMAOBJ) |
260 (1ULL << NVDEV_ENGINE_SW) |
261 (1ULL << NVDEV_ENGINE_GR) |
262 (1ULL << NVDEV_ENGINE_MPEG) |
263 (1ULL << NVDEV_ENGINE_ME) |
264 (1ULL << NVDEV_ENGINE_VP) |
265 (1ULL << NVDEV_ENGINE_CIPHER) |
266 (1ULL << NVDEV_ENGINE_SEC) |
267 (1ULL << NVDEV_ENGINE_BSP) |
268 (1ULL << NVDEV_ENGINE_MSVLD) |
269 (1ULL << NVDEV_ENGINE_MSPDEC) |
270 (1ULL << NVDEV_ENGINE_MSPPP) |
271 (1ULL << NVDEV_ENGINE_CE0) |
272 (1ULL << NVDEV_ENGINE_VIC), &chan);
273 *pobject = nv_object(chan);
274 if (ret)
275 return ret;
276
277 args->v0.chid = chan->base.chid;
278
279 ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
280 &chan->ramht);
281 if (ret)
282 return ret;
283
284 nv_parent(chan)->context_attach = g84_fifo_context_attach;
285 nv_parent(chan)->context_detach = g84_fifo_context_detach;
286 nv_parent(chan)->object_attach = g84_fifo_object_attach;
287 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
288
289 ioffset = args->v0.ioffset;
290 ilength = order_base_2(args->v0.ilength / 8);
291
292 nv_wo32(base->ramfc, 0x3c, 0x403f6078);
293 nv_wo32(base->ramfc, 0x44, 0x01003fff);
294 nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
295 nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
296 nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
297 nv_wo32(base->ramfc, 0x60, 0x7fffffff);
298 nv_wo32(base->ramfc, 0x78, 0x00000000);
299 nv_wo32(base->ramfc, 0x7c, 0x30000001);
300 nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
301 (4 << 24) /* SEARCH_FULL */ |
302 (chan->ramht->gpuobj.node->offset >> 4));
303 nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
304 nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
305 bar->flush(bar);
306 return 0;
307}
308
309static int
310g84_fifo_chan_init(struct nvkm_object *object)
311{
312 struct nv50_fifo_priv *priv = (void *)object->engine;
313 struct nv50_fifo_base *base = (void *)object->parent;
314 struct nv50_fifo_chan *chan = (void *)object;
315 struct nvkm_gpuobj *ramfc = base->ramfc;
316 u32 chid = chan->base.chid;
317 int ret;
318
319 ret = nvkm_fifo_channel_init(&chan->base);
320 if (ret)
321 return ret;
322
323 nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
324 nv50_fifo_playlist_update(priv);
325 return 0;
326}
327
328static struct nvkm_ofuncs
329g84_fifo_ofuncs_dma = {
330 .ctor = g84_fifo_chan_ctor_dma,
331 .dtor = nv50_fifo_chan_dtor,
332 .init = g84_fifo_chan_init,
333 .fini = nv50_fifo_chan_fini,
334 .map = _nvkm_fifo_channel_map,
335 .rd32 = _nvkm_fifo_channel_rd32,
336 .wr32 = _nvkm_fifo_channel_wr32,
337 .ntfy = _nvkm_fifo_channel_ntfy
338};
339
340static struct nvkm_ofuncs
341g84_fifo_ofuncs_ind = {
342 .ctor = g84_fifo_chan_ctor_ind,
343 .dtor = nv50_fifo_chan_dtor,
344 .init = g84_fifo_chan_init,
345 .fini = nv50_fifo_chan_fini,
346 .map = _nvkm_fifo_channel_map,
347 .rd32 = _nvkm_fifo_channel_rd32,
348 .wr32 = _nvkm_fifo_channel_wr32,
349 .ntfy = _nvkm_fifo_channel_ntfy
350};
351
352static struct nvkm_oclass
353g84_fifo_sclass[] = {
354 { G82_CHANNEL_DMA, &g84_fifo_ofuncs_dma },
355 { G82_CHANNEL_GPFIFO, &g84_fifo_ofuncs_ind },
356 {}
357};
358
359/*******************************************************************************
360 * FIFO context - basically just the instmem reserved for the channel
361 ******************************************************************************/
362
363static int
364g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
365 struct nvkm_oclass *oclass, void *data, u32 size,
366 struct nvkm_object **pobject)
367{
368 struct nv50_fifo_base *base;
369 int ret;
370
371 ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
372 0x1000, NVOBJ_FLAG_HEAP, &base);
373 *pobject = nv_object(base);
374 if (ret)
375 return ret;
376
377 ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 0,
378 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
379 if (ret)
380 return ret;
381
382 ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0,
383 0, &base->pgd);
384 if (ret)
385 return ret;
386
387 ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
388 if (ret)
389 return ret;
390
391 ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1000,
392 0x400, NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
393 if (ret)
394 return ret;
395
396 ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0100,
397 0x100, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
398 if (ret)
399 return ret;
400
401 return 0;
402}
403
404static struct nvkm_oclass
405g84_fifo_cclass = {
406 .handle = NV_ENGCTX(FIFO, 0x84),
407 .ofuncs = &(struct nvkm_ofuncs) {
408 .ctor = g84_fifo_context_ctor,
409 .dtor = nv50_fifo_context_dtor,
410 .init = _nvkm_fifo_context_init,
411 .fini = _nvkm_fifo_context_fini,
412 .rd32 = _nvkm_fifo_context_rd32,
413 .wr32 = _nvkm_fifo_context_wr32,
414 },
415};
416
417/*******************************************************************************
418 * PFIFO engine
419 ******************************************************************************/
420 26
421static void 27static void
422g84_fifo_uevent_init(struct nvkm_event *event, int type, int index) 28g84_fifo_uevent_fini(struct nvkm_fifo *fifo)
423{ 29{
424 struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent); 30 struct nvkm_device *device = fifo->engine.subdev.device;
425 nv_mask(fifo, 0x002140, 0x40000000, 0x40000000); 31 nvkm_mask(device, 0x002140, 0x40000000, 0x00000000);
426} 32}
427 33
428static void 34static void
429g84_fifo_uevent_fini(struct nvkm_event *event, int type, int index) 35g84_fifo_uevent_init(struct nvkm_fifo *fifo)
430{ 36{
431 struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent); 37 struct nvkm_device *device = fifo->engine.subdev.device;
432 nv_mask(fifo, 0x002140, 0x40000000, 0x00000000); 38 nvkm_mask(device, 0x002140, 0x40000000, 0x40000000);
433} 39}
434 40
435static const struct nvkm_event_func 41static const struct nvkm_fifo_func
436g84_fifo_uevent_func = { 42g84_fifo = {
437 .ctor = nvkm_fifo_uevent_ctor, 43 .dtor = nv50_fifo_dtor,
438 .init = g84_fifo_uevent_init, 44 .oneinit = nv50_fifo_oneinit,
439 .fini = g84_fifo_uevent_fini, 45 .init = nv50_fifo_init,
46 .intr = nv04_fifo_intr,
47 .pause = nv04_fifo_pause,
48 .start = nv04_fifo_start,
49 .uevent_init = g84_fifo_uevent_init,
50 .uevent_fini = g84_fifo_uevent_fini,
51 .chan = {
52 &g84_fifo_dma_oclass,
53 &g84_fifo_gpfifo_oclass,
54 NULL
55 },
440}; 56};
441 57
442static int 58int
443g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 59g84_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
444 struct nvkm_oclass *oclass, void *data, u32 size,
445 struct nvkm_object **pobject)
446{ 60{
447 struct nv50_fifo_priv *priv; 61 return nv50_fifo_new_(&g84_fifo, device, index, pfifo);
448 int ret;
449
450 ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &priv);
451 *pobject = nv_object(priv);
452 if (ret)
453 return ret;
454
455 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
456 &priv->playlist[0]);
457 if (ret)
458 return ret;
459
460 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
461 &priv->playlist[1]);
462 if (ret)
463 return ret;
464
465 ret = nvkm_event_init(&g84_fifo_uevent_func, 1, 1, &priv->base.uevent);
466 if (ret)
467 return ret;
468
469 nv_subdev(priv)->unit = 0x00000100;
470 nv_subdev(priv)->intr = nv04_fifo_intr;
471 nv_engine(priv)->cclass = &g84_fifo_cclass;
472 nv_engine(priv)->sclass = g84_fifo_sclass;
473 priv->base.pause = nv04_fifo_pause;
474 priv->base.start = nv04_fifo_start;
475 return 0;
476} 62}
477
478struct nvkm_oclass *
479g84_fifo_oclass = &(struct nvkm_oclass) {
480 .handle = NV_ENGINE(FIFO, 0x84),
481 .ofuncs = &(struct nvkm_ofuncs) {
482 .ctor = g84_fifo_ctor,
483 .dtor = nv50_fifo_dtor,
484 .init = nv50_fifo_init,
485 .fini = _nvkm_fifo_fini,
486 },
487};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index b745252f2261..ff6fcbda615b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -21,365 +21,72 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <engine/fifo.h> 24#include "gf100.h"
25#include "changf100.h"
25 26
26#include <core/client.h> 27#include <core/client.h>
27#include <core/engctx.h>
28#include <core/enum.h> 28#include <core/enum.h>
29#include <core/handle.h> 29#include <core/gpuobj.h>
30#include <subdev/bar.h> 30#include <subdev/bar.h>
31#include <subdev/fb.h> 31#include <engine/sw.h>
32#include <subdev/mmu.h>
33#include <subdev/timer.h>
34 32
35#include <nvif/class.h> 33#include <nvif/class.h>
36#include <nvif/unpack.h>
37
38struct gf100_fifo_priv {
39 struct nvkm_fifo base;
40
41 struct work_struct fault;
42 u64 mask;
43
44 struct {
45 struct nvkm_gpuobj *mem[2];
46 int active;
47 wait_queue_head_t wait;
48 } runlist;
49
50 struct {
51 struct nvkm_gpuobj *mem;
52 struct nvkm_vma bar;
53 } user;
54 int spoon_nr;
55};
56
57struct gf100_fifo_base {
58 struct nvkm_fifo_base base;
59 struct nvkm_gpuobj *pgd;
60 struct nvkm_vm *vm;
61};
62
63struct gf100_fifo_chan {
64 struct nvkm_fifo_chan base;
65 enum {
66 STOPPED,
67 RUNNING,
68 KILLED
69 } state;
70};
71
72/*******************************************************************************
73 * FIFO channel objects
74 ******************************************************************************/
75 34
76static void 35static void
77gf100_fifo_runlist_update(struct gf100_fifo_priv *priv) 36gf100_fifo_uevent_init(struct nvkm_fifo *fifo)
78{ 37{
79 struct nvkm_bar *bar = nvkm_bar(priv); 38 struct nvkm_device *device = fifo->engine.subdev.device;
80 struct nvkm_gpuobj *cur; 39 nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
81 int i, p;
82
83 mutex_lock(&nv_subdev(priv)->mutex);
84 cur = priv->runlist.mem[priv->runlist.active];
85 priv->runlist.active = !priv->runlist.active;
86
87 for (i = 0, p = 0; i < 128; i++) {
88 struct gf100_fifo_chan *chan = (void *)priv->base.channel[i];
89 if (chan && chan->state == RUNNING) {
90 nv_wo32(cur, p + 0, i);
91 nv_wo32(cur, p + 4, 0x00000004);
92 p += 8;
93 }
94 }
95 bar->flush(bar);
96
97 nv_wr32(priv, 0x002270, cur->addr >> 12);
98 nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
99
100 if (wait_event_timeout(priv->runlist.wait,
101 !(nv_rd32(priv, 0x00227c) & 0x00100000),
102 msecs_to_jiffies(2000)) == 0)
103 nv_error(priv, "runlist update timeout\n");
104 mutex_unlock(&nv_subdev(priv)->mutex);
105} 40}
106 41
107static int 42static void
108gf100_fifo_context_attach(struct nvkm_object *parent, 43gf100_fifo_uevent_fini(struct nvkm_fifo *fifo)
109 struct nvkm_object *object)
110{
111 struct nvkm_bar *bar = nvkm_bar(parent);
112 struct gf100_fifo_base *base = (void *)parent->parent;
113 struct nvkm_engctx *ectx = (void *)object;
114 u32 addr;
115 int ret;
116
117 switch (nv_engidx(object->engine)) {
118 case NVDEV_ENGINE_SW : return 0;
119 case NVDEV_ENGINE_GR : addr = 0x0210; break;
120 case NVDEV_ENGINE_CE0 : addr = 0x0230; break;
121 case NVDEV_ENGINE_CE1 : addr = 0x0240; break;
122 case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
123 case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
124 case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
125 default:
126 return -EINVAL;
127 }
128
129 if (!ectx->vma.node) {
130 ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
131 NV_MEM_ACCESS_RW, &ectx->vma);
132 if (ret)
133 return ret;
134
135 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
136 }
137
138 nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
139 nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
140 bar->flush(bar);
141 return 0;
142}
143
144static int
145gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
146 struct nvkm_object *object)
147{ 44{
148 struct nvkm_bar *bar = nvkm_bar(parent); 45 struct nvkm_device *device = fifo->engine.subdev.device;
149 struct gf100_fifo_priv *priv = (void *)parent->engine; 46 nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
150 struct gf100_fifo_base *base = (void *)parent->parent;
151 struct gf100_fifo_chan *chan = (void *)parent;
152 u32 addr;
153
154 switch (nv_engidx(object->engine)) {
155 case NVDEV_ENGINE_SW : return 0;
156 case NVDEV_ENGINE_GR : addr = 0x0210; break;
157 case NVDEV_ENGINE_CE0 : addr = 0x0230; break;
158 case NVDEV_ENGINE_CE1 : addr = 0x0240; break;
159 case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
160 case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
161 case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
162 default:
163 return -EINVAL;
164 }
165
166 nv_wr32(priv, 0x002634, chan->base.chid);
167 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
168 nv_error(priv, "channel %d [%s] kick timeout\n",
169 chan->base.chid, nvkm_client_name(chan));
170 if (suspend)
171 return -EBUSY;
172 }
173
174 nv_wo32(base, addr + 0x00, 0x00000000);
175 nv_wo32(base, addr + 0x04, 0x00000000);
176 bar->flush(bar);
177 return 0;
178} 47}
179 48
180static int 49void
181gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 50gf100_fifo_runlist_update(struct gf100_fifo *fifo)
182 struct nvkm_oclass *oclass, void *data, u32 size,
183 struct nvkm_object **pobject)
184{ 51{
185 union {
186 struct nv50_channel_gpfifo_v0 v0;
187 } *args = data;
188 struct nvkm_bar *bar = nvkm_bar(parent);
189 struct gf100_fifo_priv *priv = (void *)engine;
190 struct gf100_fifo_base *base = (void *)parent;
191 struct gf100_fifo_chan *chan; 52 struct gf100_fifo_chan *chan;
192 u64 usermem, ioffset, ilength; 53 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
193 int ret, i; 54 struct nvkm_device *device = subdev->device;
194 55 struct nvkm_memory *cur;
195 nv_ioctl(parent, "create channel gpfifo size %d\n", size); 56 int nr = 0;
196 if (nvif_unpack(args->v0, 0, 0, false)) {
197 nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
198 "ioffset %016llx ilength %08x\n",
199 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
200 args->v0.ilength);
201 } else
202 return ret;
203 57
204 ret = nvkm_fifo_channel_create(parent, engine, oclass, 1, 58 mutex_lock(&subdev->mutex);
205 priv->user.bar.offset, 0x1000, 59 cur = fifo->runlist.mem[fifo->runlist.active];
206 args->v0.pushbuf, 60 fifo->runlist.active = !fifo->runlist.active;
207 (1ULL << NVDEV_ENGINE_SW) |
208 (1ULL << NVDEV_ENGINE_GR) |
209 (1ULL << NVDEV_ENGINE_CE0) |
210 (1ULL << NVDEV_ENGINE_CE1) |
211 (1ULL << NVDEV_ENGINE_MSVLD) |
212 (1ULL << NVDEV_ENGINE_MSPDEC) |
213 (1ULL << NVDEV_ENGINE_MSPPP), &chan);
214 *pobject = nv_object(chan);
215 if (ret)
216 return ret;
217 61
218 args->v0.chid = chan->base.chid; 62 nvkm_kmap(cur);
219 63 list_for_each_entry(chan, &fifo->chan, head) {
220 nv_parent(chan)->context_attach = gf100_fifo_context_attach; 64 nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
221 nv_parent(chan)->context_detach = gf100_fifo_context_detach; 65 nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
222 66 nr++;
223 usermem = chan->base.chid * 0x1000;
224 ioffset = args->v0.ioffset;
225 ilength = order_base_2(args->v0.ilength / 8);
226
227 for (i = 0; i < 0x1000; i += 4)
228 nv_wo32(priv->user.mem, usermem + i, 0x00000000);
229
230 nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
231 nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
232 nv_wo32(base, 0x10, 0x0000face);
233 nv_wo32(base, 0x30, 0xfffff902);
234 nv_wo32(base, 0x48, lower_32_bits(ioffset));
235 nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
236 nv_wo32(base, 0x54, 0x00000002);
237 nv_wo32(base, 0x84, 0x20400000);
238 nv_wo32(base, 0x94, 0x30000001);
239 nv_wo32(base, 0x9c, 0x00000100);
240 nv_wo32(base, 0xa4, 0x1f1f1f1f);
241 nv_wo32(base, 0xa8, 0x1f1f1f1f);
242 nv_wo32(base, 0xac, 0x0000001f);
243 nv_wo32(base, 0xb8, 0xf8000000);
244 nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
245 nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
246 bar->flush(bar);
247 return 0;
248}
249
250static int
251gf100_fifo_chan_init(struct nvkm_object *object)
252{
253 struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
254 struct gf100_fifo_priv *priv = (void *)object->engine;
255 struct gf100_fifo_chan *chan = (void *)object;
256 u32 chid = chan->base.chid;
257 int ret;
258
259 ret = nvkm_fifo_channel_init(&chan->base);
260 if (ret)
261 return ret;
262
263 nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
264
265 if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
266 nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
267 gf100_fifo_runlist_update(priv);
268 }
269
270 return 0;
271}
272
273static void gf100_fifo_intr_engine(struct gf100_fifo_priv *priv);
274
275static int
276gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
277{
278 struct gf100_fifo_priv *priv = (void *)object->engine;
279 struct gf100_fifo_chan *chan = (void *)object;
280 u32 chid = chan->base.chid;
281
282 if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
283 nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
284 gf100_fifo_runlist_update(priv);
285 } 67 }
68 nvkm_done(cur);
286 69
287 gf100_fifo_intr_engine(priv); 70 nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
288 71 nvkm_wr32(device, 0x002274, 0x01f00000 | nr);
289 nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
290 return nvkm_fifo_channel_fini(&chan->base, suspend);
291}
292
293static struct nvkm_ofuncs
294gf100_fifo_ofuncs = {
295 .ctor = gf100_fifo_chan_ctor,
296 .dtor = _nvkm_fifo_channel_dtor,
297 .init = gf100_fifo_chan_init,
298 .fini = gf100_fifo_chan_fini,
299 .map = _nvkm_fifo_channel_map,
300 .rd32 = _nvkm_fifo_channel_rd32,
301 .wr32 = _nvkm_fifo_channel_wr32,
302 .ntfy = _nvkm_fifo_channel_ntfy
303};
304
305static struct nvkm_oclass
306gf100_fifo_sclass[] = {
307 { FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
308 {}
309};
310
311/*******************************************************************************
312 * FIFO context - instmem heap and vm setup
313 ******************************************************************************/
314
315static int
316gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
317 struct nvkm_oclass *oclass, void *data, u32 size,
318 struct nvkm_object **pobject)
319{
320 struct gf100_fifo_base *base;
321 int ret;
322
323 ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
324 0x1000, NVOBJ_FLAG_ZERO_ALLOC |
325 NVOBJ_FLAG_HEAP, &base);
326 *pobject = nv_object(base);
327 if (ret)
328 return ret;
329
330 ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
331 &base->pgd);
332 if (ret)
333 return ret;
334
335 nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
336 nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
337 nv_wo32(base, 0x0208, 0xffffffff);
338 nv_wo32(base, 0x020c, 0x000000ff);
339 72
340 ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd); 73 if (wait_event_timeout(fifo->runlist.wait,
341 if (ret) 74 !(nvkm_rd32(device, 0x00227c) & 0x00100000),
342 return ret; 75 msecs_to_jiffies(2000)) == 0)
343 76 nvkm_error(subdev, "runlist update timeout\n");
344 return 0; 77 mutex_unlock(&subdev->mutex);
345}
346
347static void
348gf100_fifo_context_dtor(struct nvkm_object *object)
349{
350 struct gf100_fifo_base *base = (void *)object;
351 nvkm_vm_ref(NULL, &base->vm, base->pgd);
352 nvkm_gpuobj_ref(NULL, &base->pgd);
353 nvkm_fifo_context_destroy(&base->base);
354} 78}
355 79
356static struct nvkm_oclass
357gf100_fifo_cclass = {
358 .handle = NV_ENGCTX(FIFO, 0xc0),
359 .ofuncs = &(struct nvkm_ofuncs) {
360 .ctor = gf100_fifo_context_ctor,
361 .dtor = gf100_fifo_context_dtor,
362 .init = _nvkm_fifo_context_init,
363 .fini = _nvkm_fifo_context_fini,
364 .rd32 = _nvkm_fifo_context_rd32,
365 .wr32 = _nvkm_fifo_context_wr32,
366 },
367};
368
369/*******************************************************************************
370 * PFIFO engine
371 ******************************************************************************/
372
373static inline int 80static inline int
374gf100_fifo_engidx(struct gf100_fifo_priv *priv, u32 engn) 81gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
375{ 82{
376 switch (engn) { 83 switch (engn) {
377 case NVDEV_ENGINE_GR : engn = 0; break; 84 case NVKM_ENGINE_GR : engn = 0; break;
378 case NVDEV_ENGINE_MSVLD : engn = 1; break; 85 case NVKM_ENGINE_MSVLD : engn = 1; break;
379 case NVDEV_ENGINE_MSPPP : engn = 2; break; 86 case NVKM_ENGINE_MSPPP : engn = 2; break;
380 case NVDEV_ENGINE_MSPDEC: engn = 3; break; 87 case NVKM_ENGINE_MSPDEC: engn = 3; break;
381 case NVDEV_ENGINE_CE0 : engn = 4; break; 88 case NVKM_ENGINE_CE0 : engn = 4; break;
382 case NVDEV_ENGINE_CE1 : engn = 5; break; 89 case NVKM_ENGINE_CE1 : engn = 5; break;
383 default: 90 default:
384 return -1; 91 return -1;
385 } 92 }
@@ -388,95 +95,73 @@ gf100_fifo_engidx(struct gf100_fifo_priv *priv, u32 engn)
388} 95}
389 96
390static inline struct nvkm_engine * 97static inline struct nvkm_engine *
391gf100_fifo_engine(struct gf100_fifo_priv *priv, u32 engn) 98gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
392{ 99{
100 struct nvkm_device *device = fifo->base.engine.subdev.device;
101
393 switch (engn) { 102 switch (engn) {
394 case 0: engn = NVDEV_ENGINE_GR; break; 103 case 0: engn = NVKM_ENGINE_GR; break;
395 case 1: engn = NVDEV_ENGINE_MSVLD; break; 104 case 1: engn = NVKM_ENGINE_MSVLD; break;
396 case 2: engn = NVDEV_ENGINE_MSPPP; break; 105 case 2: engn = NVKM_ENGINE_MSPPP; break;
397 case 3: engn = NVDEV_ENGINE_MSPDEC; break; 106 case 3: engn = NVKM_ENGINE_MSPDEC; break;
398 case 4: engn = NVDEV_ENGINE_CE0; break; 107 case 4: engn = NVKM_ENGINE_CE0; break;
399 case 5: engn = NVDEV_ENGINE_CE1; break; 108 case 5: engn = NVKM_ENGINE_CE1; break;
400 default: 109 default:
401 return NULL; 110 return NULL;
402 } 111 }
403 112
404 return nvkm_engine(priv, engn); 113 return nvkm_device_engine(device, engn);
405} 114}
406 115
407static void 116static void
408gf100_fifo_recover_work(struct work_struct *work) 117gf100_fifo_recover_work(struct work_struct *work)
409{ 118{
410 struct gf100_fifo_priv *priv = container_of(work, typeof(*priv), fault); 119 struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
411 struct nvkm_object *engine; 120 struct nvkm_device *device = fifo->base.engine.subdev.device;
121 struct nvkm_engine *engine;
412 unsigned long flags; 122 unsigned long flags;
413 u32 engn, engm = 0; 123 u32 engn, engm = 0;
414 u64 mask, todo; 124 u64 mask, todo;
415 125
416 spin_lock_irqsave(&priv->base.lock, flags); 126 spin_lock_irqsave(&fifo->base.lock, flags);
417 mask = priv->mask; 127 mask = fifo->mask;
418 priv->mask = 0ULL; 128 fifo->mask = 0ULL;
419 spin_unlock_irqrestore(&priv->base.lock, flags); 129 spin_unlock_irqrestore(&fifo->base.lock, flags);
420 130
421 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) 131 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
422 engm |= 1 << gf100_fifo_engidx(priv, engn); 132 engm |= 1 << gf100_fifo_engidx(fifo, engn);
423 nv_mask(priv, 0x002630, engm, engm); 133 nvkm_mask(device, 0x002630, engm, engm);
424 134
425 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) { 135 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
426 if ((engine = (void *)nvkm_engine(priv, engn))) { 136 if ((engine = nvkm_device_engine(device, engn))) {
427 nv_ofuncs(engine)->fini(engine, false); 137 nvkm_subdev_fini(&engine->subdev, false);
428 WARN_ON(nv_ofuncs(engine)->init(engine)); 138 WARN_ON(nvkm_subdev_init(&engine->subdev));
429 } 139 }
430 } 140 }
431 141
432 gf100_fifo_runlist_update(priv); 142 gf100_fifo_runlist_update(fifo);
433 nv_wr32(priv, 0x00262c, engm); 143 nvkm_wr32(device, 0x00262c, engm);
434 nv_mask(priv, 0x002630, engm, 0x00000000); 144 nvkm_mask(device, 0x002630, engm, 0x00000000);
435} 145}
436 146
437static void 147static void
438gf100_fifo_recover(struct gf100_fifo_priv *priv, struct nvkm_engine *engine, 148gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
439 struct gf100_fifo_chan *chan) 149 struct gf100_fifo_chan *chan)
440{ 150{
151 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
152 struct nvkm_device *device = subdev->device;
441 u32 chid = chan->base.chid; 153 u32 chid = chan->base.chid;
442 unsigned long flags;
443 154
444 nv_error(priv, "%s engine fault on channel %d, recovering...\n", 155 nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
445 nv_subdev(engine)->name, chid); 156 nvkm_subdev_name[engine->subdev.index], chid);
157 assert_spin_locked(&fifo->base.lock);
446 158
447 nv_mask(priv, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000); 159 nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
448 chan->state = KILLED; 160 list_del_init(&chan->head);
161 chan->killed = true;
449 162
450 spin_lock_irqsave(&priv->base.lock, flags); 163 fifo->mask |= 1ULL << engine->subdev.index;
451 priv->mask |= 1ULL << nv_engidx(engine); 164 schedule_work(&fifo->fault);
452 spin_unlock_irqrestore(&priv->base.lock, flags);
453 schedule_work(&priv->fault);
454}
455
456static int
457gf100_fifo_swmthd(struct gf100_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
458{
459 struct gf100_fifo_chan *chan = NULL;
460 struct nvkm_handle *bind;
461 unsigned long flags;
462 int ret = -EINVAL;
463
464 spin_lock_irqsave(&priv->base.lock, flags);
465 if (likely(chid >= priv->base.min && chid <= priv->base.max))
466 chan = (void *)priv->base.channel[chid];
467 if (unlikely(!chan))
468 goto out;
469
470 bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
471 if (likely(bind)) {
472 if (!mthd || !nv_call(bind->object, mthd, data))
473 ret = 0;
474 nvkm_namedb_put(bind);
475 }
476
477out:
478 spin_unlock_irqrestore(&priv->base.lock, flags);
479 return ret;
480} 165}
481 166
482static const struct nvkm_enum 167static const struct nvkm_enum
@@ -486,14 +171,17 @@ gf100_fifo_sched_reason[] = {
486}; 171};
487 172
488static void 173static void
489gf100_fifo_intr_sched_ctxsw(struct gf100_fifo_priv *priv) 174gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
490{ 175{
176 struct nvkm_device *device = fifo->base.engine.subdev.device;
491 struct nvkm_engine *engine; 177 struct nvkm_engine *engine;
492 struct gf100_fifo_chan *chan; 178 struct gf100_fifo_chan *chan;
179 unsigned long flags;
493 u32 engn; 180 u32 engn;
494 181
182 spin_lock_irqsave(&fifo->base.lock, flags);
495 for (engn = 0; engn < 6; engn++) { 183 for (engn = 0; engn < 6; engn++) {
496 u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04)); 184 u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
497 u32 busy = (stat & 0x80000000); 185 u32 busy = (stat & 0x80000000);
498 u32 save = (stat & 0x00100000); /* maybe? */ 186 u32 save = (stat & 0x00100000); /* maybe? */
499 u32 unk0 = (stat & 0x00040000); 187 u32 unk0 = (stat & 0x00040000);
@@ -502,32 +190,36 @@ gf100_fifo_intr_sched_ctxsw(struct gf100_fifo_priv *priv)
502 (void)save; 190 (void)save;
503 191
504 if (busy && unk0 && unk1) { 192 if (busy && unk0 && unk1) {
505 if (!(chan = (void *)priv->base.channel[chid])) 193 list_for_each_entry(chan, &fifo->chan, head) {
506 continue; 194 if (chan->base.chid == chid) {
507 if (!(engine = gf100_fifo_engine(priv, engn))) 195 engine = gf100_fifo_engine(fifo, engn);
508 continue; 196 if (!engine)
509 gf100_fifo_recover(priv, engine, chan); 197 break;
198 gf100_fifo_recover(fifo, engine, chan);
199 break;
200 }
201 }
510 } 202 }
511 } 203 }
204 spin_unlock_irqrestore(&fifo->base.lock, flags);
512} 205}
513 206
514static void 207static void
515gf100_fifo_intr_sched(struct gf100_fifo_priv *priv) 208gf100_fifo_intr_sched(struct gf100_fifo *fifo)
516{ 209{
517 u32 intr = nv_rd32(priv, 0x00254c); 210 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
211 struct nvkm_device *device = subdev->device;
212 u32 intr = nvkm_rd32(device, 0x00254c);
518 u32 code = intr & 0x000000ff; 213 u32 code = intr & 0x000000ff;
519 const struct nvkm_enum *en; 214 const struct nvkm_enum *en;
520 char enunk[6] = "";
521 215
522 en = nvkm_enum_find(gf100_fifo_sched_reason, code); 216 en = nvkm_enum_find(gf100_fifo_sched_reason, code);
523 if (!en)
524 snprintf(enunk, sizeof(enunk), "UNK%02x", code);
525 217
526 nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk); 218 nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
527 219
528 switch (code) { 220 switch (code) {
529 case 0x0a: 221 case 0x0a:
530 gf100_fifo_intr_sched_ctxsw(priv); 222 gf100_fifo_intr_sched_ctxsw(fifo);
531 break; 223 break;
532 default: 224 default:
533 break; 225 break;
@@ -536,17 +228,17 @@ gf100_fifo_intr_sched(struct gf100_fifo_priv *priv)
536 228
537static const struct nvkm_enum 229static const struct nvkm_enum
538gf100_fifo_fault_engine[] = { 230gf100_fifo_fault_engine[] = {
539 { 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR }, 231 { 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
540 { 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB }, 232 { 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
541 { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR }, 233 { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
542 { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM }, 234 { 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
543 { 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO }, 235 { 0x07, "PFIFO", NULL, NVKM_ENGINE_FIFO },
544 { 0x10, "PMSVLD", NULL, NVDEV_ENGINE_MSVLD }, 236 { 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
545 { 0x11, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP }, 237 { 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
546 { 0x13, "PCOUNTER" }, 238 { 0x13, "PCOUNTER" },
547 { 0x14, "PMSPDEC", NULL, NVDEV_ENGINE_MSPDEC }, 239 { 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
548 { 0x15, "PCE0", NULL, NVDEV_ENGINE_CE0 }, 240 { 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 },
549 { 0x16, "PCE1", NULL, NVDEV_ENGINE_CE1 }, 241 { 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 },
550 { 0x17, "PDAEMON" }, 242 { 0x17, "PDAEMON" },
551 {} 243 {}
552}; 244};
@@ -594,79 +286,65 @@ gf100_fifo_fault_gpcclient[] = {
594}; 286};
595 287
596static void 288static void
597gf100_fifo_intr_fault(struct gf100_fifo_priv *priv, int unit) 289gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
598{ 290{
599 u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10)); 291 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
600 u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10)); 292 struct nvkm_device *device = subdev->device;
601 u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10)); 293 u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
602 u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10)); 294 u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
295 u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
296 u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
603 u32 gpc = (stat & 0x1f000000) >> 24; 297 u32 gpc = (stat & 0x1f000000) >> 24;
604 u32 client = (stat & 0x00001f00) >> 8; 298 u32 client = (stat & 0x00001f00) >> 8;
605 u32 write = (stat & 0x00000080); 299 u32 write = (stat & 0x00000080);
606 u32 hub = (stat & 0x00000040); 300 u32 hub = (stat & 0x00000040);
607 u32 reason = (stat & 0x0000000f); 301 u32 reason = (stat & 0x0000000f);
608 struct nvkm_object *engctx = NULL, *object;
609 struct nvkm_engine *engine = NULL;
610 const struct nvkm_enum *er, *eu, *ec; 302 const struct nvkm_enum *er, *eu, *ec;
611 char erunk[6] = ""; 303 struct nvkm_engine *engine = NULL;
612 char euunk[6] = ""; 304 struct nvkm_fifo_chan *chan;
613 char ecunk[6] = ""; 305 unsigned long flags;
614 char gpcid[3] = ""; 306 char gpcid[8] = "";
615 307
616 er = nvkm_enum_find(gf100_fifo_fault_reason, reason); 308 er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
617 if (!er)
618 snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
619
620 eu = nvkm_enum_find(gf100_fifo_fault_engine, unit); 309 eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
310 if (hub) {
311 ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
312 } else {
313 ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
314 snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
315 }
316
621 if (eu) { 317 if (eu) {
622 switch (eu->data2) { 318 switch (eu->data2) {
623 case NVDEV_SUBDEV_BAR: 319 case NVKM_SUBDEV_BAR:
624 nv_mask(priv, 0x001704, 0x00000000, 0x00000000); 320 nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
625 break; 321 break;
626 case NVDEV_SUBDEV_INSTMEM: 322 case NVKM_SUBDEV_INSTMEM:
627 nv_mask(priv, 0x001714, 0x00000000, 0x00000000); 323 nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
628 break; 324 break;
629 case NVDEV_ENGINE_IFB: 325 case NVKM_ENGINE_IFB:
630 nv_mask(priv, 0x001718, 0x00000000, 0x00000000); 326 nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
631 break; 327 break;
632 default: 328 default:
633 engine = nvkm_engine(priv, eu->data2); 329 engine = nvkm_device_engine(device, eu->data2);
634 if (engine)
635 engctx = nvkm_engctx_get(engine, inst);
636 break; 330 break;
637 } 331 }
638 } else {
639 snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
640 } 332 }
641 333
642 if (hub) { 334 chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
643 ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
644 } else {
645 ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
646 snprintf(gpcid, sizeof(gpcid), "%d", gpc);
647 }
648 335
649 if (!ec) 336 nvkm_error(subdev,
650 snprintf(ecunk, sizeof(ecunk), "UNK%02x", client); 337 "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
651 338 "reason %02x [%s] on channel %d [%010llx %s]\n",
652 nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on " 339 write ? "write" : "read", (u64)vahi << 32 | valo,
653 "channel 0x%010llx [%s]\n", write ? "write" : "read", 340 unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
654 (u64)vahi << 32 | valo, er ? er->name : erunk, 341 reason, er ? er->name : "", chan ? chan->chid : -1,
655 eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/", 342 (u64)inst << 12,
656 ec ? ec->name : ecunk, (u64)inst << 12, 343 chan ? chan->object.client->name : "unknown");
657 nvkm_client_name(engctx));
658
659 object = engctx;
660 while (object) {
661 switch (nv_mclass(object)) {
662 case FERMI_CHANNEL_GPFIFO:
663 gf100_fifo_recover(priv, engine, (void *)object);
664 break;
665 }
666 object = object->parent;
667 }
668 344
669 nvkm_engctx_put(engctx); 345 if (engine && chan)
346 gf100_fifo_recover(fifo, engine, (void *)chan);
347 nvkm_fifo_chan_put(&fifo->base, flags, &chan);
670} 348}
671 349
672static const struct nvkm_bitfield 350static const struct nvkm_bitfield
@@ -678,290 +356,288 @@ gf100_fifo_pbdma_intr[] = {
678}; 356};
679 357
680static void 358static void
681gf100_fifo_intr_pbdma(struct gf100_fifo_priv *priv, int unit) 359gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
682{ 360{
683 u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)); 361 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
684 u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000)); 362 struct nvkm_device *device = subdev->device;
685 u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000)); 363 u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
686 u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f; 364 u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
365 u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
366 u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
687 u32 subc = (addr & 0x00070000) >> 16; 367 u32 subc = (addr & 0x00070000) >> 16;
688 u32 mthd = (addr & 0x00003ffc); 368 u32 mthd = (addr & 0x00003ffc);
689 u32 show = stat; 369 struct nvkm_fifo_chan *chan;
370 unsigned long flags;
371 u32 show= stat;
372 char msg[128];
690 373
691 if (stat & 0x00800000) { 374 if (stat & 0x00800000) {
692 if (!gf100_fifo_swmthd(priv, chid, mthd, data)) 375 if (device->sw) {
693 show &= ~0x00800000; 376 if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
377 show &= ~0x00800000;
378 }
694 } 379 }
695 380
696 if (show) { 381 if (show) {
697 nv_error(priv, "PBDMA%d:", unit); 382 nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
698 nvkm_bitfield_print(gf100_fifo_pbdma_intr, show); 383 chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
699 pr_cont("\n"); 384 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
700 nv_error(priv, 385 "subc %d mthd %04x data %08x\n",
701 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n", 386 unit, show, msg, chid, chan ? chan->inst->addr : 0,
702 unit, chid, 387 chan ? chan->object.client->name : "unknown",
703 nvkm_client_name_for_fifo_chid(&priv->base, chid), 388 subc, mthd, data);
704 subc, mthd, data); 389 nvkm_fifo_chan_put(&fifo->base, flags, &chan);
705 } 390 }
706 391
707 nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008); 392 nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
708 nv_wr32(priv, 0x040108 + (unit * 0x2000), stat); 393 nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
709} 394}
710 395
711static void 396static void
712gf100_fifo_intr_runlist(struct gf100_fifo_priv *priv) 397gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
713{ 398{
714 u32 intr = nv_rd32(priv, 0x002a00); 399 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
400 struct nvkm_device *device = subdev->device;
401 u32 intr = nvkm_rd32(device, 0x002a00);
715 402
716 if (intr & 0x10000000) { 403 if (intr & 0x10000000) {
717 wake_up(&priv->runlist.wait); 404 wake_up(&fifo->runlist.wait);
718 nv_wr32(priv, 0x002a00, 0x10000000); 405 nvkm_wr32(device, 0x002a00, 0x10000000);
719 intr &= ~0x10000000; 406 intr &= ~0x10000000;
720 } 407 }
721 408
722 if (intr) { 409 if (intr) {
723 nv_error(priv, "RUNLIST 0x%08x\n", intr); 410 nvkm_error(subdev, "RUNLIST %08x\n", intr);
724 nv_wr32(priv, 0x002a00, intr); 411 nvkm_wr32(device, 0x002a00, intr);
725 } 412 }
726} 413}
727 414
728static void 415static void
729gf100_fifo_intr_engine_unit(struct gf100_fifo_priv *priv, int engn) 416gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
730{ 417{
731 u32 intr = nv_rd32(priv, 0x0025a8 + (engn * 0x04)); 418 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
732 u32 inte = nv_rd32(priv, 0x002628); 419 struct nvkm_device *device = subdev->device;
420 u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
421 u32 inte = nvkm_rd32(device, 0x002628);
733 u32 unkn; 422 u32 unkn;
734 423
735 nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr); 424 nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);
736 425
737 for (unkn = 0; unkn < 8; unkn++) { 426 for (unkn = 0; unkn < 8; unkn++) {
738 u32 ints = (intr >> (unkn * 0x04)) & inte; 427 u32 ints = (intr >> (unkn * 0x04)) & inte;
739 if (ints & 0x1) { 428 if (ints & 0x1) {
740 nvkm_fifo_uevent(&priv->base); 429 nvkm_fifo_uevent(&fifo->base);
741 ints &= ~1; 430 ints &= ~1;
742 } 431 }
743 if (ints) { 432 if (ints) {
744 nv_error(priv, "ENGINE %d %d %01x", engn, unkn, ints); 433 nvkm_error(subdev, "ENGINE %d %d %01x",
745 nv_mask(priv, 0x002628, ints, 0); 434 engn, unkn, ints);
435 nvkm_mask(device, 0x002628, ints, 0);
746 } 436 }
747 } 437 }
748} 438}
749 439
750static void 440void
751gf100_fifo_intr_engine(struct gf100_fifo_priv *priv) 441gf100_fifo_intr_engine(struct gf100_fifo *fifo)
752{ 442{
753 u32 mask = nv_rd32(priv, 0x0025a4); 443 struct nvkm_device *device = fifo->base.engine.subdev.device;
444 u32 mask = nvkm_rd32(device, 0x0025a4);
754 while (mask) { 445 while (mask) {
755 u32 unit = __ffs(mask); 446 u32 unit = __ffs(mask);
756 gf100_fifo_intr_engine_unit(priv, unit); 447 gf100_fifo_intr_engine_unit(fifo, unit);
757 mask &= ~(1 << unit); 448 mask &= ~(1 << unit);
758 } 449 }
759} 450}
760 451
761static void 452static void
762gf100_fifo_intr(struct nvkm_subdev *subdev) 453gf100_fifo_intr(struct nvkm_fifo *base)
763{ 454{
764 struct gf100_fifo_priv *priv = (void *)subdev; 455 struct gf100_fifo *fifo = gf100_fifo(base);
765 u32 mask = nv_rd32(priv, 0x002140); 456 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
766 u32 stat = nv_rd32(priv, 0x002100) & mask; 457 struct nvkm_device *device = subdev->device;
458 u32 mask = nvkm_rd32(device, 0x002140);
459 u32 stat = nvkm_rd32(device, 0x002100) & mask;
767 460
768 if (stat & 0x00000001) { 461 if (stat & 0x00000001) {
769 u32 intr = nv_rd32(priv, 0x00252c); 462 u32 intr = nvkm_rd32(device, 0x00252c);
770 nv_warn(priv, "INTR 0x00000001: 0x%08x\n", intr); 463 nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
771 nv_wr32(priv, 0x002100, 0x00000001); 464 nvkm_wr32(device, 0x002100, 0x00000001);
772 stat &= ~0x00000001; 465 stat &= ~0x00000001;
773 } 466 }
774 467
775 if (stat & 0x00000100) { 468 if (stat & 0x00000100) {
776 gf100_fifo_intr_sched(priv); 469 gf100_fifo_intr_sched(fifo);
777 nv_wr32(priv, 0x002100, 0x00000100); 470 nvkm_wr32(device, 0x002100, 0x00000100);
778 stat &= ~0x00000100; 471 stat &= ~0x00000100;
779 } 472 }
780 473
781 if (stat & 0x00010000) { 474 if (stat & 0x00010000) {
782 u32 intr = nv_rd32(priv, 0x00256c); 475 u32 intr = nvkm_rd32(device, 0x00256c);
783 nv_warn(priv, "INTR 0x00010000: 0x%08x\n", intr); 476 nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
784 nv_wr32(priv, 0x002100, 0x00010000); 477 nvkm_wr32(device, 0x002100, 0x00010000);
785 stat &= ~0x00010000; 478 stat &= ~0x00010000;
786 } 479 }
787 480
788 if (stat & 0x01000000) { 481 if (stat & 0x01000000) {
789 u32 intr = nv_rd32(priv, 0x00258c); 482 u32 intr = nvkm_rd32(device, 0x00258c);
790 nv_warn(priv, "INTR 0x01000000: 0x%08x\n", intr); 483 nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
791 nv_wr32(priv, 0x002100, 0x01000000); 484 nvkm_wr32(device, 0x002100, 0x01000000);
792 stat &= ~0x01000000; 485 stat &= ~0x01000000;
793 } 486 }
794 487
795 if (stat & 0x10000000) { 488 if (stat & 0x10000000) {
796 u32 mask = nv_rd32(priv, 0x00259c); 489 u32 mask = nvkm_rd32(device, 0x00259c);
797 while (mask) { 490 while (mask) {
798 u32 unit = __ffs(mask); 491 u32 unit = __ffs(mask);
799 gf100_fifo_intr_fault(priv, unit); 492 gf100_fifo_intr_fault(fifo, unit);
800 nv_wr32(priv, 0x00259c, (1 << unit)); 493 nvkm_wr32(device, 0x00259c, (1 << unit));
801 mask &= ~(1 << unit); 494 mask &= ~(1 << unit);
802 } 495 }
803 stat &= ~0x10000000; 496 stat &= ~0x10000000;
804 } 497 }
805 498
806 if (stat & 0x20000000) { 499 if (stat & 0x20000000) {
807 u32 mask = nv_rd32(priv, 0x0025a0); 500 u32 mask = nvkm_rd32(device, 0x0025a0);
808 while (mask) { 501 while (mask) {
809 u32 unit = __ffs(mask); 502 u32 unit = __ffs(mask);
810 gf100_fifo_intr_pbdma(priv, unit); 503 gf100_fifo_intr_pbdma(fifo, unit);
811 nv_wr32(priv, 0x0025a0, (1 << unit)); 504 nvkm_wr32(device, 0x0025a0, (1 << unit));
812 mask &= ~(1 << unit); 505 mask &= ~(1 << unit);
813 } 506 }
814 stat &= ~0x20000000; 507 stat &= ~0x20000000;
815 } 508 }
816 509
817 if (stat & 0x40000000) { 510 if (stat & 0x40000000) {
818 gf100_fifo_intr_runlist(priv); 511 gf100_fifo_intr_runlist(fifo);
819 stat &= ~0x40000000; 512 stat &= ~0x40000000;
820 } 513 }
821 514
822 if (stat & 0x80000000) { 515 if (stat & 0x80000000) {
823 gf100_fifo_intr_engine(priv); 516 gf100_fifo_intr_engine(fifo);
824 stat &= ~0x80000000; 517 stat &= ~0x80000000;
825 } 518 }
826 519
827 if (stat) { 520 if (stat) {
828 nv_error(priv, "INTR 0x%08x\n", stat); 521 nvkm_error(subdev, "INTR %08x\n", stat);
829 nv_mask(priv, 0x002140, stat, 0x00000000); 522 nvkm_mask(device, 0x002140, stat, 0x00000000);
830 nv_wr32(priv, 0x002100, stat); 523 nvkm_wr32(device, 0x002100, stat);
831 } 524 }
832} 525}
833 526
834static void
835gf100_fifo_uevent_init(struct nvkm_event *event, int type, int index)
836{
837 struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
838 nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
839}
840
841static void
842gf100_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
843{
844 struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
845 nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
846}
847
848static const struct nvkm_event_func
849gf100_fifo_uevent_func = {
850 .ctor = nvkm_fifo_uevent_ctor,
851 .init = gf100_fifo_uevent_init,
852 .fini = gf100_fifo_uevent_fini,
853};
854
855static int 527static int
856gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 528gf100_fifo_oneinit(struct nvkm_fifo *base)
857 struct nvkm_oclass *oclass, void *data, u32 size,
858 struct nvkm_object **pobject)
859{ 529{
860 struct gf100_fifo_priv *priv; 530 struct gf100_fifo *fifo = gf100_fifo(base);
531 struct nvkm_device *device = fifo->base.engine.subdev.device;
861 int ret; 532 int ret;
862 533
863 ret = nvkm_fifo_create(parent, engine, oclass, 0, 127, &priv); 534 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
864 *pobject = nv_object(priv); 535 false, &fifo->runlist.mem[0]);
865 if (ret)
866 return ret;
867
868 INIT_WORK(&priv->fault, gf100_fifo_recover_work);
869
870 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
871 &priv->runlist.mem[0]);
872 if (ret) 536 if (ret)
873 return ret; 537 return ret;
874 538
875 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0, 539 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
876 &priv->runlist.mem[1]); 540 false, &fifo->runlist.mem[1]);
877 if (ret) 541 if (ret)
878 return ret; 542 return ret;
879 543
880 init_waitqueue_head(&priv->runlist.wait); 544 init_waitqueue_head(&fifo->runlist.wait);
881 545
882 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0, 546 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
883 &priv->user.mem); 547 0x1000, false, &fifo->user.mem);
884 if (ret) 548 if (ret)
885 return ret; 549 return ret;
886 550
887 ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW, 551 ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
888 &priv->user.bar);
889 if (ret) 552 if (ret)
890 return ret; 553 return ret;
891 554
892 ret = nvkm_event_init(&gf100_fifo_uevent_func, 1, 1, &priv->base.uevent); 555 nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
893 if (ret)
894 return ret;
895
896 nv_subdev(priv)->unit = 0x00000100;
897 nv_subdev(priv)->intr = gf100_fifo_intr;
898 nv_engine(priv)->cclass = &gf100_fifo_cclass;
899 nv_engine(priv)->sclass = gf100_fifo_sclass;
900 return 0; 556 return 0;
901} 557}
902 558
903static void 559static void
904gf100_fifo_dtor(struct nvkm_object *object) 560gf100_fifo_fini(struct nvkm_fifo *base)
905{ 561{
906 struct gf100_fifo_priv *priv = (void *)object; 562 struct gf100_fifo *fifo = gf100_fifo(base);
907 563 flush_work(&fifo->fault);
908 nvkm_gpuobj_unmap(&priv->user.bar);
909 nvkm_gpuobj_ref(NULL, &priv->user.mem);
910 nvkm_gpuobj_ref(NULL, &priv->runlist.mem[0]);
911 nvkm_gpuobj_ref(NULL, &priv->runlist.mem[1]);
912
913 nvkm_fifo_destroy(&priv->base);
914} 564}
915 565
916static int 566static void
917gf100_fifo_init(struct nvkm_object *object) 567gf100_fifo_init(struct nvkm_fifo *base)
918{ 568{
919 struct gf100_fifo_priv *priv = (void *)object; 569 struct gf100_fifo *fifo = gf100_fifo(base);
920 int ret, i; 570 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
921 571 struct nvkm_device *device = subdev->device;
922 ret = nvkm_fifo_init(&priv->base); 572 int i;
923 if (ret)
924 return ret;
925 573
926 nv_wr32(priv, 0x000204, 0xffffffff); 574 nvkm_wr32(device, 0x000204, 0xffffffff);
927 nv_wr32(priv, 0x002204, 0xffffffff); 575 nvkm_wr32(device, 0x002204, 0xffffffff);
928 576
929 priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204)); 577 fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
930 nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr); 578 nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
931 579
932 /* assign engines to PBDMAs */ 580 /* assign engines to PBDMAs */
933 if (priv->spoon_nr >= 3) { 581 if (fifo->spoon_nr >= 3) {
934 nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */ 582 nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
935 nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */ 583 nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
936 nv_wr32(priv, 0x002210, ~(1 << 1)); /* PMSPP */ 584 nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
937 nv_wr32(priv, 0x002214, ~(1 << 1)); /* PMSVLD */ 585 nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
938 nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */ 586 nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
939 nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */ 587 nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
940 } 588 }
941 589
942 /* PBDMA[n] */ 590 /* PBDMA[n] */
943 for (i = 0; i < priv->spoon_nr; i++) { 591 for (i = 0; i < fifo->spoon_nr; i++) {
944 nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); 592 nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
945 nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ 593 nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
946 nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ 594 nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
947 } 595 }
948 596
949 nv_mask(priv, 0x002200, 0x00000001, 0x00000001); 597 nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
950 nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12); 598 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
951 599
952 nv_wr32(priv, 0x002100, 0xffffffff); 600 nvkm_wr32(device, 0x002100, 0xffffffff);
953 nv_wr32(priv, 0x002140, 0x7fffffff); 601 nvkm_wr32(device, 0x002140, 0x7fffffff);
954 nv_wr32(priv, 0x002628, 0x00000001); /* ENGINE_INTR_EN */ 602 nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
955 return 0; 603}
604
605static void *
606gf100_fifo_dtor(struct nvkm_fifo *base)
607{
608 struct gf100_fifo *fifo = gf100_fifo(base);
609 nvkm_vm_put(&fifo->user.bar);
610 nvkm_memory_del(&fifo->user.mem);
611 nvkm_memory_del(&fifo->runlist.mem[0]);
612 nvkm_memory_del(&fifo->runlist.mem[1]);
613 return fifo;
956} 614}
957 615
958struct nvkm_oclass * 616static const struct nvkm_fifo_func
959gf100_fifo_oclass = &(struct nvkm_oclass) { 617gf100_fifo = {
960 .handle = NV_ENGINE(FIFO, 0xc0), 618 .dtor = gf100_fifo_dtor,
961 .ofuncs = &(struct nvkm_ofuncs) { 619 .oneinit = gf100_fifo_oneinit,
962 .ctor = gf100_fifo_ctor, 620 .init = gf100_fifo_init,
963 .dtor = gf100_fifo_dtor, 621 .fini = gf100_fifo_fini,
964 .init = gf100_fifo_init, 622 .intr = gf100_fifo_intr,
965 .fini = _nvkm_fifo_fini, 623 .uevent_init = gf100_fifo_uevent_init,
624 .uevent_fini = gf100_fifo_uevent_fini,
625 .chan = {
626 &gf100_fifo_gpfifo_oclass,
627 NULL
966 }, 628 },
967}; 629};
630
631int
632gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
633{
634 struct gf100_fifo *fifo;
635
636 if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
637 return -ENOMEM;
638 INIT_LIST_HEAD(&fifo->chan);
639 INIT_WORK(&fifo->fault, gf100_fifo_recover_work);
640 *pfifo = &fifo->base;
641
642 return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
643}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
new file mode 100644
index 000000000000..c649ca9b53e3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
@@ -0,0 +1,31 @@
1#ifndef __GF100_FIFO_H__
2#define __GF100_FIFO_H__
3#define gf100_fifo(p) container_of((p), struct gf100_fifo, base)
4#include "priv.h"
5
6#include <subdev/mmu.h>
7
8struct gf100_fifo {
9 struct nvkm_fifo base;
10
11 struct list_head chan;
12
13 struct work_struct fault;
14 u64 mask;
15
16 struct {
17 struct nvkm_memory *mem[2];
18 int active;
19 wait_queue_head_t wait;
20 } runlist;
21
22 struct {
23 struct nvkm_memory *mem;
24 struct nvkm_vma bar;
25 } user;
26 int spoon_nr;
27};
28
29void gf100_fifo_intr_engine(struct gf100_fifo *);
30void gf100_fifo_runlist_update(struct gf100_fifo *);
31#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index e10f9644140f..98970a0b7a66 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -22,486 +22,121 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "gk104.h" 24#include "gk104.h"
25#include "changk104.h"
25 26
26#include <core/client.h> 27#include <core/client.h>
27#include <core/engctx.h>
28#include <core/enum.h> 28#include <core/enum.h>
29#include <core/handle.h> 29#include <core/gpuobj.h>
30#include <subdev/bar.h> 30#include <subdev/bar.h>
31#include <subdev/fb.h> 31#include <engine/sw.h>
32#include <subdev/mmu.h>
33#include <subdev/timer.h>
34 32
35#include <nvif/class.h> 33#include <nvif/class.h>
36#include <nvif/unpack.h>
37
38#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
39static const struct {
40 u64 subdev;
41 u64 mask;
42} fifo_engine[] = {
43 _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW) |
44 (1ULL << NVDEV_ENGINE_CE2)),
45 _(NVDEV_ENGINE_MSPDEC , 0),
46 _(NVDEV_ENGINE_MSPPP , 0),
47 _(NVDEV_ENGINE_MSVLD , 0),
48 _(NVDEV_ENGINE_CE0 , 0),
49 _(NVDEV_ENGINE_CE1 , 0),
50 _(NVDEV_ENGINE_MSENC , 0),
51};
52#undef _
53#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
54
55struct gk104_fifo_engn {
56 struct nvkm_gpuobj *runlist[2];
57 int cur_runlist;
58 wait_queue_head_t wait;
59};
60
61struct gk104_fifo_priv {
62 struct nvkm_fifo base;
63
64 struct work_struct fault;
65 u64 mask;
66
67 struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
68 struct {
69 struct nvkm_gpuobj *mem;
70 struct nvkm_vma bar;
71 } user;
72 int spoon_nr;
73};
74
75struct gk104_fifo_base {
76 struct nvkm_fifo_base base;
77 struct nvkm_gpuobj *pgd;
78 struct nvkm_vm *vm;
79};
80
81struct gk104_fifo_chan {
82 struct nvkm_fifo_chan base;
83 u32 engine;
84 enum {
85 STOPPED,
86 RUNNING,
87 KILLED
88 } state;
89};
90
91/*******************************************************************************
92 * FIFO channel objects
93 ******************************************************************************/
94
95static void
96gk104_fifo_runlist_update(struct gk104_fifo_priv *priv, u32 engine)
97{
98 struct nvkm_bar *bar = nvkm_bar(priv);
99 struct gk104_fifo_engn *engn = &priv->engine[engine];
100 struct nvkm_gpuobj *cur;
101 int i, p;
102
103 mutex_lock(&nv_subdev(priv)->mutex);
104 cur = engn->runlist[engn->cur_runlist];
105 engn->cur_runlist = !engn->cur_runlist;
106
107 for (i = 0, p = 0; i < priv->base.max; i++) {
108 struct gk104_fifo_chan *chan = (void *)priv->base.channel[i];
109 if (chan && chan->state == RUNNING && chan->engine == engine) {
110 nv_wo32(cur, p + 0, i);
111 nv_wo32(cur, p + 4, 0x00000000);
112 p += 8;
113 }
114 }
115 bar->flush(bar);
116
117 nv_wr32(priv, 0x002270, cur->addr >> 12);
118 nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
119
120 if (wait_event_timeout(engn->wait, !(nv_rd32(priv, 0x002284 +
121 (engine * 0x08)) & 0x00100000),
122 msecs_to_jiffies(2000)) == 0)
123 nv_error(priv, "runlist %d update timeout\n", engine);
124 mutex_unlock(&nv_subdev(priv)->mutex);
125}
126 34
127static int 35void
128gk104_fifo_context_attach(struct nvkm_object *parent, 36gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
129 struct nvkm_object *object)
130{ 37{
131 struct nvkm_bar *bar = nvkm_bar(parent); 38 struct nvkm_device *device = fifo->engine.subdev.device;
132 struct gk104_fifo_base *base = (void *)parent->parent; 39 nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
133 struct nvkm_engctx *ectx = (void *)object;
134 u32 addr;
135 int ret;
136
137 switch (nv_engidx(object->engine)) {
138 case NVDEV_ENGINE_SW :
139 return 0;
140 case NVDEV_ENGINE_CE0:
141 case NVDEV_ENGINE_CE1:
142 case NVDEV_ENGINE_CE2:
143 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
144 return 0;
145 case NVDEV_ENGINE_GR : addr = 0x0210; break;
146 case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
147 case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
148 case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
149 default:
150 return -EINVAL;
151 }
152
153 if (!ectx->vma.node) {
154 ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
155 NV_MEM_ACCESS_RW, &ectx->vma);
156 if (ret)
157 return ret;
158
159 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
160 }
161
162 nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
163 nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
164 bar->flush(bar);
165 return 0;
166} 40}
167 41
168static int 42void
169gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend, 43gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
170 struct nvkm_object *object)
171{ 44{
172 struct nvkm_bar *bar = nvkm_bar(parent); 45 struct nvkm_device *device = fifo->engine.subdev.device;
173 struct gk104_fifo_priv *priv = (void *)parent->engine; 46 nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
174 struct gk104_fifo_base *base = (void *)parent->parent;
175 struct gk104_fifo_chan *chan = (void *)parent;
176 u32 addr;
177
178 switch (nv_engidx(object->engine)) {
179 case NVDEV_ENGINE_SW : return 0;
180 case NVDEV_ENGINE_CE0 :
181 case NVDEV_ENGINE_CE1 :
182 case NVDEV_ENGINE_CE2 : addr = 0x0000; break;
183 case NVDEV_ENGINE_GR : addr = 0x0210; break;
184 case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
185 case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
186 case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
187 default:
188 return -EINVAL;
189 }
190
191 nv_wr32(priv, 0x002634, chan->base.chid);
192 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
193 nv_error(priv, "channel %d [%s] kick timeout\n",
194 chan->base.chid, nvkm_client_name(chan));
195 if (suspend)
196 return -EBUSY;
197 }
198
199 if (addr) {
200 nv_wo32(base, addr + 0x00, 0x00000000);
201 nv_wo32(base, addr + 0x04, 0x00000000);
202 bar->flush(bar);
203 }
204
205 return 0;
206} 47}
207 48
208static int 49void
209gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 50gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
210 struct nvkm_oclass *oclass, void *data, u32 size,
211 struct nvkm_object **pobject)
212{ 51{
213 union { 52 struct gk104_fifo_engn *engn = &fifo->engine[engine];
214 struct kepler_channel_gpfifo_a_v0 v0;
215 } *args = data;
216 struct nvkm_bar *bar = nvkm_bar(parent);
217 struct gk104_fifo_priv *priv = (void *)engine;
218 struct gk104_fifo_base *base = (void *)parent;
219 struct gk104_fifo_chan *chan; 53 struct gk104_fifo_chan *chan;
220 u64 usermem, ioffset, ilength; 54 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
221 int ret, i; 55 struct nvkm_device *device = subdev->device;
222 56 struct nvkm_memory *cur;
223 nv_ioctl(parent, "create channel gpfifo size %d\n", size); 57 int nr = 0;
224 if (nvif_unpack(args->v0, 0, 0, false)) {
225 nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
226 "ioffset %016llx ilength %08x engine %08x\n",
227 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
228 args->v0.ilength, args->v0.engine);
229 } else
230 return ret;
231
232 for (i = 0; i < FIFO_ENGINE_NR; i++) {
233 if (args->v0.engine & (1 << i)) {
234 if (nvkm_engine(parent, fifo_engine[i].subdev)) {
235 args->v0.engine = (1 << i);
236 break;
237 }
238 }
239 }
240
241 if (i == FIFO_ENGINE_NR) {
242 nv_error(priv, "unsupported engines 0x%08x\n", args->v0.engine);
243 return -ENODEV;
244 }
245
246 ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
247 priv->user.bar.offset, 0x200,
248 args->v0.pushbuf,
249 fifo_engine[i].mask, &chan);
250 *pobject = nv_object(chan);
251 if (ret)
252 return ret;
253
254 args->v0.chid = chan->base.chid;
255
256 nv_parent(chan)->context_attach = gk104_fifo_context_attach;
257 nv_parent(chan)->context_detach = gk104_fifo_context_detach;
258 chan->engine = i;
259
260 usermem = chan->base.chid * 0x200;
261 ioffset = args->v0.ioffset;
262 ilength = order_base_2(args->v0.ilength / 8);
263
264 for (i = 0; i < 0x200; i += 4)
265 nv_wo32(priv->user.mem, usermem + i, 0x00000000);
266
267 nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
268 nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
269 nv_wo32(base, 0x10, 0x0000face);
270 nv_wo32(base, 0x30, 0xfffff902);
271 nv_wo32(base, 0x48, lower_32_bits(ioffset));
272 nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
273 nv_wo32(base, 0x84, 0x20400000);
274 nv_wo32(base, 0x94, 0x30000001);
275 nv_wo32(base, 0x9c, 0x00000100);
276 nv_wo32(base, 0xac, 0x0000001f);
277 nv_wo32(base, 0xe8, chan->base.chid);
278 nv_wo32(base, 0xb8, 0xf8000000);
279 nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
280 nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
281 bar->flush(bar);
282 return 0;
283}
284
285static int
286gk104_fifo_chan_init(struct nvkm_object *object)
287{
288 struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
289 struct gk104_fifo_priv *priv = (void *)object->engine;
290 struct gk104_fifo_chan *chan = (void *)object;
291 u32 chid = chan->base.chid;
292 int ret;
293 58
294 ret = nvkm_fifo_channel_init(&chan->base); 59 mutex_lock(&subdev->mutex);
295 if (ret) 60 cur = engn->runlist[engn->cur_runlist];
296 return ret; 61 engn->cur_runlist = !engn->cur_runlist;
297
298 nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
299 nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
300
301 if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
302 nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
303 gk104_fifo_runlist_update(priv, chan->engine);
304 nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
305 }
306
307 return 0;
308}
309
310static int
311gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
312{
313 struct gk104_fifo_priv *priv = (void *)object->engine;
314 struct gk104_fifo_chan *chan = (void *)object;
315 u32 chid = chan->base.chid;
316 62
317 if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) { 63 nvkm_kmap(cur);
318 nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800); 64 list_for_each_entry(chan, &engn->chan, head) {
319 gk104_fifo_runlist_update(priv, chan->engine); 65 nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
66 nvkm_wo32(cur, (nr * 8) + 4, 0x00000000);
67 nr++;
320 } 68 }
69 nvkm_done(cur);
321 70
322 nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000); 71 nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
323 return nvkm_fifo_channel_fini(&chan->base, suspend); 72 nvkm_wr32(device, 0x002274, (engine << 20) | nr);
324}
325
326struct nvkm_ofuncs
327gk104_fifo_chan_ofuncs = {
328 .ctor = gk104_fifo_chan_ctor,
329 .dtor = _nvkm_fifo_channel_dtor,
330 .init = gk104_fifo_chan_init,
331 .fini = gk104_fifo_chan_fini,
332 .map = _nvkm_fifo_channel_map,
333 .rd32 = _nvkm_fifo_channel_rd32,
334 .wr32 = _nvkm_fifo_channel_wr32,
335 .ntfy = _nvkm_fifo_channel_ntfy
336};
337
338static struct nvkm_oclass
339gk104_fifo_sclass[] = {
340 { KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
341 {}
342};
343
344/*******************************************************************************
345 * FIFO context - instmem heap and vm setup
346 ******************************************************************************/
347
348static int
349gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
350 struct nvkm_oclass *oclass, void *data, u32 size,
351 struct nvkm_object **pobject)
352{
353 struct gk104_fifo_base *base;
354 int ret;
355
356 ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
357 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
358 *pobject = nv_object(base);
359 if (ret)
360 return ret;
361
362 ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
363 &base->pgd);
364 if (ret)
365 return ret;
366
367 nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
368 nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
369 nv_wo32(base, 0x0208, 0xffffffff);
370 nv_wo32(base, 0x020c, 0x000000ff);
371
372 ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
373 if (ret)
374 return ret;
375
376 return 0;
377}
378
379static void
380gk104_fifo_context_dtor(struct nvkm_object *object)
381{
382 struct gk104_fifo_base *base = (void *)object;
383 nvkm_vm_ref(NULL, &base->vm, base->pgd);
384 nvkm_gpuobj_ref(NULL, &base->pgd);
385 nvkm_fifo_context_destroy(&base->base);
386}
387
388static struct nvkm_oclass
389gk104_fifo_cclass = {
390 .handle = NV_ENGCTX(FIFO, 0xe0),
391 .ofuncs = &(struct nvkm_ofuncs) {
392 .ctor = gk104_fifo_context_ctor,
393 .dtor = gk104_fifo_context_dtor,
394 .init = _nvkm_fifo_context_init,
395 .fini = _nvkm_fifo_context_fini,
396 .rd32 = _nvkm_fifo_context_rd32,
397 .wr32 = _nvkm_fifo_context_wr32,
398 },
399};
400 73
401/******************************************************************************* 74 if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
402 * PFIFO engine 75 (engine * 0x08)) & 0x00100000),
403 ******************************************************************************/ 76 msecs_to_jiffies(2000)) == 0)
404 77 nvkm_error(subdev, "runlist %d update timeout\n", engine);
405static inline int 78 mutex_unlock(&subdev->mutex);
406gk104_fifo_engidx(struct gk104_fifo_priv *priv, u32 engn)
407{
408 switch (engn) {
409 case NVDEV_ENGINE_GR :
410 case NVDEV_ENGINE_CE2 : engn = 0; break;
411 case NVDEV_ENGINE_MSVLD : engn = 1; break;
412 case NVDEV_ENGINE_MSPPP : engn = 2; break;
413 case NVDEV_ENGINE_MSPDEC: engn = 3; break;
414 case NVDEV_ENGINE_CE0 : engn = 4; break;
415 case NVDEV_ENGINE_CE1 : engn = 5; break;
416 case NVDEV_ENGINE_MSENC : engn = 6; break;
417 default:
418 return -1;
419 }
420
421 return engn;
422} 79}
423 80
424static inline struct nvkm_engine * 81static inline struct nvkm_engine *
425gk104_fifo_engine(struct gk104_fifo_priv *priv, u32 engn) 82gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
426{ 83{
427 if (engn >= ARRAY_SIZE(fifo_engine)) 84 struct nvkm_device *device = fifo->base.engine.subdev.device;
428 return NULL; 85 u64 subdevs = gk104_fifo_engine_subdev(engn);
429 return nvkm_engine(priv, fifo_engine[engn].subdev); 86 if (subdevs)
87 return nvkm_device_engine(device, __ffs(subdevs));
88 return NULL;
430} 89}
431 90
432static void 91static void
433gk104_fifo_recover_work(struct work_struct *work) 92gk104_fifo_recover_work(struct work_struct *work)
434{ 93{
435 struct gk104_fifo_priv *priv = container_of(work, typeof(*priv), fault); 94 struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
436 struct nvkm_object *engine; 95 struct nvkm_device *device = fifo->base.engine.subdev.device;
96 struct nvkm_engine *engine;
437 unsigned long flags; 97 unsigned long flags;
438 u32 engn, engm = 0; 98 u32 engn, engm = 0;
439 u64 mask, todo; 99 u64 mask, todo;
440 100
441 spin_lock_irqsave(&priv->base.lock, flags); 101 spin_lock_irqsave(&fifo->base.lock, flags);
442 mask = priv->mask; 102 mask = fifo->mask;
443 priv->mask = 0ULL; 103 fifo->mask = 0ULL;
444 spin_unlock_irqrestore(&priv->base.lock, flags); 104 spin_unlock_irqrestore(&fifo->base.lock, flags);
445 105
446 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) 106 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
447 engm |= 1 << gk104_fifo_engidx(priv, engn); 107 engm |= 1 << gk104_fifo_subdev_engine(engn);
448 nv_mask(priv, 0x002630, engm, engm); 108 nvkm_mask(device, 0x002630, engm, engm);
449 109
450 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) { 110 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
451 if ((engine = (void *)nvkm_engine(priv, engn))) { 111 if ((engine = nvkm_device_engine(device, engn))) {
452 nv_ofuncs(engine)->fini(engine, false); 112 nvkm_subdev_fini(&engine->subdev, false);
453 WARN_ON(nv_ofuncs(engine)->init(engine)); 113 WARN_ON(nvkm_subdev_init(&engine->subdev));
454 } 114 }
455 gk104_fifo_runlist_update(priv, gk104_fifo_engidx(priv, engn)); 115 gk104_fifo_runlist_update(fifo, gk104_fifo_subdev_engine(engn));
456 } 116 }
457 117
458 nv_wr32(priv, 0x00262c, engm); 118 nvkm_wr32(device, 0x00262c, engm);
459 nv_mask(priv, 0x002630, engm, 0x00000000); 119 nvkm_mask(device, 0x002630, engm, 0x00000000);
460} 120}
461 121
462static void 122static void
463gk104_fifo_recover(struct gk104_fifo_priv *priv, struct nvkm_engine *engine, 123gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
464 struct gk104_fifo_chan *chan) 124 struct gk104_fifo_chan *chan)
465{ 125{
126 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
127 struct nvkm_device *device = subdev->device;
466 u32 chid = chan->base.chid; 128 u32 chid = chan->base.chid;
467 unsigned long flags;
468 129
469 nv_error(priv, "%s engine fault on channel %d, recovering...\n", 130 nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
470 nv_subdev(engine)->name, chid); 131 nvkm_subdev_name[engine->subdev.index], chid);
132 assert_spin_locked(&fifo->base.lock);
471 133
472 nv_mask(priv, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800); 134 nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
473 chan->state = KILLED; 135 list_del_init(&chan->head);
136 chan->killed = true;
474 137
475 spin_lock_irqsave(&priv->base.lock, flags); 138 fifo->mask |= 1ULL << engine->subdev.index;
476 priv->mask |= 1ULL << nv_engidx(engine); 139 schedule_work(&fifo->fault);
477 spin_unlock_irqrestore(&priv->base.lock, flags);
478 schedule_work(&priv->fault);
479}
480
481static int
482gk104_fifo_swmthd(struct gk104_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
483{
484 struct gk104_fifo_chan *chan = NULL;
485 struct nvkm_handle *bind;
486 unsigned long flags;
487 int ret = -EINVAL;
488
489 spin_lock_irqsave(&priv->base.lock, flags);
490 if (likely(chid >= priv->base.min && chid <= priv->base.max))
491 chan = (void *)priv->base.channel[chid];
492 if (unlikely(!chan))
493 goto out;
494
495 bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
496 if (likely(bind)) {
497 if (!mthd || !nv_call(bind->object, mthd, data))
498 ret = 0;
499 nvkm_namedb_put(bind);
500 }
501
502out:
503 spin_unlock_irqrestore(&priv->base.lock, flags);
504 return ret;
505} 140}
506 141
507static const struct nvkm_enum 142static const struct nvkm_enum
@@ -516,18 +151,16 @@ gk104_fifo_bind_reason[] = {
516}; 151};
517 152
518static void 153static void
519gk104_fifo_intr_bind(struct gk104_fifo_priv *priv) 154gk104_fifo_intr_bind(struct gk104_fifo *fifo)
520{ 155{
521 u32 intr = nv_rd32(priv, 0x00252c); 156 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
157 struct nvkm_device *device = subdev->device;
158 u32 intr = nvkm_rd32(device, 0x00252c);
522 u32 code = intr & 0x000000ff; 159 u32 code = intr & 0x000000ff;
523 const struct nvkm_enum *en; 160 const struct nvkm_enum *en =
524 char enunk[6] = ""; 161 nvkm_enum_find(gk104_fifo_bind_reason, code);
525
526 en = nvkm_enum_find(gk104_fifo_bind_reason, code);
527 if (!en)
528 snprintf(enunk, sizeof(enunk), "UNK%02x", code);
529 162
530 nv_error(priv, "BIND_ERROR [ %s ]\n", en ? en->name : enunk); 163 nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
531} 164}
532 165
533static const struct nvkm_enum 166static const struct nvkm_enum
@@ -537,14 +170,17 @@ gk104_fifo_sched_reason[] = {
537}; 170};
538 171
539static void 172static void
540gk104_fifo_intr_sched_ctxsw(struct gk104_fifo_priv *priv) 173gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
541{ 174{
175 struct nvkm_device *device = fifo->base.engine.subdev.device;
542 struct nvkm_engine *engine; 176 struct nvkm_engine *engine;
543 struct gk104_fifo_chan *chan; 177 struct gk104_fifo_chan *chan;
178 unsigned long flags;
544 u32 engn; 179 u32 engn;
545 180
546 for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) { 181 spin_lock_irqsave(&fifo->base.lock, flags);
547 u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04)); 182 for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
183 u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
548 u32 busy = (stat & 0x80000000); 184 u32 busy = (stat & 0x80000000);
549 u32 next = (stat & 0x07ff0000) >> 16; 185 u32 next = (stat & 0x07ff0000) >> 16;
550 u32 chsw = (stat & 0x00008000); 186 u32 chsw = (stat & 0x00008000);
@@ -555,32 +191,35 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo_priv *priv)
555 (void)save; 191 (void)save;
556 192
557 if (busy && chsw) { 193 if (busy && chsw) {
558 if (!(chan = (void *)priv->base.channel[chid])) 194 list_for_each_entry(chan, &fifo->engine[engn].chan, head) {
559 continue; 195 if (chan->base.chid == chid) {
560 if (!(engine = gk104_fifo_engine(priv, engn))) 196 engine = gk104_fifo_engine(fifo, engn);
561 continue; 197 if (!engine)
562 gk104_fifo_recover(priv, engine, chan); 198 break;
199 gk104_fifo_recover(fifo, engine, chan);
200 break;
201 }
202 }
563 } 203 }
564 } 204 }
205 spin_unlock_irqrestore(&fifo->base.lock, flags);
565} 206}
566 207
567static void 208static void
568gk104_fifo_intr_sched(struct gk104_fifo_priv *priv) 209gk104_fifo_intr_sched(struct gk104_fifo *fifo)
569{ 210{
570 u32 intr = nv_rd32(priv, 0x00254c); 211 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
212 struct nvkm_device *device = subdev->device;
213 u32 intr = nvkm_rd32(device, 0x00254c);
571 u32 code = intr & 0x000000ff; 214 u32 code = intr & 0x000000ff;
572 const struct nvkm_enum *en; 215 const struct nvkm_enum *en =
573 char enunk[6] = ""; 216 nvkm_enum_find(gk104_fifo_sched_reason, code);
574
575 en = nvkm_enum_find(gk104_fifo_sched_reason, code);
576 if (!en)
577 snprintf(enunk, sizeof(enunk), "UNK%02x", code);
578 217
579 nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk); 218 nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
580 219
581 switch (code) { 220 switch (code) {
582 case 0x0a: 221 case 0x0a:
583 gk104_fifo_intr_sched_ctxsw(priv); 222 gk104_fifo_intr_sched_ctxsw(fifo);
584 break; 223 break;
585 default: 224 default:
586 break; 225 break;
@@ -588,38 +227,42 @@ gk104_fifo_intr_sched(struct gk104_fifo_priv *priv)
588} 227}
589 228
590static void 229static void
591gk104_fifo_intr_chsw(struct gk104_fifo_priv *priv) 230gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
592{ 231{
593 u32 stat = nv_rd32(priv, 0x00256c); 232 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
594 nv_error(priv, "CHSW_ERROR 0x%08x\n", stat); 233 struct nvkm_device *device = subdev->device;
595 nv_wr32(priv, 0x00256c, stat); 234 u32 stat = nvkm_rd32(device, 0x00256c);
235 nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
236 nvkm_wr32(device, 0x00256c, stat);
596} 237}
597 238
598static void 239static void
599gk104_fifo_intr_dropped_fault(struct gk104_fifo_priv *priv) 240gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
600{ 241{
601 u32 stat = nv_rd32(priv, 0x00259c); 242 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
602 nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat); 243 struct nvkm_device *device = subdev->device;
244 u32 stat = nvkm_rd32(device, 0x00259c);
245 nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
603} 246}
604 247
605static const struct nvkm_enum 248static const struct nvkm_enum
606gk104_fifo_fault_engine[] = { 249gk104_fifo_fault_engine[] = {
607 { 0x00, "GR", NULL, NVDEV_ENGINE_GR }, 250 { 0x00, "GR", NULL, NVKM_ENGINE_GR },
608 { 0x03, "IFB", NULL, NVDEV_ENGINE_IFB }, 251 { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
609 { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR }, 252 { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
610 { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM }, 253 { 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
611 { 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO }, 254 { 0x07, "PBDMA0", NULL, NVKM_ENGINE_FIFO },
612 { 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO }, 255 { 0x08, "PBDMA1", NULL, NVKM_ENGINE_FIFO },
613 { 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO }, 256 { 0x09, "PBDMA2", NULL, NVKM_ENGINE_FIFO },
614 { 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD }, 257 { 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
615 { 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP }, 258 { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
616 { 0x13, "PERF" }, 259 { 0x13, "PERF" },
617 { 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC }, 260 { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
618 { 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 }, 261 { 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
619 { 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 }, 262 { 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
620 { 0x17, "PMU" }, 263 { 0x17, "PMU" },
621 { 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC }, 264 { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
622 { 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 }, 265 { 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
623 {} 266 {}
624}; 267};
625 268
@@ -708,80 +351,65 @@ gk104_fifo_fault_gpcclient[] = {
708}; 351};
709 352
710static void 353static void
711gk104_fifo_intr_fault(struct gk104_fifo_priv *priv, int unit) 354gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
712{ 355{
713 u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10)); 356 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
714 u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10)); 357 struct nvkm_device *device = subdev->device;
715 u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10)); 358 u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
716 u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10)); 359 u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
360 u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
361 u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
717 u32 gpc = (stat & 0x1f000000) >> 24; 362 u32 gpc = (stat & 0x1f000000) >> 24;
718 u32 client = (stat & 0x00001f00) >> 8; 363 u32 client = (stat & 0x00001f00) >> 8;
719 u32 write = (stat & 0x00000080); 364 u32 write = (stat & 0x00000080);
720 u32 hub = (stat & 0x00000040); 365 u32 hub = (stat & 0x00000040);
721 u32 reason = (stat & 0x0000000f); 366 u32 reason = (stat & 0x0000000f);
722 struct nvkm_object *engctx = NULL, *object;
723 struct nvkm_engine *engine = NULL;
724 const struct nvkm_enum *er, *eu, *ec; 367 const struct nvkm_enum *er, *eu, *ec;
725 char erunk[6] = ""; 368 struct nvkm_engine *engine = NULL;
726 char euunk[6] = ""; 369 struct nvkm_fifo_chan *chan;
727 char ecunk[6] = ""; 370 unsigned long flags;
728 char gpcid[3] = ""; 371 char gpcid[8] = "";
729 372
730 er = nvkm_enum_find(gk104_fifo_fault_reason, reason); 373 er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
731 if (!er)
732 snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
733
734 eu = nvkm_enum_find(gk104_fifo_fault_engine, unit); 374 eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
375 if (hub) {
376 ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
377 } else {
378 ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
379 snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
380 }
381
735 if (eu) { 382 if (eu) {
736 switch (eu->data2) { 383 switch (eu->data2) {
737 case NVDEV_SUBDEV_BAR: 384 case NVKM_SUBDEV_BAR:
738 nv_mask(priv, 0x001704, 0x00000000, 0x00000000); 385 nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
739 break; 386 break;
740 case NVDEV_SUBDEV_INSTMEM: 387 case NVKM_SUBDEV_INSTMEM:
741 nv_mask(priv, 0x001714, 0x00000000, 0x00000000); 388 nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
742 break; 389 break;
743 case NVDEV_ENGINE_IFB: 390 case NVKM_ENGINE_IFB:
744 nv_mask(priv, 0x001718, 0x00000000, 0x00000000); 391 nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
745 break; 392 break;
746 default: 393 default:
747 engine = nvkm_engine(priv, eu->data2); 394 engine = nvkm_device_engine(device, eu->data2);
748 if (engine)
749 engctx = nvkm_engctx_get(engine, inst);
750 break; 395 break;
751 } 396 }
752 } else {
753 snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
754 } 397 }
755 398
756 if (hub) { 399 chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
757 ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
758 } else {
759 ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
760 snprintf(gpcid, sizeof(gpcid), "%d", gpc);
761 }
762 400
763 if (!ec) 401 nvkm_error(subdev,
764 snprintf(ecunk, sizeof(ecunk), "UNK%02x", client); 402 "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
765 403 "reason %02x [%s] on channel %d [%010llx %s]\n",
766 nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on " 404 write ? "write" : "read", (u64)vahi << 32 | valo,
767 "channel 0x%010llx [%s]\n", write ? "write" : "read", 405 unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
768 (u64)vahi << 32 | valo, er ? er->name : erunk, 406 reason, er ? er->name : "", chan ? chan->chid : -1,
769 eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/", 407 (u64)inst << 12,
770 ec ? ec->name : ecunk, (u64)inst << 12, 408 chan ? chan->object.client->name : "unknown");
771 nvkm_client_name(engctx));
772
773 object = engctx;
774 while (object) {
775 switch (nv_mclass(object)) {
776 case KEPLER_CHANNEL_GPFIFO_A:
777 case MAXWELL_CHANNEL_GPFIFO_A:
778 gk104_fifo_recover(priv, engine, (void *)object);
779 break;
780 }
781 object = object->parent;
782 }
783 409
784 nvkm_engctx_put(engctx); 410 if (engine && chan)
411 gk104_fifo_recover(fifo, engine, (void *)chan);
412 nvkm_fifo_chan_put(&fifo->base, flags, &chan);
785} 413}
786 414
787static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = { 415static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
@@ -819,35 +447,42 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
819}; 447};
820 448
821static void 449static void
822gk104_fifo_intr_pbdma_0(struct gk104_fifo_priv *priv, int unit) 450gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
823{ 451{
824 u32 mask = nv_rd32(priv, 0x04010c + (unit * 0x2000)); 452 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
825 u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)) & mask; 453 struct nvkm_device *device = subdev->device;
826 u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000)); 454 u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
827 u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000)); 455 u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
828 u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff; 456 u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
457 u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
458 u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
829 u32 subc = (addr & 0x00070000) >> 16; 459 u32 subc = (addr & 0x00070000) >> 16;
830 u32 mthd = (addr & 0x00003ffc); 460 u32 mthd = (addr & 0x00003ffc);
831 u32 show = stat; 461 u32 show = stat;
462 struct nvkm_fifo_chan *chan;
463 unsigned long flags;
464 char msg[128];
832 465
833 if (stat & 0x00800000) { 466 if (stat & 0x00800000) {
834 if (!gk104_fifo_swmthd(priv, chid, mthd, data)) 467 if (device->sw) {
835 show &= ~0x00800000; 468 if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
836 nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008); 469 show &= ~0x00800000;
470 }
471 nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
837 } 472 }
838 473
839 if (show) { 474 if (show) {
840 nv_error(priv, "PBDMA%d:", unit); 475 nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
841 nvkm_bitfield_print(gk104_fifo_pbdma_intr_0, show); 476 chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
842 pr_cont("\n"); 477 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
843 nv_error(priv, 478 "subc %d mthd %04x data %08x\n",
844 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n", 479 unit, show, msg, chid, chan ? chan->inst->addr : 0,
845 unit, chid, 480 chan ? chan->object.client->name : "unknown",
846 nvkm_client_name_for_fifo_chid(&priv->base, chid), 481 subc, mthd, data);
847 subc, mthd, data); 482 nvkm_fifo_chan_put(&fifo->base, flags, &chan);
848 } 483 }
849 484
850 nv_wr32(priv, 0x040108 + (unit * 0x2000), stat); 485 nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
851} 486}
852 487
853static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = { 488static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
@@ -860,280 +495,266 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
860}; 495};
861 496
862static void 497static void
863gk104_fifo_intr_pbdma_1(struct gk104_fifo_priv *priv, int unit) 498gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
864{ 499{
865 u32 mask = nv_rd32(priv, 0x04014c + (unit * 0x2000)); 500 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
866 u32 stat = nv_rd32(priv, 0x040148 + (unit * 0x2000)) & mask; 501 struct nvkm_device *device = subdev->device;
867 u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff; 502 u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
503 u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
504 u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
505 char msg[128];
868 506
869 if (stat) { 507 if (stat) {
870 nv_error(priv, "PBDMA%d:", unit); 508 nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
871 nvkm_bitfield_print(gk104_fifo_pbdma_intr_1, stat); 509 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
872 pr_cont("\n"); 510 unit, stat, msg, chid,
873 nv_error(priv, "PBDMA%d: ch %d %08x %08x\n", unit, chid, 511 nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
874 nv_rd32(priv, 0x040150 + (unit * 0x2000)), 512 nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
875 nv_rd32(priv, 0x040154 + (unit * 0x2000)));
876 } 513 }
877 514
878 nv_wr32(priv, 0x040148 + (unit * 0x2000), stat); 515 nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
879} 516}
880 517
881static void 518static void
882gk104_fifo_intr_runlist(struct gk104_fifo_priv *priv) 519gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
883{ 520{
884 u32 mask = nv_rd32(priv, 0x002a00); 521 struct nvkm_device *device = fifo->base.engine.subdev.device;
522 u32 mask = nvkm_rd32(device, 0x002a00);
885 while (mask) { 523 while (mask) {
886 u32 engn = __ffs(mask); 524 u32 engn = __ffs(mask);
887 wake_up(&priv->engine[engn].wait); 525 wake_up(&fifo->engine[engn].wait);
888 nv_wr32(priv, 0x002a00, 1 << engn); 526 nvkm_wr32(device, 0x002a00, 1 << engn);
889 mask &= ~(1 << engn); 527 mask &= ~(1 << engn);
890 } 528 }
891} 529}
892 530
893static void 531static void
894gk104_fifo_intr_engine(struct gk104_fifo_priv *priv) 532gk104_fifo_intr_engine(struct gk104_fifo *fifo)
895{ 533{
896 nvkm_fifo_uevent(&priv->base); 534 nvkm_fifo_uevent(&fifo->base);
897} 535}
898 536
899static void 537void
900gk104_fifo_intr(struct nvkm_subdev *subdev) 538gk104_fifo_intr(struct nvkm_fifo *base)
901{ 539{
902 struct gk104_fifo_priv *priv = (void *)subdev; 540 struct gk104_fifo *fifo = gk104_fifo(base);
903 u32 mask = nv_rd32(priv, 0x002140); 541 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
904 u32 stat = nv_rd32(priv, 0x002100) & mask; 542 struct nvkm_device *device = subdev->device;
543 u32 mask = nvkm_rd32(device, 0x002140);
544 u32 stat = nvkm_rd32(device, 0x002100) & mask;
905 545
906 if (stat & 0x00000001) { 546 if (stat & 0x00000001) {
907 gk104_fifo_intr_bind(priv); 547 gk104_fifo_intr_bind(fifo);
908 nv_wr32(priv, 0x002100, 0x00000001); 548 nvkm_wr32(device, 0x002100, 0x00000001);
909 stat &= ~0x00000001; 549 stat &= ~0x00000001;
910 } 550 }
911 551
912 if (stat & 0x00000010) { 552 if (stat & 0x00000010) {
913 nv_error(priv, "PIO_ERROR\n"); 553 nvkm_error(subdev, "PIO_ERROR\n");
914 nv_wr32(priv, 0x002100, 0x00000010); 554 nvkm_wr32(device, 0x002100, 0x00000010);
915 stat &= ~0x00000010; 555 stat &= ~0x00000010;
916 } 556 }
917 557
918 if (stat & 0x00000100) { 558 if (stat & 0x00000100) {
919 gk104_fifo_intr_sched(priv); 559 gk104_fifo_intr_sched(fifo);
920 nv_wr32(priv, 0x002100, 0x00000100); 560 nvkm_wr32(device, 0x002100, 0x00000100);
921 stat &= ~0x00000100; 561 stat &= ~0x00000100;
922 } 562 }
923 563
924 if (stat & 0x00010000) { 564 if (stat & 0x00010000) {
925 gk104_fifo_intr_chsw(priv); 565 gk104_fifo_intr_chsw(fifo);
926 nv_wr32(priv, 0x002100, 0x00010000); 566 nvkm_wr32(device, 0x002100, 0x00010000);
927 stat &= ~0x00010000; 567 stat &= ~0x00010000;
928 } 568 }
929 569
930 if (stat & 0x00800000) { 570 if (stat & 0x00800000) {
931 nv_error(priv, "FB_FLUSH_TIMEOUT\n"); 571 nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
932 nv_wr32(priv, 0x002100, 0x00800000); 572 nvkm_wr32(device, 0x002100, 0x00800000);
933 stat &= ~0x00800000; 573 stat &= ~0x00800000;
934 } 574 }
935 575
936 if (stat & 0x01000000) { 576 if (stat & 0x01000000) {
937 nv_error(priv, "LB_ERROR\n"); 577 nvkm_error(subdev, "LB_ERROR\n");
938 nv_wr32(priv, 0x002100, 0x01000000); 578 nvkm_wr32(device, 0x002100, 0x01000000);
939 stat &= ~0x01000000; 579 stat &= ~0x01000000;
940 } 580 }
941 581
942 if (stat & 0x08000000) { 582 if (stat & 0x08000000) {
943 gk104_fifo_intr_dropped_fault(priv); 583 gk104_fifo_intr_dropped_fault(fifo);
944 nv_wr32(priv, 0x002100, 0x08000000); 584 nvkm_wr32(device, 0x002100, 0x08000000);
945 stat &= ~0x08000000; 585 stat &= ~0x08000000;
946 } 586 }
947 587
948 if (stat & 0x10000000) { 588 if (stat & 0x10000000) {
949 u32 mask = nv_rd32(priv, 0x00259c); 589 u32 mask = nvkm_rd32(device, 0x00259c);
950 while (mask) { 590 while (mask) {
951 u32 unit = __ffs(mask); 591 u32 unit = __ffs(mask);
952 gk104_fifo_intr_fault(priv, unit); 592 gk104_fifo_intr_fault(fifo, unit);
953 nv_wr32(priv, 0x00259c, (1 << unit)); 593 nvkm_wr32(device, 0x00259c, (1 << unit));
954 mask &= ~(1 << unit); 594 mask &= ~(1 << unit);
955 } 595 }
956 stat &= ~0x10000000; 596 stat &= ~0x10000000;
957 } 597 }
958 598
959 if (stat & 0x20000000) { 599 if (stat & 0x20000000) {
960 u32 mask = nv_rd32(priv, 0x0025a0); 600 u32 mask = nvkm_rd32(device, 0x0025a0);
961 while (mask) { 601 while (mask) {
962 u32 unit = __ffs(mask); 602 u32 unit = __ffs(mask);
963 gk104_fifo_intr_pbdma_0(priv, unit); 603 gk104_fifo_intr_pbdma_0(fifo, unit);
964 gk104_fifo_intr_pbdma_1(priv, unit); 604 gk104_fifo_intr_pbdma_1(fifo, unit);
965 nv_wr32(priv, 0x0025a0, (1 << unit)); 605 nvkm_wr32(device, 0x0025a0, (1 << unit));
966 mask &= ~(1 << unit); 606 mask &= ~(1 << unit);
967 } 607 }
968 stat &= ~0x20000000; 608 stat &= ~0x20000000;
969 } 609 }
970 610
971 if (stat & 0x40000000) { 611 if (stat & 0x40000000) {
972 gk104_fifo_intr_runlist(priv); 612 gk104_fifo_intr_runlist(fifo);
973 stat &= ~0x40000000; 613 stat &= ~0x40000000;
974 } 614 }
975 615
976 if (stat & 0x80000000) { 616 if (stat & 0x80000000) {
977 nv_wr32(priv, 0x002100, 0x80000000); 617 nvkm_wr32(device, 0x002100, 0x80000000);
978 gk104_fifo_intr_engine(priv); 618 gk104_fifo_intr_engine(fifo);
979 stat &= ~0x80000000; 619 stat &= ~0x80000000;
980 } 620 }
981 621
982 if (stat) { 622 if (stat) {
983 nv_error(priv, "INTR 0x%08x\n", stat); 623 nvkm_error(subdev, "INTR %08x\n", stat);
984 nv_mask(priv, 0x002140, stat, 0x00000000); 624 nvkm_mask(device, 0x002140, stat, 0x00000000);
985 nv_wr32(priv, 0x002100, stat); 625 nvkm_wr32(device, 0x002100, stat);
986 } 626 }
987} 627}
988 628
989static void 629void
990gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index) 630gk104_fifo_fini(struct nvkm_fifo *base)
991{ 631{
992 struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent); 632 struct gk104_fifo *fifo = gk104_fifo(base);
993 nv_mask(fifo, 0x002140, 0x80000000, 0x80000000); 633 struct nvkm_device *device = fifo->base.engine.subdev.device;
634 flush_work(&fifo->fault);
635 /* allow mmu fault interrupts, even when we're not using fifo */
636 nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
994} 637}
995 638
996static void 639int
997gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index) 640gk104_fifo_oneinit(struct nvkm_fifo *base)
998{ 641{
999 struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent); 642 struct gk104_fifo *fifo = gk104_fifo(base);
1000 nv_mask(fifo, 0x002140, 0x80000000, 0x00000000); 643 struct nvkm_device *device = fifo->base.engine.subdev.device;
1001} 644 int ret, i;
1002 645
1003static const struct nvkm_event_func 646 for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
1004gk104_fifo_uevent_func = { 647 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
1005 .ctor = nvkm_fifo_uevent_ctor, 648 0x8000, 0x1000, false,
1006 .init = gk104_fifo_uevent_init, 649 &fifo->engine[i].runlist[0]);
1007 .fini = gk104_fifo_uevent_fini, 650 if (ret)
1008}; 651 return ret;
1009 652
1010int 653 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
1011gk104_fifo_fini(struct nvkm_object *object, bool suspend) 654 0x8000, 0x1000, false,
1012{ 655 &fifo->engine[i].runlist[1]);
1013 struct gk104_fifo_priv *priv = (void *)object; 656 if (ret)
1014 int ret; 657 return ret;
658
659 init_waitqueue_head(&fifo->engine[i].wait);
660 INIT_LIST_HEAD(&fifo->engine[i].chan);
661 }
1015 662
1016 ret = nvkm_fifo_fini(&priv->base, suspend); 663 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
664 fifo->base.nr * 0x200, 0x1000, true,
665 &fifo->user.mem);
1017 if (ret) 666 if (ret)
1018 return ret; 667 return ret;
1019 668
1020 /* allow mmu fault interrupts, even when we're not using fifo */ 669 ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
1021 nv_mask(priv, 0x002140, 0x10000000, 0x10000000); 670 &fifo->user.bar);
671 if (ret)
672 return ret;
673
674 nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
1022 return 0; 675 return 0;
1023} 676}
1024 677
1025int 678void
1026gk104_fifo_init(struct nvkm_object *object) 679gk104_fifo_init(struct nvkm_fifo *base)
1027{ 680{
1028 struct gk104_fifo_priv *priv = (void *)object; 681 struct gk104_fifo *fifo = gk104_fifo(base);
1029 int ret, i; 682 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
1030 683 struct nvkm_device *device = subdev->device;
1031 ret = nvkm_fifo_init(&priv->base); 684 int i;
1032 if (ret)
1033 return ret;
1034 685
1035 /* enable all available PBDMA units */ 686 /* enable all available PBDMA units */
1036 nv_wr32(priv, 0x000204, 0xffffffff); 687 nvkm_wr32(device, 0x000204, 0xffffffff);
1037 priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204)); 688 fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
1038 nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr); 689 nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
1039 690
1040 /* PBDMA[n] */ 691 /* PBDMA[n] */
1041 for (i = 0; i < priv->spoon_nr; i++) { 692 for (i = 0; i < fifo->spoon_nr; i++) {
1042 nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); 693 nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
1043 nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ 694 nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
1044 nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ 695 nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
1045 } 696 }
1046 697
1047 /* PBDMA[n].HCE */ 698 /* PBDMA[n].HCE */
1048 for (i = 0; i < priv->spoon_nr; i++) { 699 for (i = 0; i < fifo->spoon_nr; i++) {
1049 nv_wr32(priv, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */ 700 nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
1050 nv_wr32(priv, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */ 701 nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
1051 } 702 }
1052 703
1053 nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12); 704 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
1054 705
1055 nv_wr32(priv, 0x002100, 0xffffffff); 706 nvkm_wr32(device, 0x002100, 0xffffffff);
1056 nv_wr32(priv, 0x002140, 0x7fffffff); 707 nvkm_wr32(device, 0x002140, 0x7fffffff);
1057 return 0;
1058} 708}
1059 709
1060void 710void *
1061gk104_fifo_dtor(struct nvkm_object *object) 711gk104_fifo_dtor(struct nvkm_fifo *base)
1062{ 712{
1063 struct gk104_fifo_priv *priv = (void *)object; 713 struct gk104_fifo *fifo = gk104_fifo(base);
1064 int i; 714 int i;
1065 715
1066 nvkm_gpuobj_unmap(&priv->user.bar); 716 nvkm_vm_put(&fifo->user.bar);
1067 nvkm_gpuobj_ref(NULL, &priv->user.mem); 717 nvkm_memory_del(&fifo->user.mem);
1068 718
1069 for (i = 0; i < FIFO_ENGINE_NR; i++) { 719 for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
1070 nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[1]); 720 nvkm_memory_del(&fifo->engine[i].runlist[1]);
1071 nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[0]); 721 nvkm_memory_del(&fifo->engine[i].runlist[0]);
1072 } 722 }
1073 723
1074 nvkm_fifo_destroy(&priv->base); 724 return fifo;
1075} 725}
1076 726
1077int 727int
1078gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 728gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
1079 struct nvkm_oclass *oclass, void *data, u32 size, 729 int index, int nr, struct nvkm_fifo **pfifo)
1080 struct nvkm_object **pobject)
1081{ 730{
1082 struct gk104_fifo_impl *impl = (void *)oclass; 731 struct gk104_fifo *fifo;
1083 struct gk104_fifo_priv *priv;
1084 int ret, i;
1085 732
1086 ret = nvkm_fifo_create(parent, engine, oclass, 0, 733 if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
1087 impl->channels - 1, &priv); 734 return -ENOMEM;
1088 *pobject = nv_object(priv); 735 INIT_WORK(&fifo->fault, gk104_fifo_recover_work);
1089 if (ret) 736 *pfifo = &fifo->base;
1090 return ret;
1091
1092 INIT_WORK(&priv->fault, gk104_fifo_recover_work);
1093
1094 for (i = 0; i < FIFO_ENGINE_NR; i++) {
1095 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
1096 0, &priv->engine[i].runlist[0]);
1097 if (ret)
1098 return ret;
1099
1100 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
1101 0, &priv->engine[i].runlist[1]);
1102 if (ret)
1103 return ret;
1104
1105 init_waitqueue_head(&priv->engine[i].wait);
1106 }
1107
1108 ret = nvkm_gpuobj_new(nv_object(priv), NULL, impl->channels * 0x200,
1109 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
1110 if (ret)
1111 return ret;
1112 737
1113 ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW, 738 return nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
1114 &priv->user.bar);
1115 if (ret)
1116 return ret;
1117
1118 ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &priv->base.uevent);
1119 if (ret)
1120 return ret;
1121
1122 nv_subdev(priv)->unit = 0x00000100;
1123 nv_subdev(priv)->intr = gk104_fifo_intr;
1124 nv_engine(priv)->cclass = &gk104_fifo_cclass;
1125 nv_engine(priv)->sclass = gk104_fifo_sclass;
1126 return 0;
1127} 739}
1128 740
1129struct nvkm_oclass * 741static const struct nvkm_fifo_func
1130gk104_fifo_oclass = &(struct gk104_fifo_impl) { 742gk104_fifo = {
1131 .base.handle = NV_ENGINE(FIFO, 0xe0), 743 .dtor = gk104_fifo_dtor,
1132 .base.ofuncs = &(struct nvkm_ofuncs) { 744 .oneinit = gk104_fifo_oneinit,
1133 .ctor = gk104_fifo_ctor, 745 .init = gk104_fifo_init,
1134 .dtor = gk104_fifo_dtor, 746 .fini = gk104_fifo_fini,
1135 .init = gk104_fifo_init, 747 .intr = gk104_fifo_intr,
1136 .fini = gk104_fifo_fini, 748 .uevent_init = gk104_fifo_uevent_init,
749 .uevent_fini = gk104_fifo_uevent_fini,
750 .chan = {
751 &gk104_fifo_gpfifo_oclass,
752 NULL
1137 }, 753 },
1138 .channels = 4096, 754};
1139}.base; 755
756int
757gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
758{
759 return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
760}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 318d30d6ee1a..5afd9b5ec5d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -1,18 +1,77 @@
1#ifndef __NVKM_FIFO_NVE0_H__ 1#ifndef __GK104_FIFO_H__
2#define __NVKM_FIFO_NVE0_H__ 2#define __GK104_FIFO_H__
3#include <engine/fifo.h> 3#define gk104_fifo(p) container_of((p), struct gk104_fifo, base)
4 4#include "priv.h"
5int gk104_fifo_ctor(struct nvkm_object *, struct nvkm_object *, 5
6 struct nvkm_oclass *, void *, u32, 6#include <subdev/mmu.h>
7 struct nvkm_object **); 7
8void gk104_fifo_dtor(struct nvkm_object *); 8struct gk104_fifo_engn {
9int gk104_fifo_init(struct nvkm_object *); 9 struct nvkm_memory *runlist[2];
10int gk104_fifo_fini(struct nvkm_object *, bool); 10 int cur_runlist;
11 11 wait_queue_head_t wait;
12struct gk104_fifo_impl { 12 struct list_head chan;
13 struct nvkm_oclass base; 13};
14 u32 channels; 14
15struct gk104_fifo {
16 struct nvkm_fifo base;
17
18 struct work_struct fault;
19 u64 mask;
20
21 struct gk104_fifo_engn engine[7];
22 struct {
23 struct nvkm_memory *mem;
24 struct nvkm_vma bar;
25 } user;
26 int spoon_nr;
15}; 27};
16 28
17extern struct nvkm_ofuncs gk104_fifo_chan_ofuncs; 29int gk104_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
30 int index, int nr, struct nvkm_fifo **);
31void *gk104_fifo_dtor(struct nvkm_fifo *);
32int gk104_fifo_oneinit(struct nvkm_fifo *);
33void gk104_fifo_init(struct nvkm_fifo *);
34void gk104_fifo_fini(struct nvkm_fifo *);
35void gk104_fifo_intr(struct nvkm_fifo *);
36void gk104_fifo_uevent_init(struct nvkm_fifo *);
37void gk104_fifo_uevent_fini(struct nvkm_fifo *);
38void gk104_fifo_runlist_update(struct gk104_fifo *, u32 engine);
39
40static inline u64
41gk104_fifo_engine_subdev(int engine)
42{
43 switch (engine) {
44 case 0: return (1ULL << NVKM_ENGINE_GR) |
45 (1ULL << NVKM_ENGINE_SW) |
46 (1ULL << NVKM_ENGINE_CE2);
47 case 1: return (1ULL << NVKM_ENGINE_MSPDEC);
48 case 2: return (1ULL << NVKM_ENGINE_MSPPP);
49 case 3: return (1ULL << NVKM_ENGINE_MSVLD);
50 case 4: return (1ULL << NVKM_ENGINE_CE0);
51 case 5: return (1ULL << NVKM_ENGINE_CE1);
52 case 6: return (1ULL << NVKM_ENGINE_MSENC);
53 default:
54 WARN_ON(1);
55 return 0;
56 }
57}
58
59static inline int
60gk104_fifo_subdev_engine(int subdev)
61{
62 switch (subdev) {
63 case NVKM_ENGINE_GR:
64 case NVKM_ENGINE_SW:
65 case NVKM_ENGINE_CE2 : return 0;
66 case NVKM_ENGINE_MSPDEC: return 1;
67 case NVKM_ENGINE_MSPPP : return 2;
68 case NVKM_ENGINE_MSVLD : return 3;
69 case NVKM_ENGINE_CE0 : return 4;
70 case NVKM_ENGINE_CE1 : return 5;
71 case NVKM_ENGINE_MSENC : return 6;
72 default:
73 WARN_ON(1);
74 return 0;
75 }
76}
18#endif 77#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 927092217a06..ce01c1a7d41c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -22,15 +22,25 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "gk104.h" 24#include "gk104.h"
25#include "changk104.h"
25 26
26struct nvkm_oclass * 27static const struct nvkm_fifo_func
27gk208_fifo_oclass = &(struct gk104_fifo_impl) { 28gk208_fifo = {
28 .base.handle = NV_ENGINE(FIFO, 0x08), 29 .dtor = gk104_fifo_dtor,
29 .base.ofuncs = &(struct nvkm_ofuncs) { 30 .oneinit = gk104_fifo_oneinit,
30 .ctor = gk104_fifo_ctor, 31 .init = gk104_fifo_init,
31 .dtor = gk104_fifo_dtor, 32 .fini = gk104_fifo_fini,
32 .init = gk104_fifo_init, 33 .intr = gk104_fifo_intr,
33 .fini = _nvkm_fifo_fini, 34 .uevent_init = gk104_fifo_uevent_init,
35 .uevent_fini = gk104_fifo_uevent_fini,
36 .chan = {
37 &gk104_fifo_gpfifo_oclass,
38 NULL
34 }, 39 },
35 .channels = 1024, 40};
36}.base; 41
42int
43gk208_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
44{
45 return gk104_fifo_new_(&gk208_fifo, device, index, 1024, pfifo);
46}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index b30dc87a1357..b47fe98f4181 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -20,15 +20,25 @@
20 * DEALINGS IN THE SOFTWARE. 20 * DEALINGS IN THE SOFTWARE.
21 */ 21 */
22#include "gk104.h" 22#include "gk104.h"
23#include "changk104.h"
23 24
24struct nvkm_oclass * 25static const struct nvkm_fifo_func
25gk20a_fifo_oclass = &(struct gk104_fifo_impl) { 26gk20a_fifo = {
26 .base.handle = NV_ENGINE(FIFO, 0xea), 27 .dtor = gk104_fifo_dtor,
27 .base.ofuncs = &(struct nvkm_ofuncs) { 28 .oneinit = gk104_fifo_oneinit,
28 .ctor = gk104_fifo_ctor, 29 .init = gk104_fifo_init,
29 .dtor = gk104_fifo_dtor, 30 .fini = gk104_fifo_fini,
30 .init = gk104_fifo_init, 31 .intr = gk104_fifo_intr,
31 .fini = gk104_fifo_fini, 32 .uevent_init = gk104_fifo_uevent_init,
33 .uevent_fini = gk104_fifo_uevent_fini,
34 .chan = {
35 &gk104_fifo_gpfifo_oclass,
36 NULL
32 }, 37 },
33 .channels = 128, 38};
34}.base; 39
40int
41gk20a_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
42{
43 return gk104_fifo_new_(&gk20a_fifo, device, index, 128, pfifo);
44}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
index 749d525dd8e3..2db629f1bf7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
@@ -22,36 +22,25 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "gk104.h" 24#include "gk104.h"
25#include "changk104.h"
25 26
26#include <nvif/class.h> 27static const struct nvkm_fifo_func
27 28gm204_fifo = {
28static struct nvkm_oclass 29 .dtor = gk104_fifo_dtor,
29gm204_fifo_sclass[] = { 30 .oneinit = gk104_fifo_oneinit,
30 { MAXWELL_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs }, 31 .init = gk104_fifo_init,
31 {} 32 .fini = gk104_fifo_fini,
33 .intr = gk104_fifo_intr,
34 .uevent_init = gk104_fifo_uevent_init,
35 .uevent_fini = gk104_fifo_uevent_fini,
36 .chan = {
37 &gm204_fifo_gpfifo_oclass,
38 NULL
39 },
32}; 40};
33 41
34static int 42int
35gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 43gm204_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
36 struct nvkm_oclass *oclass, void *data, u32 size,
37 struct nvkm_object **pobject)
38{ 44{
39 int ret = gk104_fifo_ctor(parent, engine, oclass, data, size, pobject); 45 return gk104_fifo_new_(&gm204_fifo, device, index, 4096, pfifo);
40 if (ret == 0) {
41 struct gk104_fifo_priv *priv = (void *)*pobject;
42 nv_engine(priv)->sclass = gm204_fifo_sclass;
43 }
44 return ret;
45} 46}
46
47struct nvkm_oclass *
48gm204_fifo_oclass = &(struct gk104_fifo_impl) {
49 .base.handle = NV_ENGINE(FIFO, 0x24),
50 .base.ofuncs = &(struct nvkm_ofuncs) {
51 .ctor = gm204_fifo_ctor,
52 .dtor = gk104_fifo_dtor,
53 .init = gk104_fifo_init,
54 .fini = _nvkm_fifo_fini,
55 },
56 .channels = 4096,
57}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
new file mode 100644
index 000000000000..ae6375d9760f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -0,0 +1,44 @@
1/*
2 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22#include "gk104.h"
23#include "changk104.h"
24
25static const struct nvkm_fifo_func
26gm20b_fifo = {
27 .dtor = gk104_fifo_dtor,
28 .oneinit = gk104_fifo_oneinit,
29 .init = gk104_fifo_init,
30 .fini = gk104_fifo_fini,
31 .intr = gk104_fifo_intr,
32 .uevent_init = gk104_fifo_uevent_init,
33 .uevent_fini = gk104_fifo_uevent_fini,
34 .chan = {
35 &gm204_fifo_gpfifo_oclass,
36 NULL
37 },
38};
39
40int
41gm20b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
42{
43 return gk104_fifo_new_(&gm20b_fifo, device, index, 512, pfifo);
44}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
new file mode 100644
index 000000000000..820132363f68
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
@@ -0,0 +1,94 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25
26#include <core/client.h>
27#include <core/ramht.h>
28
29#include <nvif/class.h>
30#include <nvif/unpack.h>
31
32static int
33g84_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
34 void *data, u32 size, struct nvkm_object **pobject)
35{
36 struct nvkm_object *parent = oclass->parent;
37 union {
38 struct nv50_channel_gpfifo_v0 v0;
39 } *args = data;
40 struct nv50_fifo *fifo = nv50_fifo(base);
41 struct nv50_fifo_chan *chan;
42 u64 ioffset, ilength;
43 int ret;
44
45 nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
46 if (nvif_unpack(args->v0, 0, 0, false)) {
47 nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
48 "pushbuf %llx ioffset %016llx "
49 "ilength %08x\n",
50 args->v0.version, args->v0.vm, args->v0.pushbuf,
51 args->v0.ioffset, args->v0.ilength);
52 if (!args->v0.pushbuf)
53 return -EINVAL;
54 } else
55 return ret;
56
57 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
58 return -ENOMEM;
59 *pobject = &chan->base.object;
60
61 ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
62 oclass, chan);
63 if (ret)
64 return ret;
65
66 args->v0.chid = chan->base.chid;
67 ioffset = args->v0.ioffset;
68 ilength = order_base_2(args->v0.ilength / 8);
69
70 nvkm_kmap(chan->ramfc);
71 nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
72 nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
73 nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
74 nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
75 nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
76 nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
77 nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
78 nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
79 nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
80 (4 << 24) /* SEARCH_FULL */ |
81 (chan->ramht->gpuobj->node->offset >> 4));
82 nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
83 nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
84 nvkm_done(chan->ramfc);
85 return 0;
86}
87
88const struct nvkm_fifo_chan_oclass
89g84_fifo_gpfifo_oclass = {
90 .base.oclass = G82_CHANNEL_GPFIFO,
91 .base.minver = 0,
92 .base.maxver = 0,
93 .ctor = g84_fifo_gpfifo_new,
94};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
new file mode 100644
index 000000000000..e7cbc139c1d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -0,0 +1,293 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "changf100.h"
25
26#include <core/client.h>
27#include <core/gpuobj.h>
28#include <subdev/fb.h>
29#include <subdev/timer.h>
30
31#include <nvif/class.h>
32#include <nvif/unpack.h>
33
34static u32
35gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
36{
37 switch (engine->subdev.index) {
38 case NVKM_ENGINE_SW : return 0;
39 case NVKM_ENGINE_GR : return 0x0210;
40 case NVKM_ENGINE_CE0 : return 0x0230;
41 case NVKM_ENGINE_CE1 : return 0x0240;
42 case NVKM_ENGINE_MSPDEC: return 0x0250;
43 case NVKM_ENGINE_MSPPP : return 0x0260;
44 case NVKM_ENGINE_MSVLD : return 0x0270;
45 default:
46 WARN_ON(1);
47 return 0;
48 }
49}
50
51static int
52gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
53 struct nvkm_engine *engine, bool suspend)
54{
55 const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
56 struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
57 struct nvkm_subdev *subdev = &chan->fifo->base.engine.subdev;
58 struct nvkm_device *device = subdev->device;
59 struct nvkm_gpuobj *inst = chan->base.inst;
60 int ret = 0;
61
62 nvkm_wr32(device, 0x002634, chan->base.chid);
63 if (nvkm_msec(device, 2000,
64 if (nvkm_rd32(device, 0x002634) == chan->base.chid)
65 break;
66 ) < 0) {
67 nvkm_error(subdev, "channel %d [%s] kick timeout\n",
68 chan->base.chid, chan->base.object.client->name);
69 ret = -EBUSY;
70 if (suspend)
71 return ret;
72 }
73
74 if (offset) {
75 nvkm_kmap(inst);
76 nvkm_wo32(inst, offset + 0x00, 0x00000000);
77 nvkm_wo32(inst, offset + 0x04, 0x00000000);
78 nvkm_done(inst);
79 }
80
81 return ret;
82}
83
84static int
85gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
86 struct nvkm_engine *engine)
87{
88 const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
89 struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
90 struct nvkm_gpuobj *inst = chan->base.inst;
91
92 if (offset) {
93 u64 addr = chan->engn[engine->subdev.index].vma.offset;
94 nvkm_kmap(inst);
95 nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
96 nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
97 nvkm_done(inst);
98 }
99
100 return 0;
101}
102
103static void
104gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
105 struct nvkm_engine *engine)
106{
107 struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
108 nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
109 nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
110}
111
112static int
113gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
114 struct nvkm_engine *engine,
115 struct nvkm_object *object)
116{
117 struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
118 int engn = engine->subdev.index;
119 int ret;
120
121 if (!gf100_fifo_gpfifo_engine_addr(engine))
122 return 0;
123
124 ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
125 if (ret)
126 return ret;
127
128 return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
129 NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
130}
131
132static void
133gf100_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
134{
135 struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
136 struct gf100_fifo *fifo = chan->fifo;
137 struct nvkm_device *device = fifo->base.engine.subdev.device;
138 u32 coff = chan->base.chid * 8;
139
140 if (!list_empty(&chan->head) && !chan->killed) {
141 list_del_init(&chan->head);
142 nvkm_mask(device, 0x003004 + coff, 0x00000001, 0x00000000);
143 gf100_fifo_runlist_update(fifo);
144 }
145
146 gf100_fifo_intr_engine(fifo);
147
148 nvkm_wr32(device, 0x003000 + coff, 0x00000000);
149}
150
151static void
152gf100_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
153{
154 struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
155 struct gf100_fifo *fifo = chan->fifo;
156 struct nvkm_device *device = fifo->base.engine.subdev.device;
157 u32 addr = chan->base.inst->addr >> 12;
158 u32 coff = chan->base.chid * 8;
159
160 nvkm_wr32(device, 0x003000 + coff, 0xc0000000 | addr);
161
162 if (list_empty(&chan->head) && !chan->killed) {
163 list_add_tail(&chan->head, &fifo->chan);
164 nvkm_wr32(device, 0x003004 + coff, 0x001f0001);
165 gf100_fifo_runlist_update(fifo);
166 }
167}
168
169static void *
170gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
171{
172 struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
173 nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
174 nvkm_gpuobj_del(&chan->pgd);
175 return chan;
176}
177
178static const struct nvkm_fifo_chan_func
179gf100_fifo_gpfifo_func = {
180 .dtor = gf100_fifo_gpfifo_dtor,
181 .init = gf100_fifo_gpfifo_init,
182 .fini = gf100_fifo_gpfifo_fini,
183 .ntfy = g84_fifo_chan_ntfy,
184 .engine_ctor = gf100_fifo_gpfifo_engine_ctor,
185 .engine_dtor = gf100_fifo_gpfifo_engine_dtor,
186 .engine_init = gf100_fifo_gpfifo_engine_init,
187 .engine_fini = gf100_fifo_gpfifo_engine_fini,
188};
189
190static int
191gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
192 void *data, u32 size, struct nvkm_object **pobject)
193{
194 union {
195 struct fermi_channel_gpfifo_v0 v0;
196 } *args = data;
197 struct gf100_fifo *fifo = gf100_fifo(base);
198 struct nvkm_device *device = fifo->base.engine.subdev.device;
199 struct nvkm_object *parent = oclass->parent;
200 struct gf100_fifo_chan *chan;
201 u64 usermem, ioffset, ilength;
202 int ret, i;
203
204 nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
205 if (nvif_unpack(args->v0, 0, 0, false)) {
206 nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
207 "ioffset %016llx ilength %08x\n",
208 args->v0.version, args->v0.vm, args->v0.ioffset,
209 args->v0.ilength);
210 } else
211 return ret;
212
213 /* allocate channel */
214 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
215 return -ENOMEM;
216 *pobject = &chan->base.object;
217 chan->fifo = fifo;
218 INIT_LIST_HEAD(&chan->head);
219
220 ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
221 0x1000, 0x1000, true, args->v0.vm, 0,
222 (1ULL << NVKM_ENGINE_CE0) |
223 (1ULL << NVKM_ENGINE_CE1) |
224 (1ULL << NVKM_ENGINE_GR) |
225 (1ULL << NVKM_ENGINE_MSPDEC) |
226 (1ULL << NVKM_ENGINE_MSPPP) |
227 (1ULL << NVKM_ENGINE_MSVLD) |
228 (1ULL << NVKM_ENGINE_SW),
229 1, fifo->user.bar.offset, 0x1000,
230 oclass, &chan->base);
231 if (ret)
232 return ret;
233
234 args->v0.chid = chan->base.chid;
235
236 /* page directory */
237 ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
238 if (ret)
239 return ret;
240
241 nvkm_kmap(chan->base.inst);
242 nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
243 nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
244 nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
245 nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
246 nvkm_done(chan->base.inst);
247
248 ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
249 if (ret)
250 return ret;
251
252 /* clear channel control registers */
253
254 usermem = chan->base.chid * 0x1000;
255 ioffset = args->v0.ioffset;
256 ilength = order_base_2(args->v0.ilength / 8);
257
258 nvkm_kmap(fifo->user.mem);
259 for (i = 0; i < 0x1000; i += 4)
260 nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
261 nvkm_done(fifo->user.mem);
262 usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
263
264 /* RAMFC */
265 nvkm_kmap(chan->base.inst);
266 nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
267 nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
268 nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
269 nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
270 nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
271 nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
272 (ilength << 16));
273 nvkm_wo32(chan->base.inst, 0x54, 0x00000002);
274 nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
275 nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
276 nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
277 nvkm_wo32(chan->base.inst, 0xa4, 0x1f1f1f1f);
278 nvkm_wo32(chan->base.inst, 0xa8, 0x1f1f1f1f);
279 nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
280 nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
281 nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
282 nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
283 nvkm_done(chan->base.inst);
284 return 0;
285}
286
287const struct nvkm_fifo_chan_oclass
288gf100_fifo_gpfifo_oclass = {
289 .base.oclass = FERMI_CHANNEL_GPFIFO,
290 .base.minver = 0,
291 .base.maxver = 0,
292 .ctor = gf100_fifo_gpfifo_new,
293};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
new file mode 100644
index 000000000000..0b817540a9e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -0,0 +1,323 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "changk104.h"
25
26#include <core/client.h>
27#include <core/gpuobj.h>
28#include <subdev/fb.h>
29#include <subdev/mmu.h>
30#include <subdev/timer.h>
31
32#include <nvif/class.h>
33#include <nvif/unpack.h>
34
35static int
36gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
37{
38 struct gk104_fifo *fifo = chan->fifo;
39 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
40 struct nvkm_device *device = subdev->device;
41 struct nvkm_client *client = chan->base.object.client;
42
43 nvkm_wr32(device, 0x002634, chan->base.chid);
44 if (nvkm_msec(device, 2000,
45 if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
46 break;
47 ) < 0) {
48 nvkm_error(subdev, "channel %d [%s] kick timeout\n",
49 chan->base.chid, client->name);
50 return -EBUSY;
51 }
52
53 return 0;
54}
55
56static u32
57gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
58{
59 switch (engine->subdev.index) {
60 case NVKM_ENGINE_SW :
61 case NVKM_ENGINE_CE0 :
62 case NVKM_ENGINE_CE1 :
63 case NVKM_ENGINE_CE2 : return 0x0000;
64 case NVKM_ENGINE_GR : return 0x0210;
65 case NVKM_ENGINE_MSPDEC: return 0x0250;
66 case NVKM_ENGINE_MSPPP : return 0x0260;
67 case NVKM_ENGINE_MSVLD : return 0x0270;
68 default:
69 WARN_ON(1);
70 return 0;
71 }
72}
73
74static int
75gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
76 struct nvkm_engine *engine, bool suspend)
77{
78 const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
79 struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
80 struct nvkm_gpuobj *inst = chan->base.inst;
81 int ret;
82
83 ret = gk104_fifo_gpfifo_kick(chan);
84 if (ret && suspend)
85 return ret;
86
87 if (offset) {
88 nvkm_kmap(inst);
89 nvkm_wo32(inst, offset + 0x00, 0x00000000);
90 nvkm_wo32(inst, offset + 0x04, 0x00000000);
91 nvkm_done(inst);
92 }
93
94 return ret;
95}
96
97static int
98gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
99 struct nvkm_engine *engine)
100{
101 const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
102 struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
103 struct nvkm_gpuobj *inst = chan->base.inst;
104
105 if (offset) {
106 u64 addr = chan->engn[engine->subdev.index].vma.offset;
107 nvkm_kmap(inst);
108 nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
109 nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
110 nvkm_done(inst);
111 }
112
113 return 0;
114}
115
116static void
117gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
118 struct nvkm_engine *engine)
119{
120 struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
121 nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
122 nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
123}
124
125static int
126gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
127 struct nvkm_engine *engine,
128 struct nvkm_object *object)
129{
130 struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
131 int engn = engine->subdev.index;
132 int ret;
133
134 if (!gk104_fifo_gpfifo_engine_addr(engine))
135 return 0;
136
137 ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
138 if (ret)
139 return ret;
140
141 return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
142 NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
143}
144
145static void
146gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
147{
148 struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
149 struct gk104_fifo *fifo = chan->fifo;
150 struct nvkm_device *device = fifo->base.engine.subdev.device;
151 u32 coff = chan->base.chid * 8;
152
153 if (!list_empty(&chan->head)) {
154 list_del_init(&chan->head);
155 nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
156 gk104_fifo_runlist_update(fifo, chan->engine);
157 }
158
159 nvkm_wr32(device, 0x800000 + coff, 0x00000000);
160}
161
162static void
163gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
164{
165 struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
166 struct gk104_fifo *fifo = chan->fifo;
167 struct nvkm_device *device = fifo->base.engine.subdev.device;
168 u32 addr = chan->base.inst->addr >> 12;
169 u32 coff = chan->base.chid * 8;
170
171 nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->engine << 16);
172 nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr);
173
174 if (list_empty(&chan->head) && !chan->killed) {
175 list_add_tail(&chan->head, &fifo->engine[chan->engine].chan);
176 nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
177 gk104_fifo_runlist_update(fifo, chan->engine);
178 nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
179 }
180}
181
182static void *
183gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
184{
185 struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
186 nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
187 nvkm_gpuobj_del(&chan->pgd);
188 return chan;
189}
190
191static const struct nvkm_fifo_chan_func
192gk104_fifo_gpfifo_func = {
193 .dtor = gk104_fifo_gpfifo_dtor,
194 .init = gk104_fifo_gpfifo_init,
195 .fini = gk104_fifo_gpfifo_fini,
196 .ntfy = g84_fifo_chan_ntfy,
197 .engine_ctor = gk104_fifo_gpfifo_engine_ctor,
198 .engine_dtor = gk104_fifo_gpfifo_engine_dtor,
199 .engine_init = gk104_fifo_gpfifo_engine_init,
200 .engine_fini = gk104_fifo_gpfifo_engine_fini,
201};
202
203int
204gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
205 void *data, u32 size, struct nvkm_object **pobject)
206{
207 union {
208 struct kepler_channel_gpfifo_a_v0 v0;
209 } *args = data;
210 struct gk104_fifo *fifo = gk104_fifo(base);
211 struct nvkm_device *device = fifo->base.engine.subdev.device;
212 struct nvkm_object *parent = oclass->parent;
213 struct gk104_fifo_chan *chan;
214 u64 usermem, ioffset, ilength;
215 u32 engines;
216 int ret, i;
217
218 nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
219 if (nvif_unpack(args->v0, 0, 0, false)) {
220 nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
221 "ioffset %016llx ilength %08x engine %08x\n",
222 args->v0.version, args->v0.vm, args->v0.ioffset,
223 args->v0.ilength, args->v0.engine);
224 } else
225 return ret;
226
227 /* determine which downstream engines are present */
228 for (i = 0, engines = 0; i < ARRAY_SIZE(fifo->engine); i++) {
229 u64 subdevs = gk104_fifo_engine_subdev(i);
230 if (!nvkm_device_engine(device, __ffs64(subdevs)))
231 continue;
232 engines |= (1 << i);
233 }
234
235 /* if this is an engine mask query, we're done */
236 if (!args->v0.engine) {
237 args->v0.engine = engines;
238 return nvkm_object_new(oclass, NULL, 0, pobject);
239 }
240
241 /* check that we support a requested engine - note that the user
242 * argument is a mask in order to allow the user to request (for
243 * example) *any* copy engine, but doesn't matter which.
244 */
245 args->v0.engine &= engines;
246 if (!args->v0.engine) {
247 nvif_ioctl(parent, "no supported engine\n");
248 return -ENODEV;
249 }
250
251 /* allocate the channel */
252 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
253 return -ENOMEM;
254 *pobject = &chan->base.object;
255 chan->fifo = fifo;
256 chan->engine = __ffs(args->v0.engine);
257 INIT_LIST_HEAD(&chan->head);
258
259 ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
260 0x1000, 0x1000, true, args->v0.vm, 0,
261 gk104_fifo_engine_subdev(chan->engine),
262 1, fifo->user.bar.offset, 0x200,
263 oclass, &chan->base);
264 if (ret)
265 return ret;
266
267 args->v0.chid = chan->base.chid;
268
269 /* page directory */
270 ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
271 if (ret)
272 return ret;
273
274 nvkm_kmap(chan->base.inst);
275 nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
276 nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
277 nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
278 nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
279 nvkm_done(chan->base.inst);
280
281 ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
282 if (ret)
283 return ret;
284
285 /* clear channel control registers */
286 usermem = chan->base.chid * 0x200;
287 ioffset = args->v0.ioffset;
288 ilength = order_base_2(args->v0.ilength / 8);
289
290 nvkm_kmap(fifo->user.mem);
291 for (i = 0; i < 0x200; i += 4)
292 nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
293 nvkm_done(fifo->user.mem);
294 usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
295
296 /* RAMFC */
297 nvkm_kmap(chan->base.inst);
298 nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
299 nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
300 nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
301 nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
302 nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
303 nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
304 (ilength << 16));
305 nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
306 nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
307 nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
308 nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
309 nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
310 nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
311 nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
312 nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
313 nvkm_done(chan->base.inst);
314 return 0;
315}
316
317const struct nvkm_fifo_chan_oclass
318gk104_fifo_gpfifo_oclass = {
319 .base.oclass = KEPLER_CHANNEL_GPFIFO_A,
320 .base.minver = 0,
321 .base.maxver = 0,
322 .ctor = gk104_fifo_gpfifo_new,
323};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
new file mode 100644
index 000000000000..6511d6e21ecc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
@@ -0,0 +1,34 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "changk104.h"
25
26#include <nvif/class.h>
27
28const struct nvkm_fifo_chan_oclass
29gm204_fifo_gpfifo_oclass = {
30 .base.oclass = MAXWELL_CHANNEL_GPFIFO_A,
31 .base.minver = 0,
32 .base.maxver = 0,
33 .ctor = gk104_fifo_gpfifo_new,
34};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
new file mode 100644
index 000000000000..a8c69f878221
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
@@ -0,0 +1,92 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25
26#include <core/client.h>
27#include <core/ramht.h>
28
29#include <nvif/class.h>
30#include <nvif/unpack.h>
31
32static int
33nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
34 void *data, u32 size, struct nvkm_object **pobject)
35{
36 struct nvkm_object *parent = oclass->parent;
37 union {
38 struct nv50_channel_gpfifo_v0 v0;
39 } *args = data;
40 struct nv50_fifo *fifo = nv50_fifo(base);
41 struct nv50_fifo_chan *chan;
42 u64 ioffset, ilength;
43 int ret;
44
45 nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
46 if (nvif_unpack(args->v0, 0, 0, false)) {
47 nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
48 "pushbuf %llx ioffset %016llx "
49 "ilength %08x\n",
50 args->v0.version, args->v0.vm, args->v0.pushbuf,
51 args->v0.ioffset, args->v0.ilength);
52 if (!args->v0.pushbuf)
53 return -EINVAL;
54 } else
55 return ret;
56
57 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
58 return -ENOMEM;
59 *pobject = &chan->base.object;
60
61 ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
62 oclass, chan);
63 if (ret)
64 return ret;
65
66 args->v0.chid = chan->base.chid;
67 ioffset = args->v0.ioffset;
68 ilength = order_base_2(args->v0.ilength / 8);
69
70 nvkm_kmap(chan->ramfc);
71 nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
72 nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
73 nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
74 nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
75 nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
76 nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
77 nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
78 nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
79 nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
80 (4 << 24) /* SEARCH_FULL */ |
81 (chan->ramht->gpuobj->node->offset >> 4));
82 nvkm_done(chan->ramfc);
83 return 0;
84}
85
86const struct nvkm_fifo_chan_oclass
87nv50_fifo_gpfifo_oclass = {
88 .base.oclass = NV50_CHANNEL_GPFIFO,
89 .base.minver = 0,
90 .base.maxver = 0,
91 .ctor = nv50_fifo_gpfifo_new,
92};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index 043e4296084c..ad707ff176cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -22,20 +22,17 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "nv04.h"
25#include "channv04.h"
26#include "regsnv04.h"
25 27
26#include <core/client.h> 28#include <core/client.h>
27#include <core/device.h>
28#include <core/engctx.h>
29#include <core/handle.h>
30#include <core/ramht.h> 29#include <core/ramht.h>
31#include <subdev/instmem/nv04.h> 30#include <subdev/instmem.h>
32#include <subdev/timer.h> 31#include <subdev/timer.h>
32#include <engine/sw.h>
33 33
34#include <nvif/class.h> 34static const struct nv04_fifo_ramfc
35#include <nvif/unpack.h> 35nv04_fifo_ramfc[] = {
36
37static struct ramfc_desc
38nv04_ramfc[] = {
39 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT }, 36 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
40 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET }, 37 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
41 { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE }, 38 { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
@@ -47,268 +44,19 @@ nv04_ramfc[] = {
47 {} 44 {}
48}; 45};
49 46
50/*******************************************************************************
51 * FIFO channel objects
52 ******************************************************************************/
53
54int
55nv04_fifo_object_attach(struct nvkm_object *parent,
56 struct nvkm_object *object, u32 handle)
57{
58 struct nv04_fifo_priv *priv = (void *)parent->engine;
59 struct nv04_fifo_chan *chan = (void *)parent;
60 u32 context, chid = chan->base.chid;
61 int ret;
62
63 if (nv_iclass(object, NV_GPUOBJ_CLASS))
64 context = nv_gpuobj(object)->addr >> 4;
65 else
66 context = 0x00000004; /* just non-zero */
67
68 switch (nv_engidx(object->engine)) {
69 case NVDEV_ENGINE_DMAOBJ:
70 case NVDEV_ENGINE_SW:
71 context |= 0x00000000;
72 break;
73 case NVDEV_ENGINE_GR:
74 context |= 0x00010000;
75 break;
76 case NVDEV_ENGINE_MPEG:
77 context |= 0x00020000;
78 break;
79 default:
80 return -EINVAL;
81 }
82
83 context |= 0x80000000; /* valid */
84 context |= chid << 24;
85
86 mutex_lock(&nv_subdev(priv)->mutex);
87 ret = nvkm_ramht_insert(priv->ramht, chid, handle, context);
88 mutex_unlock(&nv_subdev(priv)->mutex);
89 return ret;
90}
91
92void
93nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
94{
95 struct nv04_fifo_priv *priv = (void *)parent->engine;
96 mutex_lock(&nv_subdev(priv)->mutex);
97 nvkm_ramht_remove(priv->ramht, cookie);
98 mutex_unlock(&nv_subdev(priv)->mutex);
99}
100
101int
102nv04_fifo_context_attach(struct nvkm_object *parent,
103 struct nvkm_object *object)
104{
105 nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
106 return 0;
107}
108
109static int
110nv04_fifo_chan_ctor(struct nvkm_object *parent,
111 struct nvkm_object *engine,
112 struct nvkm_oclass *oclass, void *data, u32 size,
113 struct nvkm_object **pobject)
114{
115 union {
116 struct nv03_channel_dma_v0 v0;
117 } *args = data;
118 struct nv04_fifo_priv *priv = (void *)engine;
119 struct nv04_fifo_chan *chan;
120 int ret;
121
122 nv_ioctl(parent, "create channel dma size %d\n", size);
123 if (nvif_unpack(args->v0, 0, 0, false)) {
124 nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
125 "offset %016llx\n", args->v0.version,
126 args->v0.pushbuf, args->v0.offset);
127 } else
128 return ret;
129
130 ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
131 0x10000, args->v0.pushbuf,
132 (1ULL << NVDEV_ENGINE_DMAOBJ) |
133 (1ULL << NVDEV_ENGINE_SW) |
134 (1ULL << NVDEV_ENGINE_GR), &chan);
135 *pobject = nv_object(chan);
136 if (ret)
137 return ret;
138
139 args->v0.chid = chan->base.chid;
140
141 nv_parent(chan)->object_attach = nv04_fifo_object_attach;
142 nv_parent(chan)->object_detach = nv04_fifo_object_detach;
143 nv_parent(chan)->context_attach = nv04_fifo_context_attach;
144 chan->ramfc = chan->base.chid * 32;
145
146 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
147 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
148 nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
149 nv_wo32(priv->ramfc, chan->ramfc + 0x10,
150 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
151 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
152#ifdef __BIG_ENDIAN
153 NV_PFIFO_CACHE1_BIG_ENDIAN |
154#endif
155 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
156 return 0;
157}
158
159void
160nv04_fifo_chan_dtor(struct nvkm_object *object)
161{
162 struct nv04_fifo_priv *priv = (void *)object->engine;
163 struct nv04_fifo_chan *chan = (void *)object;
164 struct ramfc_desc *c = priv->ramfc_desc;
165
166 do {
167 nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
168 } while ((++c)->bits);
169
170 nvkm_fifo_channel_destroy(&chan->base);
171}
172
173int
174nv04_fifo_chan_init(struct nvkm_object *object)
175{
176 struct nv04_fifo_priv *priv = (void *)object->engine;
177 struct nv04_fifo_chan *chan = (void *)object;
178 u32 mask = 1 << chan->base.chid;
179 unsigned long flags;
180 int ret;
181
182 ret = nvkm_fifo_channel_init(&chan->base);
183 if (ret)
184 return ret;
185
186 spin_lock_irqsave(&priv->base.lock, flags);
187 nv_mask(priv, NV04_PFIFO_MODE, mask, mask);
188 spin_unlock_irqrestore(&priv->base.lock, flags);
189 return 0;
190}
191
192int
193nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
194{
195 struct nv04_fifo_priv *priv = (void *)object->engine;
196 struct nv04_fifo_chan *chan = (void *)object;
197 struct nvkm_gpuobj *fctx = priv->ramfc;
198 struct ramfc_desc *c;
199 unsigned long flags;
200 u32 data = chan->ramfc;
201 u32 chid;
202
203 /* prevent fifo context switches */
204 spin_lock_irqsave(&priv->base.lock, flags);
205 nv_wr32(priv, NV03_PFIFO_CACHES, 0);
206
207 /* if this channel is active, replace it with a null context */
208 chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
209 if (chid == chan->base.chid) {
210 nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
211 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
212 nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
213
214 c = priv->ramfc_desc;
215 do {
216 u32 rm = ((1ULL << c->bits) - 1) << c->regs;
217 u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
218 u32 rv = (nv_rd32(priv, c->regp) & rm) >> c->regs;
219 u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
220 nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
221 } while ((++c)->bits);
222
223 c = priv->ramfc_desc;
224 do {
225 nv_wr32(priv, c->regp, 0x00000000);
226 } while ((++c)->bits);
227
228 nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
229 nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
230 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
231 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
232 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
233 }
234
235 /* restore normal operation, after disabling dma mode */
236 nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
237 nv_wr32(priv, NV03_PFIFO_CACHES, 1);
238 spin_unlock_irqrestore(&priv->base.lock, flags);
239
240 return nvkm_fifo_channel_fini(&chan->base, suspend);
241}
242
243static struct nvkm_ofuncs
244nv04_fifo_ofuncs = {
245 .ctor = nv04_fifo_chan_ctor,
246 .dtor = nv04_fifo_chan_dtor,
247 .init = nv04_fifo_chan_init,
248 .fini = nv04_fifo_chan_fini,
249 .map = _nvkm_fifo_channel_map,
250 .rd32 = _nvkm_fifo_channel_rd32,
251 .wr32 = _nvkm_fifo_channel_wr32,
252 .ntfy = _nvkm_fifo_channel_ntfy
253};
254
255static struct nvkm_oclass
256nv04_fifo_sclass[] = {
257 { NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
258 {}
259};
260
261/*******************************************************************************
262 * FIFO context - basically just the instmem reserved for the channel
263 ******************************************************************************/
264
265int
266nv04_fifo_context_ctor(struct nvkm_object *parent,
267 struct nvkm_object *engine,
268 struct nvkm_oclass *oclass, void *data, u32 size,
269 struct nvkm_object **pobject)
270{
271 struct nv04_fifo_base *base;
272 int ret;
273
274 ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
275 0x1000, NVOBJ_FLAG_HEAP, &base);
276 *pobject = nv_object(base);
277 if (ret)
278 return ret;
279
280 return 0;
281}
282
283static struct nvkm_oclass
284nv04_fifo_cclass = {
285 .handle = NV_ENGCTX(FIFO, 0x04),
286 .ofuncs = &(struct nvkm_ofuncs) {
287 .ctor = nv04_fifo_context_ctor,
288 .dtor = _nvkm_fifo_context_dtor,
289 .init = _nvkm_fifo_context_init,
290 .fini = _nvkm_fifo_context_fini,
291 .rd32 = _nvkm_fifo_context_rd32,
292 .wr32 = _nvkm_fifo_context_wr32,
293 },
294};
295
296/*******************************************************************************
297 * PFIFO engine
298 ******************************************************************************/
299
300void 47void
301nv04_fifo_pause(struct nvkm_fifo *pfifo, unsigned long *pflags) 48nv04_fifo_pause(struct nvkm_fifo *base, unsigned long *pflags)
302__acquires(priv->base.lock) 49__acquires(fifo->base.lock)
303{ 50{
304 struct nv04_fifo_priv *priv = (void *)pfifo; 51 struct nv04_fifo *fifo = nv04_fifo(base);
52 struct nvkm_device *device = fifo->base.engine.subdev.device;
305 unsigned long flags; 53 unsigned long flags;
306 54
307 spin_lock_irqsave(&priv->base.lock, flags); 55 spin_lock_irqsave(&fifo->base.lock, flags);
308 *pflags = flags; 56 *pflags = flags;
309 57
310 nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000); 58 nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
311 nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000); 59 nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
312 60
313 /* in some cases the puller may be left in an inconsistent state 61 /* in some cases the puller may be left in an inconsistent state
314 * if you try to stop it while it's busy translating handles. 62 * if you try to stop it while it's busy translating handles.
@@ -319,28 +67,31 @@ __acquires(priv->base.lock)
319 * to avoid this, we invalidate the most recently calculated 67 * to avoid this, we invalidate the most recently calculated
320 * instance. 68 * instance.
321 */ 69 */
322 if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0, 70 nvkm_msec(device, 2000,
323 NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000)) 71 u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
324 nv_warn(priv, "timeout idling puller\n"); 72 if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
73 break;
74 );
325 75
326 if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) & 76 if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
327 NV04_PFIFO_CACHE1_PULL0_HASH_FAILED) 77 NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
328 nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); 78 nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
329 79
330 nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000); 80 nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
331} 81}
332 82
333void 83void
334nv04_fifo_start(struct nvkm_fifo *pfifo, unsigned long *pflags) 84nv04_fifo_start(struct nvkm_fifo *base, unsigned long *pflags)
335__releases(priv->base.lock) 85__releases(fifo->base.lock)
336{ 86{
337 struct nv04_fifo_priv *priv = (void *)pfifo; 87 struct nv04_fifo *fifo = nv04_fifo(base);
88 struct nvkm_device *device = fifo->base.engine.subdev.device;
338 unsigned long flags = *pflags; 89 unsigned long flags = *pflags;
339 90
340 nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001); 91 nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
341 nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001); 92 nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);
342 93
343 spin_unlock_irqrestore(&priv->base.lock, flags); 94 spin_unlock_irqrestore(&fifo->base.lock, flags);
344} 95}
345 96
346static const char * 97static const char *
@@ -354,61 +105,40 @@ nv_dma_state_err(u32 state)
354} 105}
355 106
356static bool 107static bool
357nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data) 108nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
358{ 109{
359 struct nv04_fifo_chan *chan = NULL; 110 struct nvkm_sw *sw = device->sw;
360 struct nvkm_handle *bind; 111 const int subc = (addr & 0x0000e000) >> 13;
361 const int subc = (addr >> 13) & 0x7; 112 const int mthd = (addr & 0x00001ffc);
362 const int mthd = addr & 0x1ffc; 113 const u32 mask = 0x0000000f << (subc * 4);
114 u32 engine = nvkm_rd32(device, 0x003280);
363 bool handled = false; 115 bool handled = false;
364 unsigned long flags;
365 u32 engine;
366
367 spin_lock_irqsave(&priv->base.lock, flags);
368 if (likely(chid >= priv->base.min && chid <= priv->base.max))
369 chan = (void *)priv->base.channel[chid];
370 if (unlikely(!chan))
371 goto out;
372 116
373 switch (mthd) { 117 switch (mthd) {
374 case 0x0000: 118 case 0x0000 ... 0x0000: /* subchannel's engine -> software */
375 bind = nvkm_namedb_get(nv_namedb(chan), data); 119 nvkm_wr32(device, 0x003280, (engine &= ~mask));
376 if (unlikely(!bind)) 120 case 0x0180 ... 0x01fc: /* handle -> instance */
377 break; 121 data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
378 122 case 0x0100 ... 0x017c:
379 if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) { 123 case 0x0200 ... 0x1ffc: /* pass method down to sw */
380 engine = 0x0000000f << (subc * 4); 124 if (!(engine & mask) && sw)
381 chan->subc[subc] = data; 125 handled = nvkm_sw_mthd(sw, chid, subc, mthd, data);
382 handled = true;
383
384 nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
385 }
386
387 nvkm_namedb_put(bind);
388 break; 126 break;
389 default: 127 default:
390 engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
391 if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
392 break;
393
394 bind = nvkm_namedb_get(nv_namedb(chan), chan->subc[subc]);
395 if (likely(bind)) {
396 if (!nv_call(bind->object, mthd, data))
397 handled = true;
398 nvkm_namedb_put(bind);
399 }
400 break; 128 break;
401 } 129 }
402 130
403out:
404 spin_unlock_irqrestore(&priv->base.lock, flags);
405 return handled; 131 return handled;
406} 132}
407 133
408static void 134static void
409nv04_fifo_cache_error(struct nvkm_device *device, 135nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
410 struct nv04_fifo_priv *priv, u32 chid, u32 get)
411{ 136{
137 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
138 struct nvkm_device *device = subdev->device;
139 struct nvkm_fifo_chan *chan;
140 unsigned long flags;
141 u32 pull0 = nvkm_rd32(device, 0x003250);
412 u32 mthd, data; 142 u32 mthd, data;
413 int ptr; 143 int ptr;
414 144
@@ -420,216 +150,214 @@ nv04_fifo_cache_error(struct nvkm_device *device,
420 ptr = (get & 0x7ff) >> 2; 150 ptr = (get & 0x7ff) >> 2;
421 151
422 if (device->card_type < NV_40) { 152 if (device->card_type < NV_40) {
423 mthd = nv_rd32(priv, NV04_PFIFO_CACHE1_METHOD(ptr)); 153 mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
424 data = nv_rd32(priv, NV04_PFIFO_CACHE1_DATA(ptr)); 154 data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
425 } else { 155 } else {
426 mthd = nv_rd32(priv, NV40_PFIFO_CACHE1_METHOD(ptr)); 156 mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
427 data = nv_rd32(priv, NV40_PFIFO_CACHE1_DATA(ptr)); 157 data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
428 } 158 }
429 159
430 if (!nv04_fifo_swmthd(priv, chid, mthd, data)) { 160 if (!(pull0 & 0x00000100) ||
431 const char *client_name = 161 !nv04_fifo_swmthd(device, chid, mthd, data)) {
432 nvkm_client_name_for_fifo_chid(&priv->base, chid); 162 chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
433 nv_error(priv, 163 nvkm_error(subdev, "CACHE_ERROR - "
434 "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n", 164 "ch %d [%s] subc %d mthd %04x data %08x\n",
435 chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc, 165 chid, chan ? chan->object.client->name : "unknown",
436 data); 166 (mthd >> 13) & 7, mthd & 0x1ffc, data);
167 nvkm_fifo_chan_put(&fifo->base, flags, &chan);
437 } 168 }
438 169
439 nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0); 170 nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
440 nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); 171 nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
441 172
442 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 173 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
443 nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1); 174 nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
444 nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); 175 nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
445 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 176 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
446 nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1); 177 nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
447 nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0); 178 nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);
448 179
449 nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 180 nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
450 nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1); 181 nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
451 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); 182 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
452} 183}
453 184
454static void 185static void
455nv04_fifo_dma_pusher(struct nvkm_device *device, 186nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
456 struct nv04_fifo_priv *priv, u32 chid)
457{ 187{
458 const char *client_name; 188 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
459 u32 dma_get = nv_rd32(priv, 0x003244); 189 struct nvkm_device *device = subdev->device;
460 u32 dma_put = nv_rd32(priv, 0x003240); 190 u32 dma_get = nvkm_rd32(device, 0x003244);
461 u32 push = nv_rd32(priv, 0x003220); 191 u32 dma_put = nvkm_rd32(device, 0x003240);
462 u32 state = nv_rd32(priv, 0x003228); 192 u32 push = nvkm_rd32(device, 0x003220);
463 193 u32 state = nvkm_rd32(device, 0x003228);
464 client_name = nvkm_client_name_for_fifo_chid(&priv->base, chid); 194 struct nvkm_fifo_chan *chan;
195 unsigned long flags;
196 const char *name;
465 197
198 chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
199 name = chan ? chan->object.client->name : "unknown";
466 if (device->card_type == NV_50) { 200 if (device->card_type == NV_50) {
467 u32 ho_get = nv_rd32(priv, 0x003328); 201 u32 ho_get = nvkm_rd32(device, 0x003328);
468 u32 ho_put = nv_rd32(priv, 0x003320); 202 u32 ho_put = nvkm_rd32(device, 0x003320);
469 u32 ib_get = nv_rd32(priv, 0x003334); 203 u32 ib_get = nvkm_rd32(device, 0x003334);
470 u32 ib_put = nv_rd32(priv, 0x003330); 204 u32 ib_put = nvkm_rd32(device, 0x003330);
471 205
472 nv_error(priv, 206 nvkm_error(subdev, "DMA_PUSHER - "
473 "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n", 207 "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
474 chid, client_name, ho_get, dma_get, ho_put, dma_put, 208 "ib_put %08x state %08x (err: %s) push %08x\n",
475 ib_get, ib_put, state, nv_dma_state_err(state), push); 209 chid, name, ho_get, dma_get, ho_put, dma_put,
210 ib_get, ib_put, state, nv_dma_state_err(state),
211 push);
476 212
477 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ 213 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
478 nv_wr32(priv, 0x003364, 0x00000000); 214 nvkm_wr32(device, 0x003364, 0x00000000);
479 if (dma_get != dma_put || ho_get != ho_put) { 215 if (dma_get != dma_put || ho_get != ho_put) {
480 nv_wr32(priv, 0x003244, dma_put); 216 nvkm_wr32(device, 0x003244, dma_put);
481 nv_wr32(priv, 0x003328, ho_put); 217 nvkm_wr32(device, 0x003328, ho_put);
482 } else 218 } else
483 if (ib_get != ib_put) 219 if (ib_get != ib_put)
484 nv_wr32(priv, 0x003334, ib_put); 220 nvkm_wr32(device, 0x003334, ib_put);
485 } else { 221 } else {
486 nv_error(priv, 222 nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
487 "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n", 223 "state %08x (err: %s) push %08x\n",
488 chid, client_name, dma_get, dma_put, state, 224 chid, name, dma_get, dma_put, state,
489 nv_dma_state_err(state), push); 225 nv_dma_state_err(state), push);
490 226
491 if (dma_get != dma_put) 227 if (dma_get != dma_put)
492 nv_wr32(priv, 0x003244, dma_put); 228 nvkm_wr32(device, 0x003244, dma_put);
493 } 229 }
230 nvkm_fifo_chan_put(&fifo->base, flags, &chan);
494 231
495 nv_wr32(priv, 0x003228, 0x00000000); 232 nvkm_wr32(device, 0x003228, 0x00000000);
496 nv_wr32(priv, 0x003220, 0x00000001); 233 nvkm_wr32(device, 0x003220, 0x00000001);
497 nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER); 234 nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
498} 235}
499 236
500void 237void
501nv04_fifo_intr(struct nvkm_subdev *subdev) 238nv04_fifo_intr(struct nvkm_fifo *base)
502{ 239{
503 struct nvkm_device *device = nv_device(subdev); 240 struct nv04_fifo *fifo = nv04_fifo(base);
504 struct nv04_fifo_priv *priv = (void *)subdev; 241 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
505 u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0); 242 struct nvkm_device *device = subdev->device;
506 u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask; 243 u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
244 u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
507 u32 reassign, chid, get, sem; 245 u32 reassign, chid, get, sem;
508 246
509 reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1; 247 reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
510 nv_wr32(priv, NV03_PFIFO_CACHES, 0); 248 nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
511 249
512 chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; 250 chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & (fifo->base.nr - 1);
513 get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); 251 get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);
514 252
515 if (stat & NV_PFIFO_INTR_CACHE_ERROR) { 253 if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
516 nv04_fifo_cache_error(device, priv, chid, get); 254 nv04_fifo_cache_error(fifo, chid, get);
517 stat &= ~NV_PFIFO_INTR_CACHE_ERROR; 255 stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
518 } 256 }
519 257
520 if (stat & NV_PFIFO_INTR_DMA_PUSHER) { 258 if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
521 nv04_fifo_dma_pusher(device, priv, chid); 259 nv04_fifo_dma_pusher(fifo, chid);
522 stat &= ~NV_PFIFO_INTR_DMA_PUSHER; 260 stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
523 } 261 }
524 262
525 if (stat & NV_PFIFO_INTR_SEMAPHORE) { 263 if (stat & NV_PFIFO_INTR_SEMAPHORE) {
526 stat &= ~NV_PFIFO_INTR_SEMAPHORE; 264 stat &= ~NV_PFIFO_INTR_SEMAPHORE;
527 nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE); 265 nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
528 266
529 sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); 267 sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
530 nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); 268 nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
531 269
532 nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); 270 nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
533 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); 271 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
534 } 272 }
535 273
536 if (device->card_type == NV_50) { 274 if (device->card_type == NV_50) {
537 if (stat & 0x00000010) { 275 if (stat & 0x00000010) {
538 stat &= ~0x00000010; 276 stat &= ~0x00000010;
539 nv_wr32(priv, 0x002100, 0x00000010); 277 nvkm_wr32(device, 0x002100, 0x00000010);
540 } 278 }
541 279
542 if (stat & 0x40000000) { 280 if (stat & 0x40000000) {
543 nv_wr32(priv, 0x002100, 0x40000000); 281 nvkm_wr32(device, 0x002100, 0x40000000);
544 nvkm_fifo_uevent(&priv->base); 282 nvkm_fifo_uevent(&fifo->base);
545 stat &= ~0x40000000; 283 stat &= ~0x40000000;
546 } 284 }
547 } 285 }
548 286
549 if (stat) { 287 if (stat) {
550 nv_warn(priv, "unknown intr 0x%08x\n", stat); 288 nvkm_warn(subdev, "intr %08x\n", stat);
551 nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000); 289 nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
552 nv_wr32(priv, NV03_PFIFO_INTR_0, stat); 290 nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
553 } 291 }
554 292
555 nv_wr32(priv, NV03_PFIFO_CACHES, reassign); 293 nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
556}
557
558static int
559nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
560 struct nvkm_oclass *oclass, void *data, u32 size,
561 struct nvkm_object **pobject)
562{
563 struct nv04_instmem_priv *imem = nv04_instmem(parent);
564 struct nv04_fifo_priv *priv;
565 int ret;
566
567 ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &priv);
568 *pobject = nv_object(priv);
569 if (ret)
570 return ret;
571
572 nvkm_ramht_ref(imem->ramht, &priv->ramht);
573 nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
574 nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
575
576 nv_subdev(priv)->unit = 0x00000100;
577 nv_subdev(priv)->intr = nv04_fifo_intr;
578 nv_engine(priv)->cclass = &nv04_fifo_cclass;
579 nv_engine(priv)->sclass = nv04_fifo_sclass;
580 priv->base.pause = nv04_fifo_pause;
581 priv->base.start = nv04_fifo_start;
582 priv->ramfc_desc = nv04_ramfc;
583 return 0;
584} 294}
585 295
586void 296void
587nv04_fifo_dtor(struct nvkm_object *object) 297nv04_fifo_init(struct nvkm_fifo *base)
588{ 298{
589 struct nv04_fifo_priv *priv = (void *)object; 299 struct nv04_fifo *fifo = nv04_fifo(base);
590 nvkm_gpuobj_ref(NULL, &priv->ramfc); 300 struct nvkm_device *device = fifo->base.engine.subdev.device;
591 nvkm_gpuobj_ref(NULL, &priv->ramro); 301 struct nvkm_instmem *imem = device->imem;
592 nvkm_ramht_ref(NULL, &priv->ramht); 302 struct nvkm_ramht *ramht = imem->ramht;
593 nvkm_fifo_destroy(&priv->base); 303 struct nvkm_memory *ramro = imem->ramro;
304 struct nvkm_memory *ramfc = imem->ramfc;
305
306 nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
307 nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
308
309 nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
310 ((ramht->bits - 9) << 16) |
311 (ramht->gpuobj->addr >> 8));
312 nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
313 nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);
314
315 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
316
317 nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
318 nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
319
320 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
321 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
322 nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
594} 323}
595 324
596int 325int
597nv04_fifo_init(struct nvkm_object *object) 326nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
327 int index, int nr, const struct nv04_fifo_ramfc *ramfc,
328 struct nvkm_fifo **pfifo)
598{ 329{
599 struct nv04_fifo_priv *priv = (void *)object; 330 struct nv04_fifo *fifo;
600 int ret; 331 int ret;
601 332
602 ret = nvkm_fifo_init(&priv->base); 333 if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
334 return -ENOMEM;
335 fifo->ramfc = ramfc;
336 *pfifo = &fifo->base;
337
338 ret = nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
603 if (ret) 339 if (ret)
604 return ret; 340 return ret;
605 341
606 nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff); 342 set_bit(nr - 1, fifo->base.mask); /* inactive channel */
607 nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
608
609 nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
610 ((priv->ramht->bits - 9) << 16) |
611 (priv->ramht->gpuobj.addr >> 8));
612 nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
613 nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
614
615 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
616
617 nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
618 nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
619
620 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
621 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
622 nv_wr32(priv, NV03_PFIFO_CACHES, 1);
623 return 0; 343 return 0;
624} 344}
625 345
626struct nvkm_oclass * 346static const struct nvkm_fifo_func
627nv04_fifo_oclass = &(struct nvkm_oclass) { 347nv04_fifo = {
628 .handle = NV_ENGINE(FIFO, 0x04), 348 .init = nv04_fifo_init,
629 .ofuncs = &(struct nvkm_ofuncs) { 349 .intr = nv04_fifo_intr,
630 .ctor = nv04_fifo_ctor, 350 .pause = nv04_fifo_pause,
631 .dtor = nv04_fifo_dtor, 351 .start = nv04_fifo_start,
632 .init = nv04_fifo_init, 352 .chan = {
633 .fini = _nvkm_fifo_fini, 353 &nv04_fifo_dma_oclass,
354 NULL
634 }, 355 },
635}; 356};
357
358int
359nv04_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
360{
361 return nv04_fifo_new_(&nv04_fifo, device, index, 16,
362 nv04_fifo_ramfc, pfifo);
363}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
index e0e0c47cb4ca..03f60004bf7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
@@ -1,137 +1,9 @@
1#ifndef __NV04_FIFO_H__ 1#ifndef __NV04_FIFO_H__
2#define __NV04_FIFO_H__ 2#define __NV04_FIFO_H__
3#include <engine/fifo.h> 3#define nv04_fifo(p) container_of((p), struct nv04_fifo, base)
4#include "priv.h"
4 5
5#define NV04_PFIFO_DELAY_0 0x00002040 6struct nv04_fifo_ramfc {
6#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
7#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
8#define NV03_PFIFO_INTR_0 0x00002100
9#define NV03_PFIFO_INTR_EN_0 0x00002140
10# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
11# define NV_PFIFO_INTR_RUNOUT (1<<4)
12# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
13# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
14# define NV_PFIFO_INTR_DMA_PT (1<<16)
15# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
16# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
17#define NV03_PFIFO_RAMHT 0x00002210
18#define NV03_PFIFO_RAMFC 0x00002214
19#define NV03_PFIFO_RAMRO 0x00002218
20#define NV40_PFIFO_RAMFC 0x00002220
21#define NV03_PFIFO_CACHES 0x00002500
22#define NV04_PFIFO_MODE 0x00002504
23#define NV04_PFIFO_DMA 0x00002508
24#define NV04_PFIFO_SIZE 0x0000250c
25#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
26#define NV50_PFIFO_CTX_TABLE__SIZE 128
27#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
28#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
29#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
30#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
31#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
32#define NV03_PFIFO_CACHE0_PULL0 0x00003040
33#define NV04_PFIFO_CACHE0_PULL0 0x00003050
34#define NV04_PFIFO_CACHE0_PULL1 0x00003054
35#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
36#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
37#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
38#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
39#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
40#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
41#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
42#define NV03_PFIFO_CACHE1_PUT 0x00003210
43#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
44#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
45# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
46# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
47# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
48# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
49# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
50# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
51# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
52# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
53# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
54# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
55# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
56# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
57# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
58# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
59# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
60# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
61# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
62# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
63# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
64# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
65# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
66# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
67# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
68# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
69# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
70# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
71# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
72# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
73# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
74# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
75# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
76# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
77# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
78# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
79# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
80# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
81# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
82# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
83# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
84# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
85# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
86# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
87# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
88# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
89# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
90# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
91# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
92# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
93# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
94# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
95# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
96# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
97# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
98# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
99# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
100# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
101# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
102# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
103# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
104# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
105# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
106#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
107#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
108#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
109#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
110#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
111#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
112#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
113#define NV03_PFIFO_CACHE1_PULL0 0x00003240
114#define NV04_PFIFO_CACHE1_PULL0 0x00003250
115# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010
116# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000
117#define NV03_PFIFO_CACHE1_PULL1 0x00003250
118#define NV04_PFIFO_CACHE1_PULL1 0x00003254
119#define NV04_PFIFO_CACHE1_HASH 0x00003258
120#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
121#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
122#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
123#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
124#define NV03_PFIFO_CACHE1_GET 0x00003270
125#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
126#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
127#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
128#define NV40_PFIFO_UNK32E4 0x000032E4
129#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
130#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
131#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
132#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
133
134struct ramfc_desc {
135 unsigned bits:6; 7 unsigned bits:6;
136 unsigned ctxs:5; 8 unsigned ctxs:5;
137 unsigned ctxp:8; 9 unsigned ctxp:8;
@@ -139,37 +11,13 @@ struct ramfc_desc {
139 unsigned regp; 11 unsigned regp;
140}; 12};
141 13
142struct nv04_fifo_priv { 14struct nv04_fifo {
143 struct nvkm_fifo base; 15 struct nvkm_fifo base;
144 struct ramfc_desc *ramfc_desc; 16 const struct nv04_fifo_ramfc *ramfc;
145 struct nvkm_ramht *ramht;
146 struct nvkm_gpuobj *ramro;
147 struct nvkm_gpuobj *ramfc;
148};
149
150struct nv04_fifo_base {
151 struct nvkm_fifo_base base;
152};
153
154struct nv04_fifo_chan {
155 struct nvkm_fifo_chan base;
156 u32 subc[8];
157 u32 ramfc;
158}; 17};
159 18
160int nv04_fifo_object_attach(struct nvkm_object *, struct nvkm_object *, u32); 19int nv04_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
161void nv04_fifo_object_detach(struct nvkm_object *, int); 20 int index, int nr, const struct nv04_fifo_ramfc *,
162 21 struct nvkm_fifo **);
163void nv04_fifo_chan_dtor(struct nvkm_object *); 22void nv04_fifo_init(struct nvkm_fifo *);
164int nv04_fifo_chan_init(struct nvkm_object *);
165int nv04_fifo_chan_fini(struct nvkm_object *, bool suspend);
166
167int nv04_fifo_context_ctor(struct nvkm_object *, struct nvkm_object *,
168 struct nvkm_oclass *, void *, u32,
169 struct nvkm_object **);
170
171void nv04_fifo_dtor(struct nvkm_object *);
172int nv04_fifo_init(struct nvkm_object *);
173void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
174void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
175#endif 23#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
index 48ce4af6f543..f9a87deb2b3d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
@@ -22,17 +22,11 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "nv04.h"
25#include "channv04.h"
26#include "regsnv04.h"
25 27
26#include <core/client.h> 28static const struct nv04_fifo_ramfc
27#include <core/engctx.h> 29nv10_fifo_ramfc[] = {
28#include <core/ramht.h>
29#include <subdev/instmem/nv04.h>
30
31#include <nvif/class.h>
32#include <nvif/unpack.h>
33
34static struct ramfc_desc
35nv10_ramfc[] = {
36 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT }, 30 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
37 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET }, 31 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
38 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT }, 32 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -45,134 +39,21 @@ nv10_ramfc[] = {
45 {} 39 {}
46}; 40};
47 41
48/******************************************************************************* 42static const struct nvkm_fifo_func
49 * FIFO channel objects 43nv10_fifo = {
50 ******************************************************************************/ 44 .init = nv04_fifo_init,
51 45 .intr = nv04_fifo_intr,
52static int 46 .pause = nv04_fifo_pause,
53nv10_fifo_chan_ctor(struct nvkm_object *parent, 47 .start = nv04_fifo_start,
54 struct nvkm_object *engine, 48 .chan = {
55 struct nvkm_oclass *oclass, void *data, u32 size, 49 &nv10_fifo_dma_oclass,
56 struct nvkm_object **pobject) 50 NULL
57{
58 union {
59 struct nv03_channel_dma_v0 v0;
60 } *args = data;
61 struct nv04_fifo_priv *priv = (void *)engine;
62 struct nv04_fifo_chan *chan;
63 int ret;
64
65 nv_ioctl(parent, "create channel dma size %d\n", size);
66 if (nvif_unpack(args->v0, 0, 0, false)) {
67 nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
68 "offset %016llx\n", args->v0.version,
69 args->v0.pushbuf, args->v0.offset);
70 } else
71 return ret;
72
73 ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
74 0x10000, args->v0.pushbuf,
75 (1ULL << NVDEV_ENGINE_DMAOBJ) |
76 (1ULL << NVDEV_ENGINE_SW) |
77 (1ULL << NVDEV_ENGINE_GR), &chan);
78 *pobject = nv_object(chan);
79 if (ret)
80 return ret;
81
82 args->v0.chid = chan->base.chid;
83
84 nv_parent(chan)->object_attach = nv04_fifo_object_attach;
85 nv_parent(chan)->object_detach = nv04_fifo_object_detach;
86 nv_parent(chan)->context_attach = nv04_fifo_context_attach;
87 chan->ramfc = chan->base.chid * 32;
88
89 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
90 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
91 nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
92 nv_wo32(priv->ramfc, chan->ramfc + 0x14,
93 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
94 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
95#ifdef __BIG_ENDIAN
96 NV_PFIFO_CACHE1_BIG_ENDIAN |
97#endif
98 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
99 return 0;
100}
101
102static struct nvkm_ofuncs
103nv10_fifo_ofuncs = {
104 .ctor = nv10_fifo_chan_ctor,
105 .dtor = nv04_fifo_chan_dtor,
106 .init = nv04_fifo_chan_init,
107 .fini = nv04_fifo_chan_fini,
108 .map = _nvkm_fifo_channel_map,
109 .rd32 = _nvkm_fifo_channel_rd32,
110 .wr32 = _nvkm_fifo_channel_wr32,
111 .ntfy = _nvkm_fifo_channel_ntfy
112};
113
114static struct nvkm_oclass
115nv10_fifo_sclass[] = {
116 { NV10_CHANNEL_DMA, &nv10_fifo_ofuncs },
117 {}
118};
119
120/*******************************************************************************
121 * FIFO context - basically just the instmem reserved for the channel
122 ******************************************************************************/
123
124static struct nvkm_oclass
125nv10_fifo_cclass = {
126 .handle = NV_ENGCTX(FIFO, 0x10),
127 .ofuncs = &(struct nvkm_ofuncs) {
128 .ctor = nv04_fifo_context_ctor,
129 .dtor = _nvkm_fifo_context_dtor,
130 .init = _nvkm_fifo_context_init,
131 .fini = _nvkm_fifo_context_fini,
132 .rd32 = _nvkm_fifo_context_rd32,
133 .wr32 = _nvkm_fifo_context_wr32,
134 }, 51 },
135}; 52};
136 53
137/******************************************************************************* 54int
138 * PFIFO engine 55nv10_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
139 ******************************************************************************/
140
141static int
142nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
143 struct nvkm_oclass *oclass, void *data, u32 size,
144 struct nvkm_object **pobject)
145{ 56{
146 struct nv04_instmem_priv *imem = nv04_instmem(parent); 57 return nv04_fifo_new_(&nv10_fifo, device, index, 32,
147 struct nv04_fifo_priv *priv; 58 nv10_fifo_ramfc, pfifo);
148 int ret;
149
150 ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
151 *pobject = nv_object(priv);
152 if (ret)
153 return ret;
154
155 nvkm_ramht_ref(imem->ramht, &priv->ramht);
156 nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
157 nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
158
159 nv_subdev(priv)->unit = 0x00000100;
160 nv_subdev(priv)->intr = nv04_fifo_intr;
161 nv_engine(priv)->cclass = &nv10_fifo_cclass;
162 nv_engine(priv)->sclass = nv10_fifo_sclass;
163 priv->base.pause = nv04_fifo_pause;
164 priv->base.start = nv04_fifo_start;
165 priv->ramfc_desc = nv10_ramfc;
166 return 0;
167} 59}
168
169struct nvkm_oclass *
170nv10_fifo_oclass = &(struct nvkm_oclass) {
171 .handle = NV_ENGINE(FIFO, 0x10),
172 .ofuncs = &(struct nvkm_ofuncs) {
173 .ctor = nv10_fifo_ctor,
174 .dtor = nv04_fifo_dtor,
175 .init = nv04_fifo_init,
176 .fini = _nvkm_fifo_fini,
177 },
178};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
index 4a20a6fd3887..f6d383a21222 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
@@ -22,17 +22,14 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "nv04.h"
25#include "channv04.h"
26#include "regsnv04.h"
25 27
26#include <core/client.h>
27#include <core/engctx.h>
28#include <core/ramht.h> 28#include <core/ramht.h>
29#include <subdev/instmem/nv04.h> 29#include <subdev/instmem.h>
30 30
31#include <nvif/class.h> 31static const struct nv04_fifo_ramfc
32#include <nvif/unpack.h> 32nv17_fifo_ramfc[] = {
33
34static struct ramfc_desc
35nv17_ramfc[] = {
36 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT }, 33 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
37 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET }, 34 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
38 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT }, 35 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -50,166 +47,51 @@ nv17_ramfc[] = {
50 {} 47 {}
51}; 48};
52 49
53/******************************************************************************* 50static void
54 * FIFO channel objects 51nv17_fifo_init(struct nvkm_fifo *base)
55 ******************************************************************************/
56
57static int
58nv17_fifo_chan_ctor(struct nvkm_object *parent,
59 struct nvkm_object *engine,
60 struct nvkm_oclass *oclass, void *data, u32 size,
61 struct nvkm_object **pobject)
62{ 52{
63 union { 53 struct nv04_fifo *fifo = nv04_fifo(base);
64 struct nv03_channel_dma_v0 v0; 54 struct nvkm_device *device = fifo->base.engine.subdev.device;
65 } *args = data; 55 struct nvkm_instmem *imem = device->imem;
66 struct nv04_fifo_priv *priv = (void *)engine; 56 struct nvkm_ramht *ramht = imem->ramht;
67 struct nv04_fifo_chan *chan; 57 struct nvkm_memory *ramro = imem->ramro;
68 int ret; 58 struct nvkm_memory *ramfc = imem->ramfc;
69 59
70 nv_ioctl(parent, "create channel dma size %d\n", size); 60 nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
71 if (nvif_unpack(args->v0, 0, 0, false)) { 61 nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
72 nv_ioctl(parent, "create channel dma vers %d pushbuf %08x " 62
73 "offset %016llx\n", args->v0.version, 63 nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
74 args->v0.pushbuf, args->v0.offset); 64 ((ramht->bits - 9) << 16) |
75 } else 65 (ramht->gpuobj->addr >> 8));
76 return ret; 66 nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
77 67 nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8 |
78 ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000, 68 0x00010000);
79 0x10000, args->v0.pushbuf, 69
80 (1ULL << NVDEV_ENGINE_DMAOBJ) | 70 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
81 (1ULL << NVDEV_ENGINE_SW) | 71
82 (1ULL << NVDEV_ENGINE_GR) | 72 nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
83 (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */ 73 nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
84 &chan); 74
85 *pobject = nv_object(chan); 75 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
86 if (ret) 76 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
87 return ret; 77 nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
88
89 args->v0.chid = chan->base.chid;
90
91 nv_parent(chan)->object_attach = nv04_fifo_object_attach;
92 nv_parent(chan)->object_detach = nv04_fifo_object_detach;
93 nv_parent(chan)->context_attach = nv04_fifo_context_attach;
94 chan->ramfc = chan->base.chid * 64;
95
96 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
97 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
98 nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
99 nv_wo32(priv->ramfc, chan->ramfc + 0x14,
100 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
101 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
102#ifdef __BIG_ENDIAN
103 NV_PFIFO_CACHE1_BIG_ENDIAN |
104#endif
105 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
106 return 0;
107} 78}
108 79
109static struct nvkm_ofuncs 80static const struct nvkm_fifo_func
110nv17_fifo_ofuncs = { 81nv17_fifo = {
111 .ctor = nv17_fifo_chan_ctor, 82 .init = nv17_fifo_init,
112 .dtor = nv04_fifo_chan_dtor, 83 .intr = nv04_fifo_intr,
113 .init = nv04_fifo_chan_init, 84 .pause = nv04_fifo_pause,
114 .fini = nv04_fifo_chan_fini, 85 .start = nv04_fifo_start,
115 .map = _nvkm_fifo_channel_map, 86 .chan = {
116 .rd32 = _nvkm_fifo_channel_rd32, 87 &nv17_fifo_dma_oclass,
117 .wr32 = _nvkm_fifo_channel_wr32, 88 NULL
118 .ntfy = _nvkm_fifo_channel_ntfy
119};
120
121static struct nvkm_oclass
122nv17_fifo_sclass[] = {
123 { NV17_CHANNEL_DMA, &nv17_fifo_ofuncs },
124 {}
125};
126
127/*******************************************************************************
128 * FIFO context - basically just the instmem reserved for the channel
129 ******************************************************************************/
130
131static struct nvkm_oclass
132nv17_fifo_cclass = {
133 .handle = NV_ENGCTX(FIFO, 0x17),
134 .ofuncs = &(struct nvkm_ofuncs) {
135 .ctor = nv04_fifo_context_ctor,
136 .dtor = _nvkm_fifo_context_dtor,
137 .init = _nvkm_fifo_context_init,
138 .fini = _nvkm_fifo_context_fini,
139 .rd32 = _nvkm_fifo_context_rd32,
140 .wr32 = _nvkm_fifo_context_wr32,
141 }, 89 },
142}; 90};
143 91
144/******************************************************************************* 92int
145 * PFIFO engine 93nv17_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
146 ******************************************************************************/
147
148static int
149nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
150 struct nvkm_oclass *oclass, void *data, u32 size,
151 struct nvkm_object **pobject)
152{ 94{
153 struct nv04_instmem_priv *imem = nv04_instmem(parent); 95 return nv04_fifo_new_(&nv17_fifo, device, index, 32,
154 struct nv04_fifo_priv *priv; 96 nv17_fifo_ramfc, pfifo);
155 int ret;
156
157 ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
158 *pobject = nv_object(priv);
159 if (ret)
160 return ret;
161
162 nvkm_ramht_ref(imem->ramht, &priv->ramht);
163 nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
164 nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
165
166 nv_subdev(priv)->unit = 0x00000100;
167 nv_subdev(priv)->intr = nv04_fifo_intr;
168 nv_engine(priv)->cclass = &nv17_fifo_cclass;
169 nv_engine(priv)->sclass = nv17_fifo_sclass;
170 priv->base.pause = nv04_fifo_pause;
171 priv->base.start = nv04_fifo_start;
172 priv->ramfc_desc = nv17_ramfc;
173 return 0;
174}
175
176static int
177nv17_fifo_init(struct nvkm_object *object)
178{
179 struct nv04_fifo_priv *priv = (void *)object;
180 int ret;
181
182 ret = nvkm_fifo_init(&priv->base);
183 if (ret)
184 return ret;
185
186 nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
187 nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
188
189 nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
190 ((priv->ramht->bits - 9) << 16) |
191 (priv->ramht->gpuobj.addr >> 8));
192 nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
193 nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8 | 0x00010000);
194
195 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
196
197 nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
198 nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
199
200 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
201 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
202 nv_wr32(priv, NV03_PFIFO_CACHES, 1);
203 return 0;
204} 97}
205
206struct nvkm_oclass *
207nv17_fifo_oclass = &(struct nvkm_oclass) {
208 .handle = NV_ENGINE(FIFO, 0x17),
209 .ofuncs = &(struct nvkm_ofuncs) {
210 .ctor = nv17_fifo_ctor,
211 .dtor = nv04_fifo_dtor,
212 .init = nv17_fifo_init,
213 .fini = _nvkm_fifo_fini,
214 },
215};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index 5bfc96265f3b..8c7ba32763c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -22,19 +22,15 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "nv04.h"
25#include "channv04.h"
26#include "regsnv04.h"
25 27
26#include <core/client.h>
27#include <core/device.h>
28#include <core/engctx.h>
29#include <core/ramht.h> 28#include <core/ramht.h>
30#include <subdev/fb.h> 29#include <subdev/fb.h>
31#include <subdev/instmem/nv04.h> 30#include <subdev/instmem.h>
32 31
33#include <nvif/class.h> 32static const struct nv04_fifo_ramfc
34#include <nvif/unpack.h> 33nv40_fifo_ramfc[] = {
35
36static struct ramfc_desc
37nv40_ramfc[] = {
38 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT }, 34 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
39 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET }, 35 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
40 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT }, 36 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -60,297 +56,72 @@ nv40_ramfc[] = {
60 {} 56 {}
61}; 57};
62 58
63/******************************************************************************* 59static void
64 * FIFO channel objects 60nv40_fifo_init(struct nvkm_fifo *base)
65 ******************************************************************************/
66
67static int
68nv40_fifo_object_attach(struct nvkm_object *parent,
69 struct nvkm_object *object, u32 handle)
70{
71 struct nv04_fifo_priv *priv = (void *)parent->engine;
72 struct nv04_fifo_chan *chan = (void *)parent;
73 u32 context, chid = chan->base.chid;
74 int ret;
75
76 if (nv_iclass(object, NV_GPUOBJ_CLASS))
77 context = nv_gpuobj(object)->addr >> 4;
78 else
79 context = 0x00000004; /* just non-zero */
80
81 switch (nv_engidx(object->engine)) {
82 case NVDEV_ENGINE_DMAOBJ:
83 case NVDEV_ENGINE_SW:
84 context |= 0x00000000;
85 break;
86 case NVDEV_ENGINE_GR:
87 context |= 0x00100000;
88 break;
89 case NVDEV_ENGINE_MPEG:
90 context |= 0x00200000;
91 break;
92 default:
93 return -EINVAL;
94 }
95
96 context |= chid << 23;
97
98 mutex_lock(&nv_subdev(priv)->mutex);
99 ret = nvkm_ramht_insert(priv->ramht, chid, handle, context);
100 mutex_unlock(&nv_subdev(priv)->mutex);
101 return ret;
102}
103
104static int
105nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
106{
107 struct nv04_fifo_priv *priv = (void *)parent->engine;
108 struct nv04_fifo_chan *chan = (void *)parent;
109 unsigned long flags;
110 u32 reg, ctx;
111
112 switch (nv_engidx(engctx->engine)) {
113 case NVDEV_ENGINE_SW:
114 return 0;
115 case NVDEV_ENGINE_GR:
116 reg = 0x32e0;
117 ctx = 0x38;
118 break;
119 case NVDEV_ENGINE_MPEG:
120 reg = 0x330c;
121 ctx = 0x54;
122 break;
123 default:
124 return -EINVAL;
125 }
126
127 spin_lock_irqsave(&priv->base.lock, flags);
128 nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
129 nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
130
131 if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
132 nv_wr32(priv, reg, nv_engctx(engctx)->addr);
133 nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
134
135 nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
136 spin_unlock_irqrestore(&priv->base.lock, flags);
137 return 0;
138}
139
140static int
141nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
142 struct nvkm_object *engctx)
143{ 61{
144 struct nv04_fifo_priv *priv = (void *)parent->engine; 62 struct nv04_fifo *fifo = nv04_fifo(base);
145 struct nv04_fifo_chan *chan = (void *)parent; 63 struct nvkm_device *device = fifo->base.engine.subdev.device;
146 unsigned long flags; 64 struct nvkm_fb *fb = device->fb;
147 u32 reg, ctx; 65 struct nvkm_instmem *imem = device->imem;
148 66 struct nvkm_ramht *ramht = imem->ramht;
149 switch (nv_engidx(engctx->engine)) { 67 struct nvkm_memory *ramro = imem->ramro;
150 case NVDEV_ENGINE_SW: 68 struct nvkm_memory *ramfc = imem->ramfc;
151 return 0; 69
152 case NVDEV_ENGINE_GR: 70 nvkm_wr32(device, 0x002040, 0x000000ff);
153 reg = 0x32e0; 71 nvkm_wr32(device, 0x002044, 0x2101ffff);
154 ctx = 0x38; 72 nvkm_wr32(device, 0x002058, 0x00000001);
155 break; 73
156 case NVDEV_ENGINE_MPEG: 74 nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
157 reg = 0x330c; 75 ((ramht->bits - 9) << 16) |
158 ctx = 0x54; 76 (ramht->gpuobj->addr >> 8));
159 break; 77 nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
160 default: 78
161 return -EINVAL; 79 switch (device->chipset) {
162 }
163
164 spin_lock_irqsave(&priv->base.lock, flags);
165 nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
166
167 if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
168 nv_wr32(priv, reg, 0x00000000);
169 nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000);
170
171 nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
172 spin_unlock_irqrestore(&priv->base.lock, flags);
173 return 0;
174}
175
176static int
177nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
178 struct nvkm_oclass *oclass, void *data, u32 size,
179 struct nvkm_object **pobject)
180{
181 union {
182 struct nv03_channel_dma_v0 v0;
183 } *args = data;
184 struct nv04_fifo_priv *priv = (void *)engine;
185 struct nv04_fifo_chan *chan;
186 int ret;
187
188 nv_ioctl(parent, "create channel dma size %d\n", size);
189 if (nvif_unpack(args->v0, 0, 0, false)) {
190 nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
191 "offset %016llx\n", args->v0.version,
192 args->v0.pushbuf, args->v0.offset);
193 } else
194 return ret;
195
196 ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
197 0x1000, args->v0.pushbuf,
198 (1ULL << NVDEV_ENGINE_DMAOBJ) |
199 (1ULL << NVDEV_ENGINE_SW) |
200 (1ULL << NVDEV_ENGINE_GR) |
201 (1ULL << NVDEV_ENGINE_MPEG), &chan);
202 *pobject = nv_object(chan);
203 if (ret)
204 return ret;
205
206 args->v0.chid = chan->base.chid;
207
208 nv_parent(chan)->context_attach = nv40_fifo_context_attach;
209 nv_parent(chan)->context_detach = nv40_fifo_context_detach;
210 nv_parent(chan)->object_attach = nv40_fifo_object_attach;
211 nv_parent(chan)->object_detach = nv04_fifo_object_detach;
212 chan->ramfc = chan->base.chid * 128;
213
214 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
215 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
216 nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
217 nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
218 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
219 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
220#ifdef __BIG_ENDIAN
221 NV_PFIFO_CACHE1_BIG_ENDIAN |
222#endif
223 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
224 nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
225 return 0;
226}
227
228static struct nvkm_ofuncs
229nv40_fifo_ofuncs = {
230 .ctor = nv40_fifo_chan_ctor,
231 .dtor = nv04_fifo_chan_dtor,
232 .init = nv04_fifo_chan_init,
233 .fini = nv04_fifo_chan_fini,
234 .map = _nvkm_fifo_channel_map,
235 .rd32 = _nvkm_fifo_channel_rd32,
236 .wr32 = _nvkm_fifo_channel_wr32,
237 .ntfy = _nvkm_fifo_channel_ntfy
238};
239
240static struct nvkm_oclass
241nv40_fifo_sclass[] = {
242 { NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
243 {}
244};
245
246/*******************************************************************************
247 * FIFO context - basically just the instmem reserved for the channel
248 ******************************************************************************/
249
250static struct nvkm_oclass
251nv40_fifo_cclass = {
252 .handle = NV_ENGCTX(FIFO, 0x40),
253 .ofuncs = &(struct nvkm_ofuncs) {
254 .ctor = nv04_fifo_context_ctor,
255 .dtor = _nvkm_fifo_context_dtor,
256 .init = _nvkm_fifo_context_init,
257 .fini = _nvkm_fifo_context_fini,
258 .rd32 = _nvkm_fifo_context_rd32,
259 .wr32 = _nvkm_fifo_context_wr32,
260 },
261};
262
263/*******************************************************************************
264 * PFIFO engine
265 ******************************************************************************/
266
267static int
268nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
269 struct nvkm_oclass *oclass, void *data, u32 size,
270 struct nvkm_object **pobject)
271{
272 struct nv04_instmem_priv *imem = nv04_instmem(parent);
273 struct nv04_fifo_priv *priv;
274 int ret;
275
276 ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
277 *pobject = nv_object(priv);
278 if (ret)
279 return ret;
280
281 nvkm_ramht_ref(imem->ramht, &priv->ramht);
282 nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
283 nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
284
285 nv_subdev(priv)->unit = 0x00000100;
286 nv_subdev(priv)->intr = nv04_fifo_intr;
287 nv_engine(priv)->cclass = &nv40_fifo_cclass;
288 nv_engine(priv)->sclass = nv40_fifo_sclass;
289 priv->base.pause = nv04_fifo_pause;
290 priv->base.start = nv04_fifo_start;
291 priv->ramfc_desc = nv40_ramfc;
292 return 0;
293}
294
295static int
296nv40_fifo_init(struct nvkm_object *object)
297{
298 struct nv04_fifo_priv *priv = (void *)object;
299 struct nvkm_fb *pfb = nvkm_fb(object);
300 int ret;
301
302 ret = nvkm_fifo_init(&priv->base);
303 if (ret)
304 return ret;
305
306 nv_wr32(priv, 0x002040, 0x000000ff);
307 nv_wr32(priv, 0x002044, 0x2101ffff);
308 nv_wr32(priv, 0x002058, 0x00000001);
309
310 nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
311 ((priv->ramht->bits - 9) << 16) |
312 (priv->ramht->gpuobj.addr >> 8));
313 nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
314
315 switch (nv_device(priv)->chipset) {
316 case 0x47: 80 case 0x47:
317 case 0x49: 81 case 0x49:
318 case 0x4b: 82 case 0x4b:
319 nv_wr32(priv, 0x002230, 0x00000001); 83 nvkm_wr32(device, 0x002230, 0x00000001);
320 case 0x40: 84 case 0x40:
321 case 0x41: 85 case 0x41:
322 case 0x42: 86 case 0x42:
323 case 0x43: 87 case 0x43:
324 case 0x45: 88 case 0x45:
325 case 0x48: 89 case 0x48:
326 nv_wr32(priv, 0x002220, 0x00030002); 90 nvkm_wr32(device, 0x002220, 0x00030002);
327 break; 91 break;
328 default: 92 default:
329 nv_wr32(priv, 0x002230, 0x00000000); 93 nvkm_wr32(device, 0x002230, 0x00000000);
330 nv_wr32(priv, 0x002220, ((pfb->ram->size - 512 * 1024 + 94 nvkm_wr32(device, 0x002220, ((fb->ram->size - 512 * 1024 +
331 priv->ramfc->addr) >> 16) | 95 nvkm_memory_addr(ramfc)) >> 16) |
332 0x00030000); 96 0x00030000);
333 break; 97 break;
334 } 98 }
335 99
336 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max); 100 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
337 101
338 nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff); 102 nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
339 nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff); 103 nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
340 104
341 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1); 105 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
342 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); 106 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
343 nv_wr32(priv, NV03_PFIFO_CACHES, 1); 107 nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
344 return 0;
345} 108}
346 109
347struct nvkm_oclass * 110static const struct nvkm_fifo_func
348nv40_fifo_oclass = &(struct nvkm_oclass) { 111nv40_fifo = {
349 .handle = NV_ENGINE(FIFO, 0x40), 112 .init = nv40_fifo_init,
350 .ofuncs = &(struct nvkm_ofuncs) { 113 .intr = nv04_fifo_intr,
351 .ctor = nv40_fifo_ctor, 114 .pause = nv04_fifo_pause,
352 .dtor = nv04_fifo_dtor, 115 .start = nv04_fifo_start,
353 .init = nv40_fifo_init, 116 .chan = {
354 .fini = _nvkm_fifo_fini, 117 &nv40_fifo_dma_oclass,
118 NULL
355 }, 119 },
356}; 120};
121
122int
123nv40_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
124{
125 return nv04_fifo_new_(&nv40_fifo, device, index, 32,
126 nv40_fifo_ramfc, pfifo);
127}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index f25f0fd0655d..66eb12c2b5ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -22,513 +22,126 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25#include "nv04.h" 25#include "channv50.h"
26 26
27#include <core/client.h> 27#include <core/gpuobj.h>
28#include <core/engctx.h>
29#include <core/ramht.h>
30#include <subdev/bar.h>
31#include <subdev/mmu.h>
32#include <subdev/timer.h>
33
34#include <nvif/class.h>
35#include <nvif/unpack.h>
36
37/*******************************************************************************
38 * FIFO channel objects
39 ******************************************************************************/
40 28
41static void 29static void
42nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv) 30nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo)
43{ 31{
44 struct nvkm_bar *bar = nvkm_bar(priv); 32 struct nvkm_device *device = fifo->base.engine.subdev.device;
45 struct nvkm_gpuobj *cur; 33 struct nvkm_memory *cur;
46 int i, p; 34 int i, p;
47 35
48 cur = priv->playlist[priv->cur_playlist]; 36 cur = fifo->runlist[fifo->cur_runlist];
49 priv->cur_playlist = !priv->cur_playlist; 37 fifo->cur_runlist = !fifo->cur_runlist;
50
51 for (i = priv->base.min, p = 0; i < priv->base.max; i++) {
52 if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000)
53 nv_wo32(cur, p++ * 4, i);
54 }
55
56 bar->flush(bar);
57
58 nv_wr32(priv, 0x0032f4, cur->addr >> 12);
59 nv_wr32(priv, 0x0032ec, p);
60 nv_wr32(priv, 0x002500, 0x00000101);
61}
62
63void
64nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
65{
66 mutex_lock(&nv_subdev(priv)->mutex);
67 nv50_fifo_playlist_update_locked(priv);
68 mutex_unlock(&nv_subdev(priv)->mutex);
69}
70
71static int
72nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
73{
74 struct nvkm_bar *bar = nvkm_bar(parent);
75 struct nv50_fifo_base *base = (void *)parent->parent;
76 struct nvkm_gpuobj *ectx = (void *)object;
77 u64 limit = ectx->addr + ectx->size - 1;
78 u64 start = ectx->addr;
79 u32 addr;
80
81 switch (nv_engidx(object->engine)) {
82 case NVDEV_ENGINE_SW : return 0;
83 case NVDEV_ENGINE_GR : addr = 0x0000; break;
84 case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
85 default:
86 return -EINVAL;
87 }
88
89 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
90 nv_wo32(base->eng, addr + 0x00, 0x00190000);
91 nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
92 nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
93 nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
94 upper_32_bits(start));
95 nv_wo32(base->eng, addr + 0x10, 0x00000000);
96 nv_wo32(base->eng, addr + 0x14, 0x00000000);
97 bar->flush(bar);
98 return 0;
99}
100
101static int
102nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
103 struct nvkm_object *object)
104{
105 struct nvkm_bar *bar = nvkm_bar(parent);
106 struct nv50_fifo_priv *priv = (void *)parent->engine;
107 struct nv50_fifo_base *base = (void *)parent->parent;
108 struct nv50_fifo_chan *chan = (void *)parent;
109 u32 addr, me;
110 int ret = 0;
111
112 switch (nv_engidx(object->engine)) {
113 case NVDEV_ENGINE_SW : return 0;
114 case NVDEV_ENGINE_GR : addr = 0x0000; break;
115 case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
116 default:
117 return -EINVAL;
118 }
119
120 /* HW bug workaround:
121 *
122 * PFIFO will hang forever if the connected engines don't report
123 * that they've processed the context switch request.
124 *
125 * In order for the kickoff to work, we need to ensure all the
126 * connected engines are in a state where they can answer.
127 *
128 * Newer chipsets don't seem to suffer from this issue, and well,
129 * there's also a "ignore these engines" bitmask reg we can use
130 * if we hit the issue there..
131 */
132 me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001);
133
134 /* do the kickoff... */
135 nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
136 if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
137 nv_error(priv, "channel %d [%s] unload timeout\n",
138 chan->base.chid, nvkm_client_name(chan));
139 if (suspend)
140 ret = -EBUSY;
141 }
142 nv_wr32(priv, 0x00b860, me);
143
144 if (ret == 0) {
145 nv_wo32(base->eng, addr + 0x00, 0x00000000);
146 nv_wo32(base->eng, addr + 0x04, 0x00000000);
147 nv_wo32(base->eng, addr + 0x08, 0x00000000);
148 nv_wo32(base->eng, addr + 0x0c, 0x00000000);
149 nv_wo32(base->eng, addr + 0x10, 0x00000000);
150 nv_wo32(base->eng, addr + 0x14, 0x00000000);
151 bar->flush(bar);
152 }
153
154 return ret;
155}
156
157static int
158nv50_fifo_object_attach(struct nvkm_object *parent,
159 struct nvkm_object *object, u32 handle)
160{
161 struct nv50_fifo_chan *chan = (void *)parent;
162 u32 context;
163
164 if (nv_iclass(object, NV_GPUOBJ_CLASS))
165 context = nv_gpuobj(object)->node->offset >> 4;
166 else
167 context = 0x00000004; /* just non-zero */
168 38
169 switch (nv_engidx(object->engine)) { 39 nvkm_kmap(cur);
170 case NVDEV_ENGINE_DMAOBJ: 40 for (i = 0, p = 0; i < fifo->base.nr; i++) {
171 case NVDEV_ENGINE_SW : context |= 0x00000000; break; 41 if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
172 case NVDEV_ENGINE_GR : context |= 0x00100000; break; 42 nvkm_wo32(cur, p++ * 4, i);
173 case NVDEV_ENGINE_MPEG : context |= 0x00200000; break;
174 default:
175 return -EINVAL;
176 } 43 }
44 nvkm_done(cur);
177 45
178 return nvkm_ramht_insert(chan->ramht, 0, handle, context); 46 nvkm_wr32(device, 0x0032f4, nvkm_memory_addr(cur) >> 12);
47 nvkm_wr32(device, 0x0032ec, p);
48 nvkm_wr32(device, 0x002500, 0x00000101);
179} 49}
180 50
181void 51void
182nv50_fifo_object_detach(struct nvkm_object *parent, int cookie) 52nv50_fifo_runlist_update(struct nv50_fifo *fifo)
183{ 53{
184 struct nv50_fifo_chan *chan = (void *)parent; 54 mutex_lock(&fifo->base.engine.subdev.mutex);
185 nvkm_ramht_remove(chan->ramht, cookie); 55 nv50_fifo_runlist_update_locked(fifo);
56 mutex_unlock(&fifo->base.engine.subdev.mutex);
186} 57}
187 58
188static int 59int
189nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine, 60nv50_fifo_oneinit(struct nvkm_fifo *base)
190 struct nvkm_oclass *oclass, void *data, u32 size,
191 struct nvkm_object **pobject)
192{ 61{
193 union { 62 struct nv50_fifo *fifo = nv50_fifo(base);
194 struct nv03_channel_dma_v0 v0; 63 struct nvkm_device *device = fifo->base.engine.subdev.device;
195 } *args = data;
196 struct nvkm_bar *bar = nvkm_bar(parent);
197 struct nv50_fifo_base *base = (void *)parent;
198 struct nv50_fifo_chan *chan;
199 int ret; 64 int ret;
200 65
201 nv_ioctl(parent, "create channel dma size %d\n", size); 66 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
202 if (nvif_unpack(args->v0, 0, 0, false)) { 67 false, &fifo->runlist[0]);
203 nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
204 "offset %016llx\n", args->v0.version,
205 args->v0.pushbuf, args->v0.offset);
206 } else
207 return ret;
208
209 ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
210 0x2000, args->v0.pushbuf,
211 (1ULL << NVDEV_ENGINE_DMAOBJ) |
212 (1ULL << NVDEV_ENGINE_SW) |
213 (1ULL << NVDEV_ENGINE_GR) |
214 (1ULL << NVDEV_ENGINE_MPEG), &chan);
215 *pobject = nv_object(chan);
216 if (ret)
217 return ret;
218
219 args->v0.chid = chan->base.chid;
220
221 nv_parent(chan)->context_attach = nv50_fifo_context_attach;
222 nv_parent(chan)->context_detach = nv50_fifo_context_detach;
223 nv_parent(chan)->object_attach = nv50_fifo_object_attach;
224 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
225
226 ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
227 &chan->ramht);
228 if (ret) 68 if (ret)
229 return ret; 69 return ret;
230 70
231 nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset)); 71 return nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
232 nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset)); 72 false, &fifo->runlist[1]);
233 nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
234 nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
235 nv_wo32(base->ramfc, 0x3c, 0x003f6078);
236 nv_wo32(base->ramfc, 0x44, 0x01003fff);
237 nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
238 nv_wo32(base->ramfc, 0x4c, 0xffffffff);
239 nv_wo32(base->ramfc, 0x60, 0x7fffffff);
240 nv_wo32(base->ramfc, 0x78, 0x00000000);
241 nv_wo32(base->ramfc, 0x7c, 0x30000001);
242 nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
243 (4 << 24) /* SEARCH_FULL */ |
244 (chan->ramht->gpuobj.node->offset >> 4));
245 bar->flush(bar);
246 return 0;
247} 73}
248 74
249static int 75void
250nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine, 76nv50_fifo_init(struct nvkm_fifo *base)
251 struct nvkm_oclass *oclass, void *data, u32 size,
252 struct nvkm_object **pobject)
253{ 77{
254 union { 78 struct nv50_fifo *fifo = nv50_fifo(base);
255 struct nv50_channel_gpfifo_v0 v0; 79 struct nvkm_device *device = fifo->base.engine.subdev.device;
256 } *args = data; 80 int i;
257 struct nvkm_bar *bar = nvkm_bar(parent);
258 struct nv50_fifo_base *base = (void *)parent;
259 struct nv50_fifo_chan *chan;
260 u64 ioffset, ilength;
261 int ret;
262 81
263 nv_ioctl(parent, "create channel gpfifo size %d\n", size); 82 nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
264 if (nvif_unpack(args->v0, 0, 0, false)) { 83 nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
265 nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x " 84 nvkm_wr32(device, 0x00250c, 0x6f3cfc34);
266 "ioffset %016llx ilength %08x\n", 85 nvkm_wr32(device, 0x002044, 0x01003fff);
267 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
268 args->v0.ilength);
269 } else
270 return ret;
271
272 ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
273 0x2000, args->v0.pushbuf,
274 (1ULL << NVDEV_ENGINE_DMAOBJ) |
275 (1ULL << NVDEV_ENGINE_SW) |
276 (1ULL << NVDEV_ENGINE_GR) |
277 (1ULL << NVDEV_ENGINE_MPEG), &chan);
278 *pobject = nv_object(chan);
279 if (ret)
280 return ret;
281 86
282 args->v0.chid = chan->base.chid; 87 nvkm_wr32(device, 0x002100, 0xffffffff);
88 nvkm_wr32(device, 0x002140, 0xbfffffff);
283 89
284 nv_parent(chan)->context_attach = nv50_fifo_context_attach; 90 for (i = 0; i < 128; i++)
285 nv_parent(chan)->context_detach = nv50_fifo_context_detach; 91 nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);
286 nv_parent(chan)->object_attach = nv50_fifo_object_attach; 92 nv50_fifo_runlist_update_locked(fifo);
287 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
288
289 ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
290 &chan->ramht);
291 if (ret)
292 return ret;
293
294 ioffset = args->v0.ioffset;
295 ilength = order_base_2(args->v0.ilength / 8);
296 93
297 nv_wo32(base->ramfc, 0x3c, 0x403f6078); 94 nvkm_wr32(device, 0x003200, 0x00000001);
298 nv_wo32(base->ramfc, 0x44, 0x01003fff); 95 nvkm_wr32(device, 0x003250, 0x00000001);
299 nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); 96 nvkm_wr32(device, 0x002500, 0x00000001);
300 nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
301 nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
302 nv_wo32(base->ramfc, 0x60, 0x7fffffff);
303 nv_wo32(base->ramfc, 0x78, 0x00000000);
304 nv_wo32(base->ramfc, 0x7c, 0x30000001);
305 nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
306 (4 << 24) /* SEARCH_FULL */ |
307 (chan->ramht->gpuobj.node->offset >> 4));
308 bar->flush(bar);
309 return 0;
310} 97}
311 98
312void 99void *
313nv50_fifo_chan_dtor(struct nvkm_object *object) 100nv50_fifo_dtor(struct nvkm_fifo *base)
314{ 101{
315 struct nv50_fifo_chan *chan = (void *)object; 102 struct nv50_fifo *fifo = nv50_fifo(base);
316 nvkm_ramht_ref(NULL, &chan->ramht); 103 nvkm_memory_del(&fifo->runlist[1]);
317 nvkm_fifo_channel_destroy(&chan->base); 104 nvkm_memory_del(&fifo->runlist[0]);
318} 105 return fifo;
319
320static int
321nv50_fifo_chan_init(struct nvkm_object *object)
322{
323 struct nv50_fifo_priv *priv = (void *)object->engine;
324 struct nv50_fifo_base *base = (void *)object->parent;
325 struct nv50_fifo_chan *chan = (void *)object;
326 struct nvkm_gpuobj *ramfc = base->ramfc;
327 u32 chid = chan->base.chid;
328 int ret;
329
330 ret = nvkm_fifo_channel_init(&chan->base);
331 if (ret)
332 return ret;
333
334 nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
335 nv50_fifo_playlist_update(priv);
336 return 0;
337} 106}
338 107
339int 108int
340nv50_fifo_chan_fini(struct nvkm_object *object, bool suspend) 109nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
341{ 110 int index, struct nvkm_fifo **pfifo)
342 struct nv50_fifo_priv *priv = (void *)object->engine;
343 struct nv50_fifo_chan *chan = (void *)object;
344 u32 chid = chan->base.chid;
345
346 /* remove channel from playlist, fifo will unload context */
347 nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
348 nv50_fifo_playlist_update(priv);
349 nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000);
350
351 return nvkm_fifo_channel_fini(&chan->base, suspend);
352}
353
354static struct nvkm_ofuncs
355nv50_fifo_ofuncs_dma = {
356 .ctor = nv50_fifo_chan_ctor_dma,
357 .dtor = nv50_fifo_chan_dtor,
358 .init = nv50_fifo_chan_init,
359 .fini = nv50_fifo_chan_fini,
360 .map = _nvkm_fifo_channel_map,
361 .rd32 = _nvkm_fifo_channel_rd32,
362 .wr32 = _nvkm_fifo_channel_wr32,
363 .ntfy = _nvkm_fifo_channel_ntfy
364};
365
366static struct nvkm_ofuncs
367nv50_fifo_ofuncs_ind = {
368 .ctor = nv50_fifo_chan_ctor_ind,
369 .dtor = nv50_fifo_chan_dtor,
370 .init = nv50_fifo_chan_init,
371 .fini = nv50_fifo_chan_fini,
372 .map = _nvkm_fifo_channel_map,
373 .rd32 = _nvkm_fifo_channel_rd32,
374 .wr32 = _nvkm_fifo_channel_wr32,
375 .ntfy = _nvkm_fifo_channel_ntfy
376};
377
378static struct nvkm_oclass
379nv50_fifo_sclass[] = {
380 { NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
381 { NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
382 {}
383};
384
385/*******************************************************************************
386 * FIFO context - basically just the instmem reserved for the channel
387 ******************************************************************************/
388
389static int
390nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
391 struct nvkm_oclass *oclass, void *data, u32 size,
392 struct nvkm_object **pobject)
393{ 111{
394 struct nv50_fifo_base *base; 112 struct nv50_fifo *fifo;
395 int ret; 113 int ret;
396 114
397 ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000, 115 if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
398 0x1000, NVOBJ_FLAG_HEAP, &base); 116 return -ENOMEM;
399 *pobject = nv_object(base); 117 *pfifo = &fifo->base;
400 if (ret)
401 return ret;
402 118
403 ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 119 ret = nvkm_fifo_ctor(func, device, index, 128, &fifo->base);
404 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
405 if (ret)
406 return ret;
407
408 ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
409 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
410 if (ret)
411 return ret;
412
413 ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
414 &base->pgd);
415 if (ret)
416 return ret;
417
418 ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
419 if (ret) 120 if (ret)
420 return ret; 121 return ret;
421 122
123 set_bit(0, fifo->base.mask); /* PIO channel */
124 set_bit(127, fifo->base.mask); /* inactive channel */
422 return 0; 125 return 0;
423} 126}
424 127
425void 128static const struct nvkm_fifo_func
426nv50_fifo_context_dtor(struct nvkm_object *object) 129nv50_fifo = {
427{ 130 .dtor = nv50_fifo_dtor,
428 struct nv50_fifo_base *base = (void *)object; 131 .oneinit = nv50_fifo_oneinit,
429 nvkm_vm_ref(NULL, &base->vm, base->pgd); 132 .init = nv50_fifo_init,
430 nvkm_gpuobj_ref(NULL, &base->pgd); 133 .intr = nv04_fifo_intr,
431 nvkm_gpuobj_ref(NULL, &base->eng); 134 .pause = nv04_fifo_pause,
432 nvkm_gpuobj_ref(NULL, &base->ramfc); 135 .start = nv04_fifo_start,
433 nvkm_gpuobj_ref(NULL, &base->cache); 136 .chan = {
434 nvkm_fifo_context_destroy(&base->base); 137 &nv50_fifo_dma_oclass,
435} 138 &nv50_fifo_gpfifo_oclass,
436 139 NULL
437static struct nvkm_oclass
438nv50_fifo_cclass = {
439 .handle = NV_ENGCTX(FIFO, 0x50),
440 .ofuncs = &(struct nvkm_ofuncs) {
441 .ctor = nv50_fifo_context_ctor,
442 .dtor = nv50_fifo_context_dtor,
443 .init = _nvkm_fifo_context_init,
444 .fini = _nvkm_fifo_context_fini,
445 .rd32 = _nvkm_fifo_context_rd32,
446 .wr32 = _nvkm_fifo_context_wr32,
447 }, 140 },
448}; 141};
449 142
450/*******************************************************************************
451 * PFIFO engine
452 ******************************************************************************/
453
454static int
455nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
456 struct nvkm_oclass *oclass, void *data, u32 size,
457 struct nvkm_object **pobject)
458{
459 struct nv50_fifo_priv *priv;
460 int ret;
461
462 ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &priv);
463 *pobject = nv_object(priv);
464 if (ret)
465 return ret;
466
467 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
468 &priv->playlist[0]);
469 if (ret)
470 return ret;
471
472 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
473 &priv->playlist[1]);
474 if (ret)
475 return ret;
476
477 nv_subdev(priv)->unit = 0x00000100;
478 nv_subdev(priv)->intr = nv04_fifo_intr;
479 nv_engine(priv)->cclass = &nv50_fifo_cclass;
480 nv_engine(priv)->sclass = nv50_fifo_sclass;
481 priv->base.pause = nv04_fifo_pause;
482 priv->base.start = nv04_fifo_start;
483 return 0;
484}
485
486void
487nv50_fifo_dtor(struct nvkm_object *object)
488{
489 struct nv50_fifo_priv *priv = (void *)object;
490
491 nvkm_gpuobj_ref(NULL, &priv->playlist[1]);
492 nvkm_gpuobj_ref(NULL, &priv->playlist[0]);
493
494 nvkm_fifo_destroy(&priv->base);
495}
496
497int 143int
498nv50_fifo_init(struct nvkm_object *object) 144nv50_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
499{ 145{
500 struct nv50_fifo_priv *priv = (void *)object; 146 return nv50_fifo_new_(&nv50_fifo, device, index, pfifo);
501 int ret, i;
502
503 ret = nvkm_fifo_init(&priv->base);
504 if (ret)
505 return ret;
506
507 nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
508 nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
509 nv_wr32(priv, 0x00250c, 0x6f3cfc34);
510 nv_wr32(priv, 0x002044, 0x01003fff);
511
512 nv_wr32(priv, 0x002100, 0xffffffff);
513 nv_wr32(priv, 0x002140, 0xbfffffff);
514
515 for (i = 0; i < 128; i++)
516 nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
517 nv50_fifo_playlist_update_locked(priv);
518
519 nv_wr32(priv, 0x003200, 0x00000001);
520 nv_wr32(priv, 0x003250, 0x00000001);
521 nv_wr32(priv, 0x002500, 0x00000001);
522 return 0;
523} 147}
524
525struct nvkm_oclass *
526nv50_fifo_oclass = &(struct nvkm_oclass) {
527 .handle = NV_ENGINE(FIFO, 0x50),
528 .ofuncs = &(struct nvkm_ofuncs) {
529 .ctor = nv50_fifo_ctor,
530 .dtor = nv50_fifo_dtor,
531 .init = nv50_fifo_init,
532 .fini = _nvkm_fifo_fini,
533 },
534};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
index 09ed93c66567..8ab53948cbb4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
@@ -1,36 +1,19 @@
1#ifndef __NV50_FIFO_H__ 1#ifndef __NV50_FIFO_H__
2#define __NV50_FIFO_H__ 2#define __NV50_FIFO_H__
3#include <engine/fifo.h> 3#define nv50_fifo(p) container_of((p), struct nv50_fifo, base)
4#include "priv.h"
4 5
5struct nv50_fifo_priv { 6struct nv50_fifo {
6 struct nvkm_fifo base; 7 struct nvkm_fifo base;
7 struct nvkm_gpuobj *playlist[2]; 8 struct nvkm_memory *runlist[2];
8 int cur_playlist; 9 int cur_runlist;
9}; 10};
10 11
11struct nv50_fifo_base { 12int nv50_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
12 struct nvkm_fifo_base base; 13 int index, struct nvkm_fifo **);
13 struct nvkm_gpuobj *ramfc;
14 struct nvkm_gpuobj *cache;
15 struct nvkm_gpuobj *eng;
16 struct nvkm_gpuobj *pgd;
17 struct nvkm_vm *vm;
18};
19
20struct nv50_fifo_chan {
21 struct nvkm_fifo_chan base;
22 u32 subc[8];
23 struct nvkm_ramht *ramht;
24};
25
26void nv50_fifo_playlist_update(struct nv50_fifo_priv *);
27
28void nv50_fifo_object_detach(struct nvkm_object *, int);
29void nv50_fifo_chan_dtor(struct nvkm_object *);
30int nv50_fifo_chan_fini(struct nvkm_object *, bool);
31
32void nv50_fifo_context_dtor(struct nvkm_object *);
33 14
34void nv50_fifo_dtor(struct nvkm_object *); 15void *nv50_fifo_dtor(struct nvkm_fifo *);
35int nv50_fifo_init(struct nvkm_object *); 16int nv50_fifo_oneinit(struct nvkm_fifo *);
17void nv50_fifo_init(struct nvkm_fifo *);
18void nv50_fifo_runlist_update(struct nv50_fifo *);
36#endif 19#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
new file mode 100644
index 000000000000..cb1432e9be08
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -0,0 +1,26 @@
1#ifndef __NVKM_FIFO_PRIV_H__
2#define __NVKM_FIFO_PRIV_H__
3#define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
4#include <engine/fifo.h>
5
6int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *,
7 int index, int nr, struct nvkm_fifo *);
8void nvkm_fifo_uevent(struct nvkm_fifo *);
9
10struct nvkm_fifo_func {
11 void *(*dtor)(struct nvkm_fifo *);
12 int (*oneinit)(struct nvkm_fifo *);
13 void (*init)(struct nvkm_fifo *);
14 void (*fini)(struct nvkm_fifo *);
15 void (*intr)(struct nvkm_fifo *);
16 void (*pause)(struct nvkm_fifo *, unsigned long *);
17 void (*start)(struct nvkm_fifo *, unsigned long *);
18 void (*uevent_init)(struct nvkm_fifo *);
19 void (*uevent_fini)(struct nvkm_fifo *);
20 const struct nvkm_fifo_chan_oclass *chan[];
21};
22
23void nv04_fifo_intr(struct nvkm_fifo *);
24void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
25void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
26#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
new file mode 100644
index 000000000000..92d56221197b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
@@ -0,0 +1,132 @@
1#ifndef __NV04_FIFO_REGS_H__
2#define __NV04_FIFO_REGS_H__
3
4#define NV04_PFIFO_DELAY_0 0x00002040
5#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
6#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
7#define NV03_PFIFO_INTR_0 0x00002100
8#define NV03_PFIFO_INTR_EN_0 0x00002140
9# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
10# define NV_PFIFO_INTR_RUNOUT (1<<4)
11# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
12# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
13# define NV_PFIFO_INTR_DMA_PT (1<<16)
14# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
15# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
16#define NV03_PFIFO_RAMHT 0x00002210
17#define NV03_PFIFO_RAMFC 0x00002214
18#define NV03_PFIFO_RAMRO 0x00002218
19#define NV40_PFIFO_RAMFC 0x00002220
20#define NV03_PFIFO_CACHES 0x00002500
21#define NV04_PFIFO_MODE 0x00002504
22#define NV04_PFIFO_DMA 0x00002508
23#define NV04_PFIFO_SIZE 0x0000250c
24#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
25#define NV50_PFIFO_CTX_TABLE__SIZE 128
26#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
27#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
28#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
29#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
30#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
31#define NV03_PFIFO_CACHE0_PULL0 0x00003040
32#define NV04_PFIFO_CACHE0_PULL0 0x00003050
33#define NV04_PFIFO_CACHE0_PULL1 0x00003054
34#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
35#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
36#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
37#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
38#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
39#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
40#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
41#define NV03_PFIFO_CACHE1_PUT 0x00003210
42#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
43#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
44# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
45# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
46# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
47# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
48# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
49# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
50# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
51# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
52# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
53# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
54# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
55# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
56# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
57# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
58# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
59# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
60# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
61# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
62# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
63# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
64# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
65# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
66# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
67# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
68# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
69# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
70# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
71# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
72# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
73# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
74# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
75# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
76# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
77# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
78# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
79# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
80# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
81# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
82# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
83# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
84# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
85# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
86# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
87# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
88# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
89# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
90# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
91# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
92# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
93# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
94# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
95# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
96# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
97# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
98# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
99# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
100# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
101# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
102# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
103# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
104# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
105#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
106#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
107#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
108#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
109#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
110#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
111#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
112#define NV03_PFIFO_CACHE1_PULL0 0x00003240
113#define NV04_PFIFO_CACHE1_PULL0 0x00003250
114# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010
115# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000
116#define NV03_PFIFO_CACHE1_PULL1 0x00003250
117#define NV04_PFIFO_CACHE1_PULL1 0x00003254
118#define NV04_PFIFO_CACHE1_HASH 0x00003258
119#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
120#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
121#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
122#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
123#define NV03_PFIFO_CACHE1_GET 0x00003270
124#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
125#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
126#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
127#define NV40_PFIFO_UNK32E4 0x000032E4
128#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
129#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
130#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
131#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
132#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 2e1b92f71d9e..9ad0d0e78a96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -1,21 +1,8 @@
1nvkm-y += nvkm/engine/gr/ctxnv40.o 1nvkm-y += nvkm/engine/gr/base.o
2nvkm-y += nvkm/engine/gr/ctxnv50.o
3nvkm-y += nvkm/engine/gr/ctxgf100.o
4nvkm-y += nvkm/engine/gr/ctxgf108.o
5nvkm-y += nvkm/engine/gr/ctxgf104.o
6nvkm-y += nvkm/engine/gr/ctxgf110.o
7nvkm-y += nvkm/engine/gr/ctxgf117.o
8nvkm-y += nvkm/engine/gr/ctxgf119.o
9nvkm-y += nvkm/engine/gr/ctxgk104.o
10nvkm-y += nvkm/engine/gr/ctxgk20a.o
11nvkm-y += nvkm/engine/gr/ctxgk110.o
12nvkm-y += nvkm/engine/gr/ctxgk110b.o
13nvkm-y += nvkm/engine/gr/ctxgk208.o
14nvkm-y += nvkm/engine/gr/ctxgm107.o
15nvkm-y += nvkm/engine/gr/ctxgm204.o
16nvkm-y += nvkm/engine/gr/ctxgm206.o
17nvkm-y += nvkm/engine/gr/nv04.o 2nvkm-y += nvkm/engine/gr/nv04.o
18nvkm-y += nvkm/engine/gr/nv10.o 3nvkm-y += nvkm/engine/gr/nv10.o
4nvkm-y += nvkm/engine/gr/nv15.o
5nvkm-y += nvkm/engine/gr/nv17.o
19nvkm-y += nvkm/engine/gr/nv20.o 6nvkm-y += nvkm/engine/gr/nv20.o
20nvkm-y += nvkm/engine/gr/nv25.o 7nvkm-y += nvkm/engine/gr/nv25.o
21nvkm-y += nvkm/engine/gr/nv2a.o 8nvkm-y += nvkm/engine/gr/nv2a.o
@@ -23,18 +10,43 @@ nvkm-y += nvkm/engine/gr/nv30.o
23nvkm-y += nvkm/engine/gr/nv34.o 10nvkm-y += nvkm/engine/gr/nv34.o
24nvkm-y += nvkm/engine/gr/nv35.o 11nvkm-y += nvkm/engine/gr/nv35.o
25nvkm-y += nvkm/engine/gr/nv40.o 12nvkm-y += nvkm/engine/gr/nv40.o
13nvkm-y += nvkm/engine/gr/nv44.o
26nvkm-y += nvkm/engine/gr/nv50.o 14nvkm-y += nvkm/engine/gr/nv50.o
15nvkm-y += nvkm/engine/gr/g84.o
16nvkm-y += nvkm/engine/gr/gt200.o
17nvkm-y += nvkm/engine/gr/mcp79.o
18nvkm-y += nvkm/engine/gr/gt215.o
19nvkm-y += nvkm/engine/gr/mcp89.o
27nvkm-y += nvkm/engine/gr/gf100.o 20nvkm-y += nvkm/engine/gr/gf100.o
28nvkm-y += nvkm/engine/gr/gf108.o
29nvkm-y += nvkm/engine/gr/gf104.o 21nvkm-y += nvkm/engine/gr/gf104.o
22nvkm-y += nvkm/engine/gr/gf108.o
30nvkm-y += nvkm/engine/gr/gf110.o 23nvkm-y += nvkm/engine/gr/gf110.o
31nvkm-y += nvkm/engine/gr/gf117.o 24nvkm-y += nvkm/engine/gr/gf117.o
32nvkm-y += nvkm/engine/gr/gf119.o 25nvkm-y += nvkm/engine/gr/gf119.o
33nvkm-y += nvkm/engine/gr/gk104.o 26nvkm-y += nvkm/engine/gr/gk104.o
34nvkm-y += nvkm/engine/gr/gk20a.o
35nvkm-y += nvkm/engine/gr/gk110.o 27nvkm-y += nvkm/engine/gr/gk110.o
36nvkm-y += nvkm/engine/gr/gk110b.o 28nvkm-y += nvkm/engine/gr/gk110b.o
37nvkm-y += nvkm/engine/gr/gk208.o 29nvkm-y += nvkm/engine/gr/gk208.o
30nvkm-y += nvkm/engine/gr/gk20a.o
38nvkm-y += nvkm/engine/gr/gm107.o 31nvkm-y += nvkm/engine/gr/gm107.o
39nvkm-y += nvkm/engine/gr/gm204.o 32nvkm-y += nvkm/engine/gr/gm204.o
40nvkm-y += nvkm/engine/gr/gm206.o 33nvkm-y += nvkm/engine/gr/gm206.o
34nvkm-y += nvkm/engine/gr/gm20b.o
35
36nvkm-y += nvkm/engine/gr/ctxnv40.o
37nvkm-y += nvkm/engine/gr/ctxnv50.o
38nvkm-y += nvkm/engine/gr/ctxgf100.o
39nvkm-y += nvkm/engine/gr/ctxgf104.o
40nvkm-y += nvkm/engine/gr/ctxgf108.o
41nvkm-y += nvkm/engine/gr/ctxgf110.o
42nvkm-y += nvkm/engine/gr/ctxgf117.o
43nvkm-y += nvkm/engine/gr/ctxgf119.o
44nvkm-y += nvkm/engine/gr/ctxgk104.o
45nvkm-y += nvkm/engine/gr/ctxgk110.o
46nvkm-y += nvkm/engine/gr/ctxgk110b.o
47nvkm-y += nvkm/engine/gr/ctxgk208.o
48nvkm-y += nvkm/engine/gr/ctxgk20a.o
49nvkm-y += nvkm/engine/gr/ctxgm107.o
50nvkm-y += nvkm/engine/gr/ctxgm204.o
51nvkm-y += nvkm/engine/gr/ctxgm206.o
52nvkm-y += nvkm/engine/gr/ctxgm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
new file mode 100644
index 000000000000..090765ff070d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
@@ -0,0 +1,136 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25
26#include <engine/fifo.h>
27
28static void
29nvkm_gr_tile(struct nvkm_engine *engine, int region, struct nvkm_fb_tile *tile)
30{
31 struct nvkm_gr *gr = nvkm_gr(engine);
32 if (gr->func->tile)
33 gr->func->tile(gr, region, tile);
34}
35
36u64
37nvkm_gr_units(struct nvkm_gr *gr)
38{
39 if (gr->func->units)
40 return gr->func->units(gr);
41 return 0;
42}
43
44int
45nvkm_gr_tlb_flush(struct nvkm_gr *gr)
46{
47 if (gr->func->tlb_flush)
48 return gr->func->tlb_flush(gr);
49 return -ENODEV;
50}
51
52static int
53nvkm_gr_oclass_get(struct nvkm_oclass *oclass, int index)
54{
55 struct nvkm_gr *gr = nvkm_gr(oclass->engine);
56 int c = 0;
57
58 if (gr->func->object_get) {
59 int ret = gr->func->object_get(gr, index, &oclass->base);
60 if (oclass->base.oclass)
61 return index;
62 return ret;
63 }
64
65 while (gr->func->sclass[c].oclass) {
66 if (c++ == index) {
67 oclass->base = gr->func->sclass[index];
68 return index;
69 }
70 }
71
72 return c;
73}
74
75static int
76nvkm_gr_cclass_new(struct nvkm_fifo_chan *chan,
77 const struct nvkm_oclass *oclass,
78 struct nvkm_object **pobject)
79{
80 struct nvkm_gr *gr = nvkm_gr(oclass->engine);
81 if (gr->func->chan_new)
82 return gr->func->chan_new(gr, chan, oclass, pobject);
83 return 0;
84}
85
86static void
87nvkm_gr_intr(struct nvkm_engine *engine)
88{
89 struct nvkm_gr *gr = nvkm_gr(engine);
90 gr->func->intr(gr);
91}
92
93static int
94nvkm_gr_oneinit(struct nvkm_engine *engine)
95{
96 struct nvkm_gr *gr = nvkm_gr(engine);
97 if (gr->func->oneinit)
98 return gr->func->oneinit(gr);
99 return 0;
100}
101
102static int
103nvkm_gr_init(struct nvkm_engine *engine)
104{
105 struct nvkm_gr *gr = nvkm_gr(engine);
106 return gr->func->init(gr);
107}
108
109static void *
110nvkm_gr_dtor(struct nvkm_engine *engine)
111{
112 struct nvkm_gr *gr = nvkm_gr(engine);
113 if (gr->func->dtor)
114 return gr->func->dtor(gr);
115 return gr;
116}
117
118static const struct nvkm_engine_func
119nvkm_gr = {
120 .dtor = nvkm_gr_dtor,
121 .oneinit = nvkm_gr_oneinit,
122 .init = nvkm_gr_init,
123 .intr = nvkm_gr_intr,
124 .tile = nvkm_gr_tile,
125 .fifo.cclass = nvkm_gr_cclass_new,
126 .fifo.sclass = nvkm_gr_oclass_get,
127};
128
129int
130nvkm_gr_ctor(const struct nvkm_gr_func *func, struct nvkm_device *device,
131 int index, u32 pmc_enable, bool enable, struct nvkm_gr *gr)
132{
133 gr->func = func;
134 return nvkm_engine_ctor(&nvkm_gr, device, index, pmc_enable,
135 enable, &gr->engine);
136}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 57e2c5b13123..56f392d3d4fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -23,7 +23,6 @@
23 */ 23 */
24#include "ctxgf100.h" 24#include "ctxgf100.h"
25 25
26#include <subdev/bar.h>
27#include <subdev/fb.h> 26#include <subdev/fb.h>
28#include <subdev/mc.h> 27#include <subdev/mc.h>
29#include <subdev/timer.h> 28#include <subdev/timer.h>
@@ -1005,6 +1004,7 @@ void
1005gf100_grctx_mmio_item(struct gf100_grctx *info, u32 addr, u32 data, 1004gf100_grctx_mmio_item(struct gf100_grctx *info, u32 addr, u32 data,
1006 int shift, int buffer) 1005 int shift, int buffer)
1007{ 1006{
1007 struct nvkm_device *device = info->gr->base.engine.subdev.device;
1008 if (info->data) { 1008 if (info->data) {
1009 if (shift >= 0) { 1009 if (shift >= 0) {
1010 info->mmio->addr = addr; 1010 info->mmio->addr = addr;
@@ -1021,29 +1021,29 @@ gf100_grctx_mmio_item(struct gf100_grctx *info, u32 addr, u32 data,
1021 return; 1021 return;
1022 } 1022 }
1023 1023
1024 nv_wr32(info->priv, addr, data); 1024 nvkm_wr32(device, addr, data);
1025} 1025}
1026 1026
1027void 1027void
1028gf100_grctx_generate_bundle(struct gf100_grctx *info) 1028gf100_grctx_generate_bundle(struct gf100_grctx *info)
1029{ 1029{
1030 const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv); 1030 const struct gf100_grctx_func *grctx = info->gr->func->grctx;
1031 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; 1031 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
1032 const int s = 8; 1032 const int s = 8;
1033 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); 1033 const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
1034 mmio_refn(info, 0x408004, 0x00000000, s, b); 1034 mmio_refn(info, 0x408004, 0x00000000, s, b);
1035 mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); 1035 mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
1036 mmio_refn(info, 0x418808, 0x00000000, s, b); 1036 mmio_refn(info, 0x418808, 0x00000000, s, b);
1037 mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s)); 1037 mmio_wr32(info, 0x41880c, 0x80000000 | (grctx->bundle_size >> s));
1038} 1038}
1039 1039
1040void 1040void
1041gf100_grctx_generate_pagepool(struct gf100_grctx *info) 1041gf100_grctx_generate_pagepool(struct gf100_grctx *info)
1042{ 1042{
1043 const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv); 1043 const struct gf100_grctx_func *grctx = info->gr->func->grctx;
1044 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; 1044 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
1045 const int s = 8; 1045 const int s = 8;
1046 const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access); 1046 const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
1047 mmio_refn(info, 0x40800c, 0x00000000, s, b); 1047 mmio_refn(info, 0x40800c, 0x00000000, s, b);
1048 mmio_wr32(info, 0x408010, 0x80000000); 1048 mmio_wr32(info, 0x408010, 0x80000000);
1049 mmio_refn(info, 0x419004, 0x00000000, s, b); 1049 mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -1053,13 +1053,13 @@ gf100_grctx_generate_pagepool(struct gf100_grctx *info)
1053void 1053void
1054gf100_grctx_generate_attrib(struct gf100_grctx *info) 1054gf100_grctx_generate_attrib(struct gf100_grctx *info)
1055{ 1055{
1056 struct gf100_gr_priv *priv = info->priv; 1056 struct gf100_gr *gr = info->gr;
1057 const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv); 1057 const struct gf100_grctx_func *grctx = gr->func->grctx;
1058 const u32 attrib = impl->attrib_nr; 1058 const u32 attrib = grctx->attrib_nr;
1059 const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max); 1059 const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
1060 const u32 access = NV_MEM_ACCESS_RW; 1060 const u32 access = NV_MEM_ACCESS_RW;
1061 const int s = 12; 1061 const int s = 12;
1062 const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access); 1062 const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
1063 int gpc, tpc; 1063 int gpc, tpc;
1064 u32 bo = 0; 1064 u32 bo = 0;
1065 1065
@@ -1067,91 +1067,95 @@ gf100_grctx_generate_attrib(struct gf100_grctx *info)
1067 mmio_refn(info, 0x419848, 0x10000000, s, b); 1067 mmio_refn(info, 0x419848, 0x10000000, s, b);
1068 mmio_wr32(info, 0x405830, (attrib << 16)); 1068 mmio_wr32(info, 0x405830, (attrib << 16));
1069 1069
1070 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 1070 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
1071 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { 1071 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
1072 const u32 o = TPC_UNIT(gpc, tpc, 0x0520); 1072 const u32 o = TPC_UNIT(gpc, tpc, 0x0520);
1073 mmio_skip(info, o, (attrib << 16) | ++bo); 1073 mmio_skip(info, o, (attrib << 16) | ++bo);
1074 mmio_wr32(info, o, (attrib << 16) | --bo); 1074 mmio_wr32(info, o, (attrib << 16) | --bo);
1075 bo += impl->attrib_nr_max; 1075 bo += grctx->attrib_nr_max;
1076 } 1076 }
1077 } 1077 }
1078} 1078}
1079 1079
1080void 1080void
1081gf100_grctx_generate_unkn(struct gf100_gr_priv *priv) 1081gf100_grctx_generate_unkn(struct gf100_gr *gr)
1082{ 1082{
1083} 1083}
1084 1084
1085void 1085void
1086gf100_grctx_generate_tpcid(struct gf100_gr_priv *priv) 1086gf100_grctx_generate_tpcid(struct gf100_gr *gr)
1087{ 1087{
1088 struct nvkm_device *device = gr->base.engine.subdev.device;
1088 int gpc, tpc, id; 1089 int gpc, tpc, id;
1089 1090
1090 for (tpc = 0, id = 0; tpc < 4; tpc++) { 1091 for (tpc = 0, id = 0; tpc < 4; tpc++) {
1091 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 1092 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
1092 if (tpc < priv->tpc_nr[gpc]) { 1093 if (tpc < gr->tpc_nr[gpc]) {
1093 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id); 1094 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
1094 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x4e8), id); 1095 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x4e8), id);
1095 nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id); 1096 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
1096 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id); 1097 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
1097 id++; 1098 id++;
1098 } 1099 }
1099 1100
1100 nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]); 1101 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c08), gr->tpc_nr[gpc]);
1101 nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]); 1102 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c8c), gr->tpc_nr[gpc]);
1102 } 1103 }
1103 } 1104 }
1104} 1105}
1105 1106
1106void 1107void
1107gf100_grctx_generate_r406028(struct gf100_gr_priv *priv) 1108gf100_grctx_generate_r406028(struct gf100_gr *gr)
1108{ 1109{
1110 struct nvkm_device *device = gr->base.engine.subdev.device;
1109 u32 tmp[GPC_MAX / 8] = {}, i = 0; 1111 u32 tmp[GPC_MAX / 8] = {}, i = 0;
1110 for (i = 0; i < priv->gpc_nr; i++) 1112 for (i = 0; i < gr->gpc_nr; i++)
1111 tmp[i / 8] |= priv->tpc_nr[i] << ((i % 8) * 4); 1113 tmp[i / 8] |= gr->tpc_nr[i] << ((i % 8) * 4);
1112 for (i = 0; i < 4; i++) { 1114 for (i = 0; i < 4; i++) {
1113 nv_wr32(priv, 0x406028 + (i * 4), tmp[i]); 1115 nvkm_wr32(device, 0x406028 + (i * 4), tmp[i]);
1114 nv_wr32(priv, 0x405870 + (i * 4), tmp[i]); 1116 nvkm_wr32(device, 0x405870 + (i * 4), tmp[i]);
1115 } 1117 }
1116} 1118}
1117 1119
1118void 1120void
1119gf100_grctx_generate_r4060a8(struct gf100_gr_priv *priv) 1121gf100_grctx_generate_r4060a8(struct gf100_gr *gr)
1120{ 1122{
1123 struct nvkm_device *device = gr->base.engine.subdev.device;
1121 u8 tpcnr[GPC_MAX], data[TPC_MAX]; 1124 u8 tpcnr[GPC_MAX], data[TPC_MAX];
1122 int gpc, tpc, i; 1125 int gpc, tpc, i;
1123 1126
1124 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); 1127 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
1125 memset(data, 0x1f, sizeof(data)); 1128 memset(data, 0x1f, sizeof(data));
1126 1129
1127 gpc = -1; 1130 gpc = -1;
1128 for (tpc = 0; tpc < priv->tpc_total; tpc++) { 1131 for (tpc = 0; tpc < gr->tpc_total; tpc++) {
1129 do { 1132 do {
1130 gpc = (gpc + 1) % priv->gpc_nr; 1133 gpc = (gpc + 1) % gr->gpc_nr;
1131 } while (!tpcnr[gpc]); 1134 } while (!tpcnr[gpc]);
1132 tpcnr[gpc]--; 1135 tpcnr[gpc]--;
1133 data[tpc] = gpc; 1136 data[tpc] = gpc;
1134 } 1137 }
1135 1138
1136 for (i = 0; i < 4; i++) 1139 for (i = 0; i < 4; i++)
1137 nv_wr32(priv, 0x4060a8 + (i * 4), ((u32 *)data)[i]); 1140 nvkm_wr32(device, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
1138} 1141}
1139 1142
1140void 1143void
1141gf100_grctx_generate_r418bb8(struct gf100_gr_priv *priv) 1144gf100_grctx_generate_r418bb8(struct gf100_gr *gr)
1142{ 1145{
1146 struct nvkm_device *device = gr->base.engine.subdev.device;
1143 u32 data[6] = {}, data2[2] = {}; 1147 u32 data[6] = {}, data2[2] = {};
1144 u8 tpcnr[GPC_MAX]; 1148 u8 tpcnr[GPC_MAX];
1145 u8 shift, ntpcv; 1149 u8 shift, ntpcv;
1146 int gpc, tpc, i; 1150 int gpc, tpc, i;
1147 1151
1148 /* calculate first set of magics */ 1152 /* calculate first set of magics */
1149 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); 1153 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
1150 1154
1151 gpc = -1; 1155 gpc = -1;
1152 for (tpc = 0; tpc < priv->tpc_total; tpc++) { 1156 for (tpc = 0; tpc < gr->tpc_total; tpc++) {
1153 do { 1157 do {
1154 gpc = (gpc + 1) % priv->gpc_nr; 1158 gpc = (gpc + 1) % gr->gpc_nr;
1155 } while (!tpcnr[gpc]); 1159 } while (!tpcnr[gpc]);
1156 tpcnr[gpc]--; 1160 tpcnr[gpc]--;
1157 1161
@@ -1163,7 +1167,7 @@ gf100_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
1163 1167
1164 /* and the second... */ 1168 /* and the second... */
1165 shift = 0; 1169 shift = 0;
1166 ntpcv = priv->tpc_total; 1170 ntpcv = gr->tpc_total;
1167 while (!(ntpcv & (1 << 4))) { 1171 while (!(ntpcv & (1 << 4))) {
1168 ntpcv <<= 1; 1172 ntpcv <<= 1;
1169 shift++; 1173 shift++;
@@ -1176,202 +1180,211 @@ gf100_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
1176 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5); 1180 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
1177 1181
1178 /* GPC_BROADCAST */ 1182 /* GPC_BROADCAST */
1179 nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) | 1183 nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
1180 priv->magic_not_rop_nr); 1184 gr->magic_not_rop_nr);
1181 for (i = 0; i < 6; i++) 1185 for (i = 0; i < 6; i++)
1182 nv_wr32(priv, 0x418b08 + (i * 4), data[i]); 1186 nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
1183 1187
1184 /* GPC_BROADCAST.TP_BROADCAST */ 1188 /* GPC_BROADCAST.TP_BROADCAST */
1185 nv_wr32(priv, 0x419bd0, (priv->tpc_total << 8) | 1189 nvkm_wr32(device, 0x419bd0, (gr->tpc_total << 8) |
1186 priv->magic_not_rop_nr | data2[0]); 1190 gr->magic_not_rop_nr | data2[0]);
1187 nv_wr32(priv, 0x419be4, data2[1]); 1191 nvkm_wr32(device, 0x419be4, data2[1]);
1188 for (i = 0; i < 6; i++) 1192 for (i = 0; i < 6; i++)
1189 nv_wr32(priv, 0x419b00 + (i * 4), data[i]); 1193 nvkm_wr32(device, 0x419b00 + (i * 4), data[i]);
1190 1194
1191 /* UNK78xx */ 1195 /* UNK78xx */
1192 nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) | 1196 nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
1193 priv->magic_not_rop_nr); 1197 gr->magic_not_rop_nr);
1194 for (i = 0; i < 6; i++) 1198 for (i = 0; i < 6; i++)
1195 nv_wr32(priv, 0x40780c + (i * 4), data[i]); 1199 nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
1196} 1200}
1197 1201
1198void 1202void
1199gf100_grctx_generate_r406800(struct gf100_gr_priv *priv) 1203gf100_grctx_generate_r406800(struct gf100_gr *gr)
1200{ 1204{
1205 struct nvkm_device *device = gr->base.engine.subdev.device;
1201 u64 tpc_mask = 0, tpc_set = 0; 1206 u64 tpc_mask = 0, tpc_set = 0;
1202 u8 tpcnr[GPC_MAX]; 1207 u8 tpcnr[GPC_MAX];
1203 int gpc, tpc; 1208 int gpc, tpc;
1204 int i, a, b; 1209 int i, a, b;
1205 1210
1206 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); 1211 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
1207 for (gpc = 0; gpc < priv->gpc_nr; gpc++) 1212 for (gpc = 0; gpc < gr->gpc_nr; gpc++)
1208 tpc_mask |= ((1ULL << priv->tpc_nr[gpc]) - 1) << (gpc * 8); 1213 tpc_mask |= ((1ULL << gr->tpc_nr[gpc]) - 1) << (gpc * 8);
1209 1214
1210 for (i = 0, gpc = -1, b = -1; i < 32; i++) { 1215 for (i = 0, gpc = -1, b = -1; i < 32; i++) {
1211 a = (i * (priv->tpc_total - 1)) / 32; 1216 a = (i * (gr->tpc_total - 1)) / 32;
1212 if (a != b) { 1217 if (a != b) {
1213 b = a; 1218 b = a;
1214 do { 1219 do {
1215 gpc = (gpc + 1) % priv->gpc_nr; 1220 gpc = (gpc + 1) % gr->gpc_nr;
1216 } while (!tpcnr[gpc]); 1221 } while (!tpcnr[gpc]);
1217 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--; 1222 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
1218 1223
1219 tpc_set |= 1ULL << ((gpc * 8) + tpc); 1224 tpc_set |= 1ULL << ((gpc * 8) + tpc);
1220 } 1225 }
1221 1226
1222 nv_wr32(priv, 0x406800 + (i * 0x20), lower_32_bits(tpc_set)); 1227 nvkm_wr32(device, 0x406800 + (i * 0x20), lower_32_bits(tpc_set));
1223 nv_wr32(priv, 0x406c00 + (i * 0x20), lower_32_bits(tpc_set ^ tpc_mask)); 1228 nvkm_wr32(device, 0x406c00 + (i * 0x20), lower_32_bits(tpc_set ^ tpc_mask));
1224 if (priv->gpc_nr > 4) { 1229 if (gr->gpc_nr > 4) {
1225 nv_wr32(priv, 0x406804 + (i * 0x20), upper_32_bits(tpc_set)); 1230 nvkm_wr32(device, 0x406804 + (i * 0x20), upper_32_bits(tpc_set));
1226 nv_wr32(priv, 0x406c04 + (i * 0x20), upper_32_bits(tpc_set ^ tpc_mask)); 1231 nvkm_wr32(device, 0x406c04 + (i * 0x20), upper_32_bits(tpc_set ^ tpc_mask));
1227 } 1232 }
1228 } 1233 }
1229} 1234}
1230 1235
1231void 1236void
1232gf100_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info) 1237gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
1233{ 1238{
1234 struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; 1239 struct nvkm_device *device = gr->base.engine.subdev.device;
1235 1240 const struct gf100_grctx_func *grctx = gr->func->grctx;
1236 nvkm_mc(priv)->unk260(nvkm_mc(priv), 0); 1241
1237 1242 nvkm_mc_unk260(device->mc, 0);
1238 gf100_gr_mmio(priv, oclass->hub); 1243
1239 gf100_gr_mmio(priv, oclass->gpc); 1244 gf100_gr_mmio(gr, grctx->hub);
1240 gf100_gr_mmio(priv, oclass->zcull); 1245 gf100_gr_mmio(gr, grctx->gpc);
1241 gf100_gr_mmio(priv, oclass->tpc); 1246 gf100_gr_mmio(gr, grctx->zcull);
1242 gf100_gr_mmio(priv, oclass->ppc); 1247 gf100_gr_mmio(gr, grctx->tpc);
1243 1248 gf100_gr_mmio(gr, grctx->ppc);
1244 nv_wr32(priv, 0x404154, 0x00000000); 1249
1245 1250 nvkm_wr32(device, 0x404154, 0x00000000);
1246 oclass->bundle(info); 1251
1247 oclass->pagepool(info); 1252 grctx->bundle(info);
1248 oclass->attrib(info); 1253 grctx->pagepool(info);
1249 oclass->unkn(priv); 1254 grctx->attrib(info);
1250 1255 grctx->unkn(gr);
1251 gf100_grctx_generate_tpcid(priv); 1256
1252 gf100_grctx_generate_r406028(priv); 1257 gf100_grctx_generate_tpcid(gr);
1253 gf100_grctx_generate_r4060a8(priv); 1258 gf100_grctx_generate_r406028(gr);
1254 gf100_grctx_generate_r418bb8(priv); 1259 gf100_grctx_generate_r4060a8(gr);
1255 gf100_grctx_generate_r406800(priv); 1260 gf100_grctx_generate_r418bb8(gr);
1256 1261 gf100_grctx_generate_r406800(gr);
1257 gf100_gr_icmd(priv, oclass->icmd); 1262
1258 nv_wr32(priv, 0x404154, 0x00000400); 1263 gf100_gr_icmd(gr, grctx->icmd);
1259 gf100_gr_mthd(priv, oclass->mthd); 1264 nvkm_wr32(device, 0x404154, 0x00000400);
1260 nvkm_mc(priv)->unk260(nvkm_mc(priv), 1); 1265 gf100_gr_mthd(gr, grctx->mthd);
1266 nvkm_mc_unk260(device->mc, 1);
1261} 1267}
1262 1268
1263int 1269int
1264gf100_grctx_generate(struct gf100_gr_priv *priv) 1270gf100_grctx_generate(struct gf100_gr *gr)
1265{ 1271{
1266 struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; 1272 const struct gf100_grctx_func *grctx = gr->func->grctx;
1267 struct nvkm_bar *bar = nvkm_bar(priv); 1273 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
1268 struct nvkm_gpuobj *chan; 1274 struct nvkm_device *device = subdev->device;
1275 struct nvkm_memory *chan;
1269 struct gf100_grctx info; 1276 struct gf100_grctx info;
1270 int ret, i; 1277 int ret, i;
1278 u64 addr;
1271 1279
1272 /* allocate memory to for a "channel", which we'll use to generate 1280 /* allocate memory to for a "channel", which we'll use to generate
1273 * the default context values 1281 * the default context values
1274 */ 1282 */
1275 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x80000 + priv->size, 1283 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x80000 + gr->size,
1276 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &chan); 1284 0x1000, true, &chan);
1277 if (ret) { 1285 if (ret) {
1278 nv_error(priv, "failed to allocate channel memory, %d\n", ret); 1286 nvkm_error(subdev, "failed to allocate chan memory, %d\n", ret);
1279 return ret; 1287 return ret;
1280 } 1288 }
1281 1289
1290 addr = nvkm_memory_addr(chan);
1291
1282 /* PGD pointer */ 1292 /* PGD pointer */
1283 nv_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000)); 1293 nvkm_kmap(chan);
1284 nv_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000)); 1294 nvkm_wo32(chan, 0x0200, lower_32_bits(addr + 0x1000));
1285 nv_wo32(chan, 0x0208, 0xffffffff); 1295 nvkm_wo32(chan, 0x0204, upper_32_bits(addr + 0x1000));
1286 nv_wo32(chan, 0x020c, 0x000000ff); 1296 nvkm_wo32(chan, 0x0208, 0xffffffff);
1297 nvkm_wo32(chan, 0x020c, 0x000000ff);
1287 1298
1288 /* PGT[0] pointer */ 1299 /* PGT[0] pointer */
1289 nv_wo32(chan, 0x1000, 0x00000000); 1300 nvkm_wo32(chan, 0x1000, 0x00000000);
1290 nv_wo32(chan, 0x1004, 0x00000001 | (chan->addr + 0x2000) >> 8); 1301 nvkm_wo32(chan, 0x1004, 0x00000001 | (addr + 0x2000) >> 8);
1291 1302
1292 /* identity-map the whole "channel" into its own vm */ 1303 /* identity-map the whole "channel" into its own vm */
1293 for (i = 0; i < chan->size / 4096; i++) { 1304 for (i = 0; i < nvkm_memory_size(chan) / 4096; i++) {
1294 u64 addr = ((chan->addr + (i * 4096)) >> 8) | 1; 1305 u64 addr = ((nvkm_memory_addr(chan) + (i * 4096)) >> 8) | 1;
1295 nv_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr)); 1306 nvkm_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
1296 nv_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr)); 1307 nvkm_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
1297 } 1308 }
1298 1309
1299 /* context pointer (virt) */ 1310 /* context pointer (virt) */
1300 nv_wo32(chan, 0x0210, 0x00080004); 1311 nvkm_wo32(chan, 0x0210, 0x00080004);
1301 nv_wo32(chan, 0x0214, 0x00000000); 1312 nvkm_wo32(chan, 0x0214, 0x00000000);
1313 nvkm_done(chan);
1302 1314
1303 bar->flush(bar); 1315 nvkm_wr32(device, 0x100cb8, (addr + 0x1000) >> 8);
1304 1316 nvkm_wr32(device, 0x100cbc, 0x80000001);
1305 nv_wr32(priv, 0x100cb8, (chan->addr + 0x1000) >> 8); 1317 nvkm_msec(device, 2000,
1306 nv_wr32(priv, 0x100cbc, 0x80000001); 1318 if (nvkm_rd32(device, 0x100c80) & 0x00008000)
1307 nv_wait(priv, 0x100c80, 0x00008000, 0x00008000); 1319 break;
1320 );
1308 1321
1309 /* setup default state for mmio list construction */ 1322 /* setup default state for mmio list construction */
1310 info.priv = priv; 1323 info.gr = gr;
1311 info.data = priv->mmio_data; 1324 info.data = gr->mmio_data;
1312 info.mmio = priv->mmio_list; 1325 info.mmio = gr->mmio_list;
1313 info.addr = 0x2000 + (i * 8); 1326 info.addr = 0x2000 + (i * 8);
1314 info.buffer_nr = 0; 1327 info.buffer_nr = 0;
1315 1328
1316 /* make channel current */ 1329 /* make channel current */
1317 if (priv->firmware) { 1330 if (gr->firmware) {
1318 nv_wr32(priv, 0x409840, 0x00000030); 1331 nvkm_wr32(device, 0x409840, 0x00000030);
1319 nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12); 1332 nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
1320 nv_wr32(priv, 0x409504, 0x00000003); 1333 nvkm_wr32(device, 0x409504, 0x00000003);
1321 if (!nv_wait(priv, 0x409800, 0x00000010, 0x00000010)) 1334 nvkm_msec(device, 2000,
1322 nv_error(priv, "load_ctx timeout\n"); 1335 if (nvkm_rd32(device, 0x409800) & 0x00000010)
1323 1336 break;
1324 nv_wo32(chan, 0x8001c, 1); 1337 );
1325 nv_wo32(chan, 0x80020, 0); 1338
1326 nv_wo32(chan, 0x80028, 0); 1339 nvkm_kmap(chan);
1327 nv_wo32(chan, 0x8002c, 0); 1340 nvkm_wo32(chan, 0x8001c, 1);
1328 bar->flush(bar); 1341 nvkm_wo32(chan, 0x80020, 0);
1342 nvkm_wo32(chan, 0x80028, 0);
1343 nvkm_wo32(chan, 0x8002c, 0);
1344 nvkm_done(chan);
1329 } else { 1345 } else {
1330 nv_wr32(priv, 0x409840, 0x80000000); 1346 nvkm_wr32(device, 0x409840, 0x80000000);
1331 nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12); 1347 nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
1332 nv_wr32(priv, 0x409504, 0x00000001); 1348 nvkm_wr32(device, 0x409504, 0x00000001);
1333 if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) 1349 nvkm_msec(device, 2000,
1334 nv_error(priv, "HUB_SET_CHAN timeout\n"); 1350 if (nvkm_rd32(device, 0x409800) & 0x80000000)
1351 break;
1352 );
1335 } 1353 }
1336 1354
1337 oclass->main(priv, &info); 1355 grctx->main(gr, &info);
1338 1356
1339 /* trigger a context unload by unsetting the "next channel valid" bit 1357 /* trigger a context unload by unsetting the "next channel valid" bit
1340 * and faking a context switch interrupt 1358 * and faking a context switch interrupt
1341 */ 1359 */
1342 nv_mask(priv, 0x409b04, 0x80000000, 0x00000000); 1360 nvkm_mask(device, 0x409b04, 0x80000000, 0x00000000);
1343 nv_wr32(priv, 0x409000, 0x00000100); 1361 nvkm_wr32(device, 0x409000, 0x00000100);
1344 if (!nv_wait(priv, 0x409b00, 0x80000000, 0x00000000)) { 1362 if (nvkm_msec(device, 2000,
1345 nv_error(priv, "grctx template channel unload timeout\n"); 1363 if (!(nvkm_rd32(device, 0x409b00) & 0x80000000))
1364 break;
1365 ) < 0) {
1346 ret = -EBUSY; 1366 ret = -EBUSY;
1347 goto done; 1367 goto done;
1348 } 1368 }
1349 1369
1350 priv->data = kmalloc(priv->size, GFP_KERNEL); 1370 gr->data = kmalloc(gr->size, GFP_KERNEL);
1351 if (priv->data) { 1371 if (gr->data) {
1352 for (i = 0; i < priv->size; i += 4) 1372 nvkm_kmap(chan);
1353 priv->data[i / 4] = nv_ro32(chan, 0x80000 + i); 1373 for (i = 0; i < gr->size; i += 4)
1374 gr->data[i / 4] = nvkm_ro32(chan, 0x80000 + i);
1375 nvkm_done(chan);
1354 ret = 0; 1376 ret = 0;
1355 } else { 1377 } else {
1356 ret = -ENOMEM; 1378 ret = -ENOMEM;
1357 } 1379 }
1358 1380
1359done: 1381done:
1360 nvkm_gpuobj_ref(NULL, &chan); 1382 nvkm_memory_del(&chan);
1361 return ret; 1383 return ret;
1362} 1384}
1363 1385
1364struct nvkm_oclass * 1386const struct gf100_grctx_func
1365gf100_grctx_oclass = &(struct gf100_grctx_oclass) { 1387gf100_grctx = {
1366 .base.handle = NV_ENGCTX(GR, 0xc0),
1367 .base.ofuncs = &(struct nvkm_ofuncs) {
1368 .ctor = gf100_gr_context_ctor,
1369 .dtor = gf100_gr_context_dtor,
1370 .init = _nvkm_gr_context_init,
1371 .fini = _nvkm_gr_context_fini,
1372 .rd32 = _nvkm_gr_context_rd32,
1373 .wr32 = _nvkm_gr_context_wr32,
1374 },
1375 .main = gf100_grctx_generate_main, 1388 .main = gf100_grctx_generate_main,
1376 .unkn = gf100_grctx_generate_unkn, 1389 .unkn = gf100_grctx_generate_unkn,
1377 .hub = gf100_grctx_pack_hub, 1390 .hub = gf100_grctx_pack_hub,
@@ -1387,4 +1400,4 @@ gf100_grctx_oclass = &(struct gf100_grctx_oclass) {
1387 .attrib = gf100_grctx_generate_attrib, 1400 .attrib = gf100_grctx_generate_attrib,
1388 .attrib_nr_max = 0x324, 1401 .attrib_nr_max = 0x324,
1389 .attrib_nr = 0x218, 1402 .attrib_nr = 0x218,
1390}.base; 1403};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 3676a3342bc5..3c64040ec5a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -3,7 +3,7 @@
3#include "gf100.h" 3#include "gf100.h"
4 4
5struct gf100_grctx { 5struct gf100_grctx {
6 struct gf100_gr_priv *priv; 6 struct gf100_gr *gr;
7 struct gf100_gr_data *data; 7 struct gf100_gr_data *data;
8 struct gf100_gr_mmio *mmio; 8 struct gf100_gr_mmio *mmio;
9 int buffer_nr; 9 int buffer_nr;
@@ -19,12 +19,11 @@ void gf100_grctx_mmio_item(struct gf100_grctx *, u32 addr, u32 data, int s, int)
19#define mmio_skip(a,b,c) mmio_refn((a), (b), (c), -1, -1) 19#define mmio_skip(a,b,c) mmio_refn((a), (b), (c), -1, -1)
20#define mmio_wr32(a,b,c) mmio_refn((a), (b), (c), 0, -1) 20#define mmio_wr32(a,b,c) mmio_refn((a), (b), (c), 0, -1)
21 21
22struct gf100_grctx_oclass { 22struct gf100_grctx_func {
23 struct nvkm_oclass base;
24 /* main context generation function */ 23 /* main context generation function */
25 void (*main)(struct gf100_gr_priv *, struct gf100_grctx *); 24 void (*main)(struct gf100_gr *, struct gf100_grctx *);
26 /* context-specific modify-on-first-load list generation function */ 25 /* context-specific modify-on-first-load list generation function */
27 void (*unkn)(struct gf100_gr_priv *); 26 void (*unkn)(struct gf100_gr *);
28 /* mmio context data */ 27 /* mmio context data */
29 const struct gf100_gr_pack *hub; 28 const struct gf100_gr_pack *hub;
30 const struct gf100_gr_pack *gpc; 29 const struct gf100_gr_pack *gpc;
@@ -50,60 +49,61 @@ struct gf100_grctx_oclass {
50 u32 alpha_nr; 49 u32 alpha_nr;
51}; 50};
52 51
53static inline const struct gf100_grctx_oclass * 52extern const struct gf100_grctx_func gf100_grctx;
54gf100_grctx_impl(struct gf100_gr_priv *priv) 53int gf100_grctx_generate(struct gf100_gr *);
55{ 54void gf100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
56 return (void *)nv_engine(priv)->cclass;
57}
58
59extern struct nvkm_oclass *gf100_grctx_oclass;
60int gf100_grctx_generate(struct gf100_gr_priv *);
61void gf100_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
62void gf100_grctx_generate_bundle(struct gf100_grctx *); 55void gf100_grctx_generate_bundle(struct gf100_grctx *);
63void gf100_grctx_generate_pagepool(struct gf100_grctx *); 56void gf100_grctx_generate_pagepool(struct gf100_grctx *);
64void gf100_grctx_generate_attrib(struct gf100_grctx *); 57void gf100_grctx_generate_attrib(struct gf100_grctx *);
65void gf100_grctx_generate_unkn(struct gf100_gr_priv *); 58void gf100_grctx_generate_unkn(struct gf100_gr *);
66void gf100_grctx_generate_tpcid(struct gf100_gr_priv *); 59void gf100_grctx_generate_tpcid(struct gf100_gr *);
67void gf100_grctx_generate_r406028(struct gf100_gr_priv *); 60void gf100_grctx_generate_r406028(struct gf100_gr *);
68void gf100_grctx_generate_r4060a8(struct gf100_gr_priv *); 61void gf100_grctx_generate_r4060a8(struct gf100_gr *);
69void gf100_grctx_generate_r418bb8(struct gf100_gr_priv *); 62void gf100_grctx_generate_r418bb8(struct gf100_gr *);
70void gf100_grctx_generate_r406800(struct gf100_gr_priv *); 63void gf100_grctx_generate_r406800(struct gf100_gr *);
71 64
72extern struct nvkm_oclass *gf108_grctx_oclass; 65extern const struct gf100_grctx_func gf108_grctx;
73void gf108_grctx_generate_attrib(struct gf100_grctx *); 66void gf108_grctx_generate_attrib(struct gf100_grctx *);
74void gf108_grctx_generate_unkn(struct gf100_gr_priv *); 67void gf108_grctx_generate_unkn(struct gf100_gr *);
75 68
76extern struct nvkm_oclass *gf104_grctx_oclass; 69extern const struct gf100_grctx_func gf104_grctx;
77extern struct nvkm_oclass *gf110_grctx_oclass; 70extern const struct gf100_grctx_func gf110_grctx;
78 71
79extern struct nvkm_oclass *gf117_grctx_oclass; 72extern const struct gf100_grctx_func gf117_grctx;
80void gf117_grctx_generate_attrib(struct gf100_grctx *); 73void gf117_grctx_generate_attrib(struct gf100_grctx *);
81 74
82extern struct nvkm_oclass *gf119_grctx_oclass; 75extern const struct gf100_grctx_func gf119_grctx;
83 76
84extern struct nvkm_oclass *gk104_grctx_oclass; 77extern const struct gf100_grctx_func gk104_grctx;
85extern struct nvkm_oclass *gk20a_grctx_oclass; 78extern const struct gf100_grctx_func gk20a_grctx;
86void gk104_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *); 79void gk104_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
87void gk104_grctx_generate_bundle(struct gf100_grctx *); 80void gk104_grctx_generate_bundle(struct gf100_grctx *);
88void gk104_grctx_generate_pagepool(struct gf100_grctx *); 81void gk104_grctx_generate_pagepool(struct gf100_grctx *);
89void gk104_grctx_generate_unkn(struct gf100_gr_priv *); 82void gk104_grctx_generate_unkn(struct gf100_gr *);
90void gk104_grctx_generate_r418bb8(struct gf100_gr_priv *); 83void gk104_grctx_generate_r418bb8(struct gf100_gr *);
91void gk104_grctx_generate_rop_active_fbps(struct gf100_gr_priv *); 84void gk104_grctx_generate_rop_active_fbps(struct gf100_gr *);
85
92 86
87void gm107_grctx_generate_bundle(struct gf100_grctx *);
88void gm107_grctx_generate_pagepool(struct gf100_grctx *);
89void gm107_grctx_generate_attrib(struct gf100_grctx *);
93 90
94extern struct nvkm_oclass *gk110_grctx_oclass; 91extern const struct gf100_grctx_func gk110_grctx;
95extern struct nvkm_oclass *gk110b_grctx_oclass; 92extern const struct gf100_grctx_func gk110b_grctx;
96extern struct nvkm_oclass *gk208_grctx_oclass; 93extern const struct gf100_grctx_func gk208_grctx;
97 94
98extern struct nvkm_oclass *gm107_grctx_oclass; 95extern const struct gf100_grctx_func gm107_grctx;
99void gm107_grctx_generate_bundle(struct gf100_grctx *); 96void gm107_grctx_generate_bundle(struct gf100_grctx *);
100void gm107_grctx_generate_pagepool(struct gf100_grctx *); 97void gm107_grctx_generate_pagepool(struct gf100_grctx *);
101void gm107_grctx_generate_attrib(struct gf100_grctx *); 98void gm107_grctx_generate_attrib(struct gf100_grctx *);
102 99
103extern struct nvkm_oclass *gm204_grctx_oclass; 100extern const struct gf100_grctx_func gm204_grctx;
104void gm204_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *); 101void gm204_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
102void gm204_grctx_generate_tpcid(struct gf100_gr *);
103void gm204_grctx_generate_405b60(struct gf100_gr *);
105 104
106extern struct nvkm_oclass *gm206_grctx_oclass; 105extern const struct gf100_grctx_func gm206_grctx;
106extern const struct gf100_grctx_func gm20b_grctx;
107 107
108/* context init value lists */ 108/* context init value lists */
109 109
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
index c5a8d55e2cac..54fd74e9cca0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
@@ -79,17 +79,8 @@ gf104_grctx_pack_tpc[] = {
79 * PGRAPH context implementation 79 * PGRAPH context implementation
80 ******************************************************************************/ 80 ******************************************************************************/
81 81
82struct nvkm_oclass * 82const struct gf100_grctx_func
83gf104_grctx_oclass = &(struct gf100_grctx_oclass) { 83gf104_grctx = {
84 .base.handle = NV_ENGCTX(GR, 0xc3),
85 .base.ofuncs = &(struct nvkm_ofuncs) {
86 .ctor = gf100_gr_context_ctor,
87 .dtor = gf100_gr_context_dtor,
88 .init = _nvkm_gr_context_init,
89 .fini = _nvkm_gr_context_fini,
90 .rd32 = _nvkm_gr_context_rd32,
91 .wr32 = _nvkm_gr_context_wr32,
92 },
93 .main = gf100_grctx_generate_main, 84 .main = gf100_grctx_generate_main,
94 .unkn = gf100_grctx_generate_unkn, 85 .unkn = gf100_grctx_generate_unkn,
95 .hub = gf100_grctx_pack_hub, 86 .hub = gf100_grctx_pack_hub,
@@ -105,4 +96,4 @@ gf104_grctx_oclass = &(struct gf100_grctx_oclass) {
105 .attrib = gf100_grctx_generate_attrib, 96 .attrib = gf100_grctx_generate_attrib,
106 .attrib_nr_max = 0x324, 97 .attrib_nr_max = 0x324,
107 .attrib_nr = 0x218, 98 .attrib_nr = 0x218,
108}.base; 99};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index 87c844a5f34b..505cdcbfc085 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -730,18 +730,18 @@ gf108_grctx_pack_tpc[] = {
730void 730void
731gf108_grctx_generate_attrib(struct gf100_grctx *info) 731gf108_grctx_generate_attrib(struct gf100_grctx *info)
732{ 732{
733 struct gf100_gr_priv *priv = info->priv; 733 struct gf100_gr *gr = info->gr;
734 const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv); 734 const struct gf100_grctx_func *grctx = gr->func->grctx;
735 const u32 alpha = impl->alpha_nr; 735 const u32 alpha = grctx->alpha_nr;
736 const u32 beta = impl->attrib_nr; 736 const u32 beta = grctx->attrib_nr;
737 const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max); 737 const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
738 const u32 access = NV_MEM_ACCESS_RW; 738 const u32 access = NV_MEM_ACCESS_RW;
739 const int s = 12; 739 const int s = 12;
740 const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access); 740 const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
741 const int timeslice_mode = 1; 741 const int timeslice_mode = 1;
742 const int max_batches = 0xffff; 742 const int max_batches = 0xffff;
743 u32 bo = 0; 743 u32 bo = 0;
744 u32 ao = bo + impl->attrib_nr_max * priv->tpc_total; 744 u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
745 int gpc, tpc; 745 int gpc, tpc;
746 746
747 mmio_refn(info, 0x418810, 0x80000000, s, b); 747 mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -749,43 +749,35 @@ gf108_grctx_generate_attrib(struct gf100_grctx *info)
749 mmio_wr32(info, 0x405830, (beta << 16) | alpha); 749 mmio_wr32(info, 0x405830, (beta << 16) | alpha);
750 mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches); 750 mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
751 751
752 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 752 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
753 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { 753 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
754 const u32 a = alpha; 754 const u32 a = alpha;
755 const u32 b = beta; 755 const u32 b = beta;
756 const u32 t = timeslice_mode; 756 const u32 t = timeslice_mode;
757 const u32 o = TPC_UNIT(gpc, tpc, 0x500); 757 const u32 o = TPC_UNIT(gpc, tpc, 0x500);
758 mmio_skip(info, o + 0x20, (t << 28) | (b << 16) | ++bo); 758 mmio_skip(info, o + 0x20, (t << 28) | (b << 16) | ++bo);
759 mmio_wr32(info, o + 0x20, (t << 28) | (b << 16) | --bo); 759 mmio_wr32(info, o + 0x20, (t << 28) | (b << 16) | --bo);
760 bo += impl->attrib_nr_max; 760 bo += grctx->attrib_nr_max;
761 mmio_wr32(info, o + 0x44, (a << 16) | ao); 761 mmio_wr32(info, o + 0x44, (a << 16) | ao);
762 ao += impl->alpha_nr_max; 762 ao += grctx->alpha_nr_max;
763 } 763 }
764 } 764 }
765} 765}
766 766
767void 767void
768gf108_grctx_generate_unkn(struct gf100_gr_priv *priv) 768gf108_grctx_generate_unkn(struct gf100_gr *gr)
769{ 769{
770 nv_mask(priv, 0x418c6c, 0x00000001, 0x00000001); 770 struct nvkm_device *device = gr->base.engine.subdev.device;
771 nv_mask(priv, 0x41980c, 0x00000010, 0x00000010); 771 nvkm_mask(device, 0x418c6c, 0x00000001, 0x00000001);
772 nv_mask(priv, 0x419814, 0x00000004, 0x00000004); 772 nvkm_mask(device, 0x41980c, 0x00000010, 0x00000010);
773 nv_mask(priv, 0x4064c0, 0x80000000, 0x80000000); 773 nvkm_mask(device, 0x419814, 0x00000004, 0x00000004);
774 nv_mask(priv, 0x405800, 0x08000000, 0x08000000); 774 nvkm_mask(device, 0x4064c0, 0x80000000, 0x80000000);
775 nv_mask(priv, 0x419c00, 0x00000008, 0x00000008); 775 nvkm_mask(device, 0x405800, 0x08000000, 0x08000000);
776 nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
776} 777}
777 778
778struct nvkm_oclass * 779const struct gf100_grctx_func
779gf108_grctx_oclass = &(struct gf100_grctx_oclass) { 780gf108_grctx = {
780 .base.handle = NV_ENGCTX(GR, 0xc1),
781 .base.ofuncs = &(struct nvkm_ofuncs) {
782 .ctor = gf100_gr_context_ctor,
783 .dtor = gf100_gr_context_dtor,
784 .init = _nvkm_gr_context_init,
785 .fini = _nvkm_gr_context_fini,
786 .rd32 = _nvkm_gr_context_rd32,
787 .wr32 = _nvkm_gr_context_wr32,
788 },
789 .main = gf100_grctx_generate_main, 781 .main = gf100_grctx_generate_main,
790 .unkn = gf108_grctx_generate_unkn, 782 .unkn = gf108_grctx_generate_unkn,
791 .hub = gf108_grctx_pack_hub, 783 .hub = gf108_grctx_pack_hub,
@@ -803,4 +795,4 @@ gf108_grctx_oclass = &(struct gf100_grctx_oclass) {
803 .attrib_nr = 0x218, 795 .attrib_nr = 0x218,
804 .alpha_nr_max = 0x324, 796 .alpha_nr_max = 0x324,
805 .alpha_nr = 0x218, 797 .alpha_nr = 0x218,
806}.base; 798};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
index b3acd931b978..7df398b53f8f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
@@ -330,17 +330,8 @@ gf110_grctx_pack_gpc[] = {
330 * PGRAPH context implementation 330 * PGRAPH context implementation
331 ******************************************************************************/ 331 ******************************************************************************/
332 332
333struct nvkm_oclass * 333const struct gf100_grctx_func
334gf110_grctx_oclass = &(struct gf100_grctx_oclass) { 334gf110_grctx = {
335 .base.handle = NV_ENGCTX(GR, 0xc8),
336 .base.ofuncs = &(struct nvkm_ofuncs) {
337 .ctor = gf100_gr_context_ctor,
338 .dtor = gf100_gr_context_dtor,
339 .init = _nvkm_gr_context_init,
340 .fini = _nvkm_gr_context_fini,
341 .rd32 = _nvkm_gr_context_rd32,
342 .wr32 = _nvkm_gr_context_wr32,
343 },
344 .main = gf100_grctx_generate_main, 335 .main = gf100_grctx_generate_main,
345 .unkn = gf100_grctx_generate_unkn, 336 .unkn = gf100_grctx_generate_unkn,
346 .hub = gf100_grctx_pack_hub, 337 .hub = gf100_grctx_pack_hub,
@@ -356,4 +347,4 @@ gf110_grctx_oclass = &(struct gf100_grctx_oclass) {
356 .attrib = gf100_grctx_generate_attrib, 347 .attrib = gf100_grctx_generate_attrib,
357 .attrib_nr_max = 0x324, 348 .attrib_nr_max = 0x324,
358 .attrib_nr = 0x218, 349 .attrib_nr = 0x218,
359}.base; 350};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 9bbe2c97552e..b5b875928aba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -182,18 +182,18 @@ gf117_grctx_pack_ppc[] = {
182void 182void
183gf117_grctx_generate_attrib(struct gf100_grctx *info) 183gf117_grctx_generate_attrib(struct gf100_grctx *info)
184{ 184{
185 struct gf100_gr_priv *priv = info->priv; 185 struct gf100_gr *gr = info->gr;
186 const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv); 186 const struct gf100_grctx_func *grctx = gr->func->grctx;
187 const u32 alpha = impl->alpha_nr; 187 const u32 alpha = grctx->alpha_nr;
188 const u32 beta = impl->attrib_nr; 188 const u32 beta = grctx->attrib_nr;
189 const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max); 189 const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
190 const u32 access = NV_MEM_ACCESS_RW; 190 const u32 access = NV_MEM_ACCESS_RW;
191 const int s = 12; 191 const int s = 12;
192 const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access); 192 const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
193 const int timeslice_mode = 1; 193 const int timeslice_mode = 1;
194 const int max_batches = 0xffff; 194 const int max_batches = 0xffff;
195 u32 bo = 0; 195 u32 bo = 0;
196 u32 ao = bo + impl->attrib_nr_max * priv->tpc_total; 196 u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
197 int gpc, ppc; 197 int gpc, ppc;
198 198
199 mmio_refn(info, 0x418810, 0x80000000, s, b); 199 mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -201,68 +201,60 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
201 mmio_wr32(info, 0x405830, (beta << 16) | alpha); 201 mmio_wr32(info, 0x405830, (beta << 16) | alpha);
202 mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches); 202 mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
203 203
204 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 204 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
205 for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) { 205 for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++) {
206 const u32 a = alpha * priv->ppc_tpc_nr[gpc][ppc]; 206 const u32 a = alpha * gr->ppc_tpc_nr[gpc][ppc];
207 const u32 b = beta * priv->ppc_tpc_nr[gpc][ppc]; 207 const u32 b = beta * gr->ppc_tpc_nr[gpc][ppc];
208 const u32 t = timeslice_mode; 208 const u32 t = timeslice_mode;
209 const u32 o = PPC_UNIT(gpc, ppc, 0); 209 const u32 o = PPC_UNIT(gpc, ppc, 0);
210 mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo); 210 mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
211 mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo); 211 mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
212 bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc]; 212 bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
213 mmio_wr32(info, o + 0xe4, (a << 16) | ao); 213 mmio_wr32(info, o + 0xe4, (a << 16) | ao);
214 ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc]; 214 ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
215 } 215 }
216 } 216 }
217} 217}
218 218
219void 219void
220gf117_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info) 220gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
221{ 221{
222 struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; 222 struct nvkm_device *device = gr->base.engine.subdev.device;
223 const struct gf100_grctx_func *grctx = gr->func->grctx;
223 int i; 224 int i;
224 225
225 nvkm_mc(priv)->unk260(nvkm_mc(priv), 0); 226 nvkm_mc_unk260(device->mc, 0);
226 227
227 gf100_gr_mmio(priv, oclass->hub); 228 gf100_gr_mmio(gr, grctx->hub);
228 gf100_gr_mmio(priv, oclass->gpc); 229 gf100_gr_mmio(gr, grctx->gpc);
229 gf100_gr_mmio(priv, oclass->zcull); 230 gf100_gr_mmio(gr, grctx->zcull);
230 gf100_gr_mmio(priv, oclass->tpc); 231 gf100_gr_mmio(gr, grctx->tpc);
231 gf100_gr_mmio(priv, oclass->ppc); 232 gf100_gr_mmio(gr, grctx->ppc);
232 233
233 nv_wr32(priv, 0x404154, 0x00000000); 234 nvkm_wr32(device, 0x404154, 0x00000000);
234 235
235 oclass->bundle(info); 236 grctx->bundle(info);
236 oclass->pagepool(info); 237 grctx->pagepool(info);
237 oclass->attrib(info); 238 grctx->attrib(info);
238 oclass->unkn(priv); 239 grctx->unkn(gr);
239 240
240 gf100_grctx_generate_tpcid(priv); 241 gf100_grctx_generate_tpcid(gr);
241 gf100_grctx_generate_r406028(priv); 242 gf100_grctx_generate_r406028(gr);
242 gf100_grctx_generate_r4060a8(priv); 243 gf100_grctx_generate_r4060a8(gr);
243 gk104_grctx_generate_r418bb8(priv); 244 gk104_grctx_generate_r418bb8(gr);
244 gf100_grctx_generate_r406800(priv); 245 gf100_grctx_generate_r406800(gr);
245 246
246 for (i = 0; i < 8; i++) 247 for (i = 0; i < 8; i++)
247 nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000); 248 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
248 249
249 gf100_gr_icmd(priv, oclass->icmd); 250 gf100_gr_icmd(gr, grctx->icmd);
250 nv_wr32(priv, 0x404154, 0x00000400); 251 nvkm_wr32(device, 0x404154, 0x00000400);
251 gf100_gr_mthd(priv, oclass->mthd); 252 gf100_gr_mthd(gr, grctx->mthd);
252 nvkm_mc(priv)->unk260(nvkm_mc(priv), 1); 253 nvkm_mc_unk260(device->mc, 1);
253} 254}
254 255
255struct nvkm_oclass * 256const struct gf100_grctx_func
256gf117_grctx_oclass = &(struct gf100_grctx_oclass) { 257gf117_grctx = {
257 .base.handle = NV_ENGCTX(GR, 0xd7),
258 .base.ofuncs = &(struct nvkm_ofuncs) {
259 .ctor = gf100_gr_context_ctor,
260 .dtor = gf100_gr_context_dtor,
261 .init = _nvkm_gr_context_init,
262 .fini = _nvkm_gr_context_fini,
263 .rd32 = _nvkm_gr_context_rd32,
264 .wr32 = _nvkm_gr_context_wr32,
265 },
266 .main = gf117_grctx_generate_main, 258 .main = gf117_grctx_generate_main,
267 .unkn = gk104_grctx_generate_unkn, 259 .unkn = gk104_grctx_generate_unkn,
268 .hub = gf117_grctx_pack_hub, 260 .hub = gf117_grctx_pack_hub,
@@ -281,4 +273,4 @@ gf117_grctx_oclass = &(struct gf100_grctx_oclass) {
281 .attrib_nr = 0x218, 273 .attrib_nr = 0x218,
282 .alpha_nr_max = 0x7ff, 274 .alpha_nr_max = 0x7ff,
283 .alpha_nr = 0x324, 275 .alpha_nr = 0x324,
284}.base; 276};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
index 8d8761443809..605185b078be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
@@ -498,17 +498,8 @@ gf119_grctx_pack_tpc[] = {
498 * PGRAPH context implementation 498 * PGRAPH context implementation
499 ******************************************************************************/ 499 ******************************************************************************/
500 500
501struct nvkm_oclass * 501const struct gf100_grctx_func
502gf119_grctx_oclass = &(struct gf100_grctx_oclass) { 502gf119_grctx = {
503 .base.handle = NV_ENGCTX(GR, 0xd9),
504 .base.ofuncs = &(struct nvkm_ofuncs) {
505 .ctor = gf100_gr_context_ctor,
506 .dtor = gf100_gr_context_dtor,
507 .init = _nvkm_gr_context_init,
508 .fini = _nvkm_gr_context_fini,
509 .rd32 = _nvkm_gr_context_rd32,
510 .wr32 = _nvkm_gr_context_wr32,
511 },
512 .main = gf100_grctx_generate_main, 503 .main = gf100_grctx_generate_main,
513 .unkn = gf108_grctx_generate_unkn, 504 .unkn = gf108_grctx_generate_unkn,
514 .hub = gf119_grctx_pack_hub, 505 .hub = gf119_grctx_pack_hub,
@@ -526,4 +517,4 @@ gf119_grctx_oclass = &(struct gf100_grctx_oclass) {
526 .attrib_nr = 0x218, 517 .attrib_nr = 0x218,
527 .alpha_nr_max = 0x324, 518 .alpha_nr_max = 0x324,
528 .alpha_nr = 0x218, 519 .alpha_nr = 0x218,
529}.base; 520};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index b12f6a9fd926..a843e3689c3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -843,27 +843,27 @@ gk104_grctx_pack_ppc[] = {
843void 843void
844gk104_grctx_generate_bundle(struct gf100_grctx *info) 844gk104_grctx_generate_bundle(struct gf100_grctx *info)
845{ 845{
846 const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv); 846 const struct gf100_grctx_func *grctx = info->gr->func->grctx;
847 const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth, 847 const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
848 impl->bundle_size / 0x20); 848 grctx->bundle_size / 0x20);
849 const u32 token_limit = impl->bundle_token_limit; 849 const u32 token_limit = grctx->bundle_token_limit;
850 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; 850 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
851 const int s = 8; 851 const int s = 8;
852 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); 852 const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
853 mmio_refn(info, 0x408004, 0x00000000, s, b); 853 mmio_refn(info, 0x408004, 0x00000000, s, b);
854 mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); 854 mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
855 mmio_refn(info, 0x418808, 0x00000000, s, b); 855 mmio_refn(info, 0x418808, 0x00000000, s, b);
856 mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s)); 856 mmio_wr32(info, 0x41880c, 0x80000000 | (grctx->bundle_size >> s));
857 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); 857 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
858} 858}
859 859
860void 860void
861gk104_grctx_generate_pagepool(struct gf100_grctx *info) 861gk104_grctx_generate_pagepool(struct gf100_grctx *info)
862{ 862{
863 const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv); 863 const struct gf100_grctx_func *grctx = info->gr->func->grctx;
864 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; 864 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
865 const int s = 8; 865 const int s = 8;
866 const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access); 866 const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
867 mmio_refn(info, 0x40800c, 0x00000000, s, b); 867 mmio_refn(info, 0x40800c, 0x00000000, s, b);
868 mmio_wr32(info, 0x408010, 0x80000000); 868 mmio_wr32(info, 0x408010, 0x80000000);
869 mmio_refn(info, 0x419004, 0x00000000, s, b); 869 mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -872,31 +872,33 @@ gk104_grctx_generate_pagepool(struct gf100_grctx *info)
872} 872}
873 873
874void 874void
875gk104_grctx_generate_unkn(struct gf100_gr_priv *priv) 875gk104_grctx_generate_unkn(struct gf100_gr *gr)
876{ 876{
877 nv_mask(priv, 0x418c6c, 0x00000001, 0x00000001); 877 struct nvkm_device *device = gr->base.engine.subdev.device;
878 nv_mask(priv, 0x41980c, 0x00000010, 0x00000010); 878 nvkm_mask(device, 0x418c6c, 0x00000001, 0x00000001);
879 nv_mask(priv, 0x41be08, 0x00000004, 0x00000004); 879 nvkm_mask(device, 0x41980c, 0x00000010, 0x00000010);
880 nv_mask(priv, 0x4064c0, 0x80000000, 0x80000000); 880 nvkm_mask(device, 0x41be08, 0x00000004, 0x00000004);
881 nv_mask(priv, 0x405800, 0x08000000, 0x08000000); 881 nvkm_mask(device, 0x4064c0, 0x80000000, 0x80000000);
882 nv_mask(priv, 0x419c00, 0x00000008, 0x00000008); 882 nvkm_mask(device, 0x405800, 0x08000000, 0x08000000);
883 nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
883} 884}
884 885
885void 886void
886gk104_grctx_generate_r418bb8(struct gf100_gr_priv *priv) 887gk104_grctx_generate_r418bb8(struct gf100_gr *gr)
887{ 888{
889 struct nvkm_device *device = gr->base.engine.subdev.device;
888 u32 data[6] = {}, data2[2] = {}; 890 u32 data[6] = {}, data2[2] = {};
889 u8 tpcnr[GPC_MAX]; 891 u8 tpcnr[GPC_MAX];
890 u8 shift, ntpcv; 892 u8 shift, ntpcv;
891 int gpc, tpc, i; 893 int gpc, tpc, i;
892 894
893 /* calculate first set of magics */ 895 /* calculate first set of magics */
894 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); 896 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
895 897
896 gpc = -1; 898 gpc = -1;
897 for (tpc = 0; tpc < priv->tpc_total; tpc++) { 899 for (tpc = 0; tpc < gr->tpc_total; tpc++) {
898 do { 900 do {
899 gpc = (gpc + 1) % priv->gpc_nr; 901 gpc = (gpc + 1) % gr->gpc_nr;
900 } while (!tpcnr[gpc]); 902 } while (!tpcnr[gpc]);
901 tpcnr[gpc]--; 903 tpcnr[gpc]--;
902 904
@@ -908,7 +910,7 @@ gk104_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
908 910
909 /* and the second... */ 911 /* and the second... */
910 shift = 0; 912 shift = 0;
911 ntpcv = priv->tpc_total; 913 ntpcv = gr->tpc_total;
912 while (!(ntpcv & (1 << 4))) { 914 while (!(ntpcv & (1 << 4))) {
913 ntpcv <<= 1; 915 ntpcv <<= 1;
914 shift++; 916 shift++;
@@ -921,86 +923,79 @@ gk104_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
921 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5); 923 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
922 924
923 /* GPC_BROADCAST */ 925 /* GPC_BROADCAST */
924 nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) | 926 nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
925 priv->magic_not_rop_nr); 927 gr->magic_not_rop_nr);
926 for (i = 0; i < 6; i++) 928 for (i = 0; i < 6; i++)
927 nv_wr32(priv, 0x418b08 + (i * 4), data[i]); 929 nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
928 930
929 /* GPC_BROADCAST.TP_BROADCAST */ 931 /* GPC_BROADCAST.TP_BROADCAST */
930 nv_wr32(priv, 0x41bfd0, (priv->tpc_total << 8) | 932 nvkm_wr32(device, 0x41bfd0, (gr->tpc_total << 8) |
931 priv->magic_not_rop_nr | data2[0]); 933 gr->magic_not_rop_nr | data2[0]);
932 nv_wr32(priv, 0x41bfe4, data2[1]); 934 nvkm_wr32(device, 0x41bfe4, data2[1]);
933 for (i = 0; i < 6; i++) 935 for (i = 0; i < 6; i++)
934 nv_wr32(priv, 0x41bf00 + (i * 4), data[i]); 936 nvkm_wr32(device, 0x41bf00 + (i * 4), data[i]);
935 937
936 /* UNK78xx */ 938 /* UNK78xx */
937 nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) | 939 nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
938 priv->magic_not_rop_nr); 940 gr->magic_not_rop_nr);
939 for (i = 0; i < 6; i++) 941 for (i = 0; i < 6; i++)
940 nv_wr32(priv, 0x40780c + (i * 4), data[i]); 942 nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
941} 943}
942 944
943void 945void
944gk104_grctx_generate_rop_active_fbps(struct gf100_gr_priv *priv) 946gk104_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
945{ 947{
946 const u32 fbp_count = nv_rd32(priv, 0x120074); 948 struct nvkm_device *device = gr->base.engine.subdev.device;
947 nv_mask(priv, 0x408850, 0x0000000f, fbp_count); /* zrop */ 949 const u32 fbp_count = nvkm_rd32(device, 0x120074);
948 nv_mask(priv, 0x408958, 0x0000000f, fbp_count); /* crop */ 950 nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
951 nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
949} 952}
950 953
951void 954void
952gk104_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info) 955gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
953{ 956{
954 struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; 957 struct nvkm_device *device = gr->base.engine.subdev.device;
958 const struct gf100_grctx_func *grctx = gr->func->grctx;
955 int i; 959 int i;
956 960
957 nvkm_mc(priv)->unk260(nvkm_mc(priv), 0); 961 nvkm_mc_unk260(device->mc, 0);
958 962
959 gf100_gr_mmio(priv, oclass->hub); 963 gf100_gr_mmio(gr, grctx->hub);
960 gf100_gr_mmio(priv, oclass->gpc); 964 gf100_gr_mmio(gr, grctx->gpc);
961 gf100_gr_mmio(priv, oclass->zcull); 965 gf100_gr_mmio(gr, grctx->zcull);
962 gf100_gr_mmio(priv, oclass->tpc); 966 gf100_gr_mmio(gr, grctx->tpc);
963 gf100_gr_mmio(priv, oclass->ppc); 967 gf100_gr_mmio(gr, grctx->ppc);
964 968
965 nv_wr32(priv, 0x404154, 0x00000000); 969 nvkm_wr32(device, 0x404154, 0x00000000);
966 970
967 oclass->bundle(info); 971 grctx->bundle(info);
968 oclass->pagepool(info); 972 grctx->pagepool(info);
969 oclass->attrib(info); 973 grctx->attrib(info);
970 oclass->unkn(priv); 974 grctx->unkn(gr);
971 975
972 gf100_grctx_generate_tpcid(priv); 976 gf100_grctx_generate_tpcid(gr);
973 gf100_grctx_generate_r406028(priv); 977 gf100_grctx_generate_r406028(gr);
974 gk104_grctx_generate_r418bb8(priv); 978 gk104_grctx_generate_r418bb8(gr);
975 gf100_grctx_generate_r406800(priv); 979 gf100_grctx_generate_r406800(gr);
976 980
977 for (i = 0; i < 8; i++) 981 for (i = 0; i < 8; i++)
978 nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000); 982 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
979 983
980 nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr); 984 nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
981 gk104_grctx_generate_rop_active_fbps(priv); 985 gk104_grctx_generate_rop_active_fbps(gr);
982 nv_mask(priv, 0x419f78, 0x00000001, 0x00000000); 986 nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
983 987
984 gf100_gr_icmd(priv, oclass->icmd); 988 gf100_gr_icmd(gr, grctx->icmd);
985 nv_wr32(priv, 0x404154, 0x00000400); 989 nvkm_wr32(device, 0x404154, 0x00000400);
986 gf100_gr_mthd(priv, oclass->mthd); 990 gf100_gr_mthd(gr, grctx->mthd);
987 nvkm_mc(priv)->unk260(nvkm_mc(priv), 1); 991 nvkm_mc_unk260(device->mc, 1);
988 992
989 nv_mask(priv, 0x418800, 0x00200000, 0x00200000); 993 nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
990 nv_mask(priv, 0x41be10, 0x00800000, 0x00800000); 994 nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
991} 995}
992 996
993struct nvkm_oclass * 997const struct gf100_grctx_func
994gk104_grctx_oclass = &(struct gf100_grctx_oclass) { 998gk104_grctx = {
995 .base.handle = NV_ENGCTX(GR, 0xe4),
996 .base.ofuncs = &(struct nvkm_ofuncs) {
997 .ctor = gf100_gr_context_ctor,
998 .dtor = gf100_gr_context_dtor,
999 .init = _nvkm_gr_context_init,
1000 .fini = _nvkm_gr_context_fini,
1001 .rd32 = _nvkm_gr_context_rd32,
1002 .wr32 = _nvkm_gr_context_wr32,
1003 },
1004 .main = gk104_grctx_generate_main, 999 .main = gk104_grctx_generate_main,
1005 .unkn = gk104_grctx_generate_unkn, 1000 .unkn = gk104_grctx_generate_unkn,
1006 .hub = gk104_grctx_pack_hub, 1001 .hub = gk104_grctx_pack_hub,
@@ -1021,4 +1016,4 @@ gk104_grctx_oclass = &(struct gf100_grctx_oclass) {
1021 .attrib_nr = 0x218, 1016 .attrib_nr = 0x218,
1022 .alpha_nr_max = 0x7ff, 1017 .alpha_nr_max = 0x7ff,
1023 .alpha_nr = 0x648, 1018 .alpha_nr = 0x648,
1024}.base; 1019};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index b3f58be04e9c..7b95ec2fe453 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -808,17 +808,8 @@ gk110_grctx_pack_ppc[] = {
808 * PGRAPH context implementation 808 * PGRAPH context implementation
809 ******************************************************************************/ 809 ******************************************************************************/
810 810
811struct nvkm_oclass * 811const struct gf100_grctx_func
812gk110_grctx_oclass = &(struct gf100_grctx_oclass) { 812gk110_grctx = {
813 .base.handle = NV_ENGCTX(GR, 0xf0),
814 .base.ofuncs = &(struct nvkm_ofuncs) {
815 .ctor = gf100_gr_context_ctor,
816 .dtor = gf100_gr_context_dtor,
817 .init = _nvkm_gr_context_init,
818 .fini = _nvkm_gr_context_fini,
819 .rd32 = _nvkm_gr_context_rd32,
820 .wr32 = _nvkm_gr_context_wr32,
821 },
822 .main = gk104_grctx_generate_main, 813 .main = gk104_grctx_generate_main,
823 .unkn = gk104_grctx_generate_unkn, 814 .unkn = gk104_grctx_generate_unkn,
824 .hub = gk110_grctx_pack_hub, 815 .hub = gk110_grctx_pack_hub,
@@ -839,4 +830,4 @@ gk110_grctx_oclass = &(struct gf100_grctx_oclass) {
839 .attrib_nr = 0x218, 830 .attrib_nr = 0x218,
840 .alpha_nr_max = 0x7ff, 831 .alpha_nr_max = 0x7ff,
841 .alpha_nr = 0x648, 832 .alpha_nr = 0x648,
842}.base; 833};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index b11c26794fde..048b1152da44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -69,17 +69,8 @@ gk110b_grctx_pack_tpc[] = {
69 * PGRAPH context implementation 69 * PGRAPH context implementation
70 ******************************************************************************/ 70 ******************************************************************************/
71 71
72struct nvkm_oclass * 72const struct gf100_grctx_func
73gk110b_grctx_oclass = &(struct gf100_grctx_oclass) { 73gk110b_grctx = {
74 .base.handle = NV_ENGCTX(GR, 0xf1),
75 .base.ofuncs = &(struct nvkm_ofuncs) {
76 .ctor = gf100_gr_context_ctor,
77 .dtor = gf100_gr_context_dtor,
78 .init = _nvkm_gr_context_init,
79 .fini = _nvkm_gr_context_fini,
80 .rd32 = _nvkm_gr_context_rd32,
81 .wr32 = _nvkm_gr_context_wr32,
82 },
83 .main = gk104_grctx_generate_main, 74 .main = gk104_grctx_generate_main,
84 .unkn = gk104_grctx_generate_unkn, 75 .unkn = gk104_grctx_generate_unkn,
85 .hub = gk110_grctx_pack_hub, 76 .hub = gk110_grctx_pack_hub,
@@ -100,4 +91,4 @@ gk110b_grctx_oclass = &(struct gf100_grctx_oclass) {
100 .attrib_nr = 0x218, 91 .attrib_nr = 0x218,
101 .alpha_nr_max = 0x7ff, 92 .alpha_nr_max = 0x7ff,
102 .alpha_nr = 0x648, 93 .alpha_nr = 0x648,
103}.base; 94};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index 6e8ce9fc311a..67b7a1b43617 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -530,17 +530,8 @@ gk208_grctx_pack_ppc[] = {
530 * PGRAPH context implementation 530 * PGRAPH context implementation
531 ******************************************************************************/ 531 ******************************************************************************/
532 532
533struct nvkm_oclass * 533const struct gf100_grctx_func
534gk208_grctx_oclass = &(struct gf100_grctx_oclass) { 534gk208_grctx = {
535 .base.handle = NV_ENGCTX(GR, 0x08),
536 .base.ofuncs = &(struct nvkm_ofuncs) {
537 .ctor = gf100_gr_context_ctor,
538 .dtor = gf100_gr_context_dtor,
539 .init = _nvkm_gr_context_init,
540 .fini = _nvkm_gr_context_fini,
541 .rd32 = _nvkm_gr_context_rd32,
542 .wr32 = _nvkm_gr_context_wr32,
543 },
544 .main = gk104_grctx_generate_main, 535 .main = gk104_grctx_generate_main,
545 .unkn = gk104_grctx_generate_unkn, 536 .unkn = gk104_grctx_generate_unkn,
546 .hub = gk208_grctx_pack_hub, 537 .hub = gk208_grctx_pack_hub,
@@ -561,4 +552,4 @@ gk208_grctx_oclass = &(struct gf100_grctx_oclass) {
561 .attrib_nr = 0x218, 552 .attrib_nr = 0x218,
562 .alpha_nr_max = 0x7ff, 553 .alpha_nr_max = 0x7ff,
563 .alpha_nr = 0x648, 554 .alpha_nr = 0x648,
564}.base; 555};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index 2f241f6f0f0a..ddaa16a71c84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -20,34 +20,60 @@
20 * DEALINGS IN THE SOFTWARE. 20 * DEALINGS IN THE SOFTWARE.
21 */ 21 */
22#include "ctxgf100.h" 22#include "ctxgf100.h"
23#include "gf100.h"
23 24
24static const struct gf100_gr_pack 25#include <subdev/mc.h>
25gk20a_grctx_pack_mthd[] = { 26
26 { gk104_grctx_init_a097_0, 0xa297 }, 27static void
27 { gf100_grctx_init_902d_0, 0x902d }, 28gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
28 {} 29{
29}; 30 struct nvkm_device *device = gr->base.engine.subdev.device;
31 const struct gf100_grctx_func *grctx = gr->func->grctx;
32 int idle_timeout_save;
33 int i;
34
35 gf100_gr_mmio(gr, gr->fuc_sw_ctx);
36
37 gf100_gr_wait_idle(gr);
38
39 idle_timeout_save = nvkm_rd32(device, 0x404154);
40 nvkm_wr32(device, 0x404154, 0x00000000);
41
42 grctx->attrib(info);
43
44 grctx->unkn(gr);
45
46 gf100_grctx_generate_tpcid(gr);
47 gf100_grctx_generate_r406028(gr);
48 gk104_grctx_generate_r418bb8(gr);
49 gf100_grctx_generate_r406800(gr);
50
51 for (i = 0; i < 8; i++)
52 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
30 53
31struct nvkm_oclass * 54 nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
32gk20a_grctx_oclass = &(struct gf100_grctx_oclass) { 55
33 .base.handle = NV_ENGCTX(GR, 0xea), 56 gk104_grctx_generate_rop_active_fbps(gr);
34 .base.ofuncs = &(struct nvkm_ofuncs) { 57
35 .ctor = gf100_gr_context_ctor, 58 nvkm_mask(device, 0x5044b0, 0x8000000, 0x8000000);
36 .dtor = gf100_gr_context_dtor, 59
37 .init = _nvkm_gr_context_init, 60 gf100_gr_wait_idle(gr);
38 .fini = _nvkm_gr_context_fini, 61
39 .rd32 = _nvkm_gr_context_rd32, 62 nvkm_wr32(device, 0x404154, idle_timeout_save);
40 .wr32 = _nvkm_gr_context_wr32, 63 gf100_gr_wait_idle(gr);
41 }, 64
42 .main = gk104_grctx_generate_main, 65 gf100_gr_mthd(gr, gr->fuc_method);
66 gf100_gr_wait_idle(gr);
67
68 gf100_gr_icmd(gr, gr->fuc_bundle);
69 grctx->pagepool(info);
70 grctx->bundle(info);
71}
72
73const struct gf100_grctx_func
74gk20a_grctx = {
75 .main = gk20a_grctx_generate_main,
43 .unkn = gk104_grctx_generate_unkn, 76 .unkn = gk104_grctx_generate_unkn,
44 .hub = gk104_grctx_pack_hub,
45 .gpc = gk104_grctx_pack_gpc,
46 .zcull = gf100_grctx_pack_zcull,
47 .tpc = gk104_grctx_pack_tpc,
48 .ppc = gk104_grctx_pack_ppc,
49 .icmd = gk104_grctx_pack_icmd,
50 .mthd = gk20a_grctx_pack_mthd,
51 .bundle = gk104_grctx_generate_bundle, 77 .bundle = gk104_grctx_generate_bundle,
52 .bundle_size = 0x1800, 78 .bundle_size = 0x1800,
53 .bundle_min_gpm_fifo_depth = 0x62, 79 .bundle_min_gpm_fifo_depth = 0x62,
@@ -59,4 +85,4 @@ gk20a_grctx_oclass = &(struct gf100_grctx_oclass) {
59 .attrib_nr = 0x240, 85 .attrib_nr = 0x240,
60 .alpha_nr_max = 0x648 + (0x648 / 2), 86 .alpha_nr_max = 0x648 + (0x648 / 2),
61 .alpha_nr = 0x648, 87 .alpha_nr = 0x648,
62}.base; 88};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index fbeaae3ae6ce..95f59e3169f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -863,27 +863,27 @@ gm107_grctx_pack_ppc[] = {
863void 863void
864gm107_grctx_generate_bundle(struct gf100_grctx *info) 864gm107_grctx_generate_bundle(struct gf100_grctx *info)
865{ 865{
866 const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv); 866 const struct gf100_grctx_func *grctx = info->gr->func->grctx;
867 const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth, 867 const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
868 impl->bundle_size / 0x20); 868 grctx->bundle_size / 0x20);
869 const u32 token_limit = impl->bundle_token_limit; 869 const u32 token_limit = grctx->bundle_token_limit;
870 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; 870 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
871 const int s = 8; 871 const int s = 8;
872 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); 872 const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
873 mmio_refn(info, 0x408004, 0x00000000, s, b); 873 mmio_refn(info, 0x408004, 0x00000000, s, b);
874 mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); 874 mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
875 mmio_refn(info, 0x418e24, 0x00000000, s, b); 875 mmio_refn(info, 0x418e24, 0x00000000, s, b);
876 mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s)); 876 mmio_wr32(info, 0x418e28, 0x80000000 | (grctx->bundle_size >> s));
877 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); 877 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
878} 878}
879 879
880void 880void
881gm107_grctx_generate_pagepool(struct gf100_grctx *info) 881gm107_grctx_generate_pagepool(struct gf100_grctx *info)
882{ 882{
883 const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv); 883 const struct gf100_grctx_func *grctx = info->gr->func->grctx;
884 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; 884 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
885 const int s = 8; 885 const int s = 8;
886 const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access); 886 const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
887 mmio_refn(info, 0x40800c, 0x00000000, s, b); 887 mmio_refn(info, 0x40800c, 0x00000000, s, b);
888 mmio_wr32(info, 0x408010, 0x80000000); 888 mmio_wr32(info, 0x408010, 0x80000000);
889 mmio_refn(info, 0x419004, 0x00000000, s, b); 889 mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -895,17 +895,17 @@ gm107_grctx_generate_pagepool(struct gf100_grctx *info)
895void 895void
896gm107_grctx_generate_attrib(struct gf100_grctx *info) 896gm107_grctx_generate_attrib(struct gf100_grctx *info)
897{ 897{
898 struct gf100_gr_priv *priv = info->priv; 898 struct gf100_gr *gr = info->gr;
899 const struct gf100_grctx_oclass *impl = (void *)gf100_grctx_impl(priv); 899 const struct gf100_grctx_func *grctx = gr->func->grctx;
900 const u32 alpha = impl->alpha_nr; 900 const u32 alpha = grctx->alpha_nr;
901 const u32 attrib = impl->attrib_nr; 901 const u32 attrib = grctx->attrib_nr;
902 const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max); 902 const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
903 const u32 access = NV_MEM_ACCESS_RW; 903 const u32 access = NV_MEM_ACCESS_RW;
904 const int s = 12; 904 const int s = 12;
905 const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access); 905 const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
906 const int max_batches = 0xffff; 906 const int max_batches = 0xffff;
907 u32 bo = 0; 907 u32 bo = 0;
908 u32 ao = bo + impl->attrib_nr_max * priv->tpc_total; 908 u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
909 int gpc, ppc, n = 0; 909 int gpc, ppc, n = 0;
910 910
911 mmio_refn(info, 0x418810, 0x80000000, s, b); 911 mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -914,97 +914,90 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
914 mmio_wr32(info, 0x405830, (attrib << 16) | alpha); 914 mmio_wr32(info, 0x405830, (attrib << 16) | alpha);
915 mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches); 915 mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
916 916
917 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 917 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
918 for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++, n++) { 918 for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
919 const u32 as = alpha * priv->ppc_tpc_nr[gpc][ppc]; 919 const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
920 const u32 bs = attrib * priv->ppc_tpc_nr[gpc][ppc]; 920 const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
921 const u32 u = 0x418ea0 + (n * 0x04); 921 const u32 u = 0x418ea0 + (n * 0x04);
922 const u32 o = PPC_UNIT(gpc, ppc, 0); 922 const u32 o = PPC_UNIT(gpc, ppc, 0);
923 mmio_wr32(info, o + 0xc0, bs); 923 mmio_wr32(info, o + 0xc0, bs);
924 mmio_wr32(info, o + 0xf4, bo); 924 mmio_wr32(info, o + 0xf4, bo);
925 bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc]; 925 bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
926 mmio_wr32(info, o + 0xe4, as); 926 mmio_wr32(info, o + 0xe4, as);
927 mmio_wr32(info, o + 0xf8, ao); 927 mmio_wr32(info, o + 0xf8, ao);
928 ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc]; 928 ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
929 mmio_wr32(info, u, ((bs / 3 /*XXX*/) << 16) | bs); 929 mmio_wr32(info, u, ((bs / 3 /*XXX*/) << 16) | bs);
930 } 930 }
931 } 931 }
932} 932}
933 933
934static void 934void
935gm107_grctx_generate_tpcid(struct gf100_gr_priv *priv) 935gm107_grctx_generate_tpcid(struct gf100_gr *gr)
936{ 936{
937 struct nvkm_device *device = gr->base.engine.subdev.device;
937 int gpc, tpc, id; 938 int gpc, tpc, id;
938 939
939 for (tpc = 0, id = 0; tpc < 4; tpc++) { 940 for (tpc = 0, id = 0; tpc < 4; tpc++) {
940 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 941 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
941 if (tpc < priv->tpc_nr[gpc]) { 942 if (tpc < gr->tpc_nr[gpc]) {
942 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id); 943 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
943 nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id); 944 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
944 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id); 945 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
945 id++; 946 id++;
946 } 947 }
947 948
948 nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]); 949 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c08), gr->tpc_nr[gpc]);
949 nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]); 950 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c8c), gr->tpc_nr[gpc]);
950 } 951 }
951 } 952 }
952} 953}
953 954
954static void 955static void
955gm107_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info) 956gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
956{ 957{
957 struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; 958 struct nvkm_device *device = gr->base.engine.subdev.device;
959 const struct gf100_grctx_func *grctx = gr->func->grctx;
958 int i; 960 int i;
959 961
960 gf100_gr_mmio(priv, oclass->hub); 962 gf100_gr_mmio(gr, grctx->hub);
961 gf100_gr_mmio(priv, oclass->gpc); 963 gf100_gr_mmio(gr, grctx->gpc);
962 gf100_gr_mmio(priv, oclass->zcull); 964 gf100_gr_mmio(gr, grctx->zcull);
963 gf100_gr_mmio(priv, oclass->tpc); 965 gf100_gr_mmio(gr, grctx->tpc);
964 gf100_gr_mmio(priv, oclass->ppc); 966 gf100_gr_mmio(gr, grctx->ppc);
965 967
966 nv_wr32(priv, 0x404154, 0x00000000); 968 nvkm_wr32(device, 0x404154, 0x00000000);
967 969
968 oclass->bundle(info); 970 grctx->bundle(info);
969 oclass->pagepool(info); 971 grctx->pagepool(info);
970 oclass->attrib(info); 972 grctx->attrib(info);
971 oclass->unkn(priv); 973 grctx->unkn(gr);
972 974
973 gm107_grctx_generate_tpcid(priv); 975 gm107_grctx_generate_tpcid(gr);
974 gf100_grctx_generate_r406028(priv); 976 gf100_grctx_generate_r406028(gr);
975 gk104_grctx_generate_r418bb8(priv); 977 gk104_grctx_generate_r418bb8(gr);
976 gf100_grctx_generate_r406800(priv); 978 gf100_grctx_generate_r406800(gr);
977 979
978 nv_wr32(priv, 0x4064d0, 0x00000001); 980 nvkm_wr32(device, 0x4064d0, 0x00000001);
979 for (i = 1; i < 8; i++) 981 for (i = 1; i < 8; i++)
980 nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000); 982 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
981 nv_wr32(priv, 0x406500, 0x00000001); 983 nvkm_wr32(device, 0x406500, 0x00000001);
982 984
983 nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr); 985 nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
984 986
985 gk104_grctx_generate_rop_active_fbps(priv); 987 gk104_grctx_generate_rop_active_fbps(gr);
986 988
987 gf100_gr_icmd(priv, oclass->icmd); 989 gf100_gr_icmd(gr, grctx->icmd);
988 nv_wr32(priv, 0x404154, 0x00000400); 990 nvkm_wr32(device, 0x404154, 0x00000400);
989 gf100_gr_mthd(priv, oclass->mthd); 991 gf100_gr_mthd(gr, grctx->mthd);
990 992
991 nv_mask(priv, 0x419e00, 0x00808080, 0x00808080); 993 nvkm_mask(device, 0x419e00, 0x00808080, 0x00808080);
992 nv_mask(priv, 0x419ccc, 0x80000000, 0x80000000); 994 nvkm_mask(device, 0x419ccc, 0x80000000, 0x80000000);
993 nv_mask(priv, 0x419f80, 0x80000000, 0x80000000); 995 nvkm_mask(device, 0x419f80, 0x80000000, 0x80000000);
994 nv_mask(priv, 0x419f88, 0x80000000, 0x80000000); 996 nvkm_mask(device, 0x419f88, 0x80000000, 0x80000000);
995} 997}
996 998
997struct nvkm_oclass * 999const struct gf100_grctx_func
998gm107_grctx_oclass = &(struct gf100_grctx_oclass) { 1000gm107_grctx = {
999 .base.handle = NV_ENGCTX(GR, 0x08),
1000 .base.ofuncs = &(struct nvkm_ofuncs) {
1001 .ctor = gf100_gr_context_ctor,
1002 .dtor = gf100_gr_context_dtor,
1003 .init = _nvkm_gr_context_init,
1004 .fini = _nvkm_gr_context_fini,
1005 .rd32 = _nvkm_gr_context_rd32,
1006 .wr32 = _nvkm_gr_context_wr32,
1007 },
1008 .main = gm107_grctx_generate_main, 1001 .main = gm107_grctx_generate_main,
1009 .unkn = gk104_grctx_generate_unkn, 1002 .unkn = gk104_grctx_generate_unkn,
1010 .hub = gm107_grctx_pack_hub, 1003 .hub = gm107_grctx_pack_hub,
@@ -1025,4 +1018,4 @@ gm107_grctx_oclass = &(struct gf100_grctx_oclass) {
1025 .attrib_nr = 0xaa0, 1018 .attrib_nr = 0xaa0,
1026 .alpha_nr_max = 0x1800, 1019 .alpha_nr_max = 0x1800,
1027 .alpha_nr = 0x1000, 1020 .alpha_nr = 0x1000,
1028}.base; 1021};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
index ea8e66151aa8..170cbfdbe1ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
@@ -918,17 +918,18 @@ gm204_grctx_pack_ppc[] = {
918 * PGRAPH context implementation 918 * PGRAPH context implementation
919 ******************************************************************************/ 919 ******************************************************************************/
920 920
921static void 921void
922gm204_grctx_generate_tpcid(struct gf100_gr_priv *priv) 922gm204_grctx_generate_tpcid(struct gf100_gr *gr)
923{ 923{
924 struct nvkm_device *device = gr->base.engine.subdev.device;
924 int gpc, tpc, id; 925 int gpc, tpc, id;
925 926
926 for (tpc = 0, id = 0; tpc < 4; tpc++) { 927 for (tpc = 0, id = 0; tpc < 4; tpc++) {
927 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 928 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
928 if (tpc < priv->tpc_nr[gpc]) { 929 if (tpc < gr->tpc_nr[gpc]) {
929 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id); 930 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
930 nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id); 931 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
931 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id); 932 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
932 id++; 933 id++;
933 } 934 }
934 } 935 }
@@ -936,101 +937,95 @@ gm204_grctx_generate_tpcid(struct gf100_gr_priv *priv)
936} 937}
937 938
938static void 939static void
939gm204_grctx_generate_rop_active_fbps(struct gf100_gr_priv *priv) 940gm204_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
940{ 941{
941 const u32 fbp_count = nv_rd32(priv, 0x12006c); 942 struct nvkm_device *device = gr->base.engine.subdev.device;
942 nv_mask(priv, 0x408850, 0x0000000f, fbp_count); /* zrop */ 943 const u32 fbp_count = nvkm_rd32(device, 0x12006c);
943 nv_mask(priv, 0x408958, 0x0000000f, fbp_count); /* crop */ 944 nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
945 nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
944} 946}
945 947
946static void 948void
947gm204_grctx_generate_405b60(struct gf100_gr_priv *priv) 949gm204_grctx_generate_405b60(struct gf100_gr *gr)
948{ 950{
949 const u32 dist_nr = DIV_ROUND_UP(priv->tpc_total, 4); 951 struct nvkm_device *device = gr->base.engine.subdev.device;
950 u32 dist[TPC_MAX] = {}; 952 const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
953 u32 dist[TPC_MAX / 4] = {};
951 u32 gpcs[GPC_MAX] = {}; 954 u32 gpcs[GPC_MAX] = {};
952 u8 tpcnr[GPC_MAX]; 955 u8 tpcnr[GPC_MAX];
953 int tpc, gpc, i; 956 int tpc, gpc, i;
954 957
955 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); 958 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
956 959
957 /* won't result in the same distribution as the binary driver where 960 /* won't result in the same distribution as the binary driver where
958 * some of the gpcs have more tpcs than others, but this shall do 961 * some of the gpcs have more tpcs than others, but this shall do
959 * for the moment. the code for earlier gpus has this issue too. 962 * for the moment. the code for earlier gpus has this issue too.
960 */ 963 */
961 for (gpc = -1, i = 0; i < priv->tpc_total; i++) { 964 for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
962 do { 965 do {
963 gpc = (gpc + 1) % priv->gpc_nr; 966 gpc = (gpc + 1) % gr->gpc_nr;
964 } while(!tpcnr[gpc]); 967 } while(!tpcnr[gpc]);
965 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--; 968 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
966 969
967 dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8); 970 dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
968 gpcs[gpc] |= i << (tpc * 8); 971 gpcs[gpc] |= i << (tpc * 8);
969 } 972 }
970 973
971 for (i = 0; i < dist_nr; i++) 974 for (i = 0; i < dist_nr; i++)
972 nv_wr32(priv, 0x405b60 + (i * 4), dist[i]); 975 nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
973 for (i = 0; i < priv->gpc_nr; i++) 976 for (i = 0; i < gr->gpc_nr; i++)
974 nv_wr32(priv, 0x405ba0 + (i * 4), gpcs[i]); 977 nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
975} 978}
976 979
977void 980void
978gm204_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info) 981gm204_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
979{ 982{
980 struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; 983 struct nvkm_device *device = gr->base.engine.subdev.device;
984 const struct gf100_grctx_func *grctx = gr->func->grctx;
981 u32 tmp; 985 u32 tmp;
982 int i; 986 int i;
983 987
984 gf100_gr_mmio(priv, oclass->hub); 988 gf100_gr_mmio(gr, grctx->hub);
985 gf100_gr_mmio(priv, oclass->gpc); 989 gf100_gr_mmio(gr, grctx->gpc);
986 gf100_gr_mmio(priv, oclass->zcull); 990 gf100_gr_mmio(gr, grctx->zcull);
987 gf100_gr_mmio(priv, oclass->tpc); 991 gf100_gr_mmio(gr, grctx->tpc);
988 gf100_gr_mmio(priv, oclass->ppc); 992 gf100_gr_mmio(gr, grctx->ppc);
989 993
990 nv_wr32(priv, 0x404154, 0x00000000); 994 nvkm_wr32(device, 0x404154, 0x00000000);
991 995
992 oclass->bundle(info); 996 grctx->bundle(info);
993 oclass->pagepool(info); 997 grctx->pagepool(info);
994 oclass->attrib(info); 998 grctx->attrib(info);
995 oclass->unkn(priv); 999 grctx->unkn(gr);
996 1000
997 gm204_grctx_generate_tpcid(priv); 1001 gm204_grctx_generate_tpcid(gr);
998 gf100_grctx_generate_r406028(priv); 1002 gf100_grctx_generate_r406028(gr);
999 gk104_grctx_generate_r418bb8(priv); 1003 gk104_grctx_generate_r418bb8(gr);
1000 1004
1001 for (i = 0; i < 8; i++) 1005 for (i = 0; i < 8; i++)
1002 nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000); 1006 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
1003 nv_wr32(priv, 0x406500, 0x00000000); 1007 nvkm_wr32(device, 0x406500, 0x00000000);
1004 1008
1005 nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr); 1009 nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
1006 1010
1007 gm204_grctx_generate_rop_active_fbps(priv); 1011 gm204_grctx_generate_rop_active_fbps(gr);
1008 1012
1009 for (tmp = 0, i = 0; i < priv->gpc_nr; i++) 1013 for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
1010 tmp |= ((1 << priv->tpc_nr[i]) - 1) << (i * 4); 1014 tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
1011 nv_wr32(priv, 0x4041c4, tmp); 1015 nvkm_wr32(device, 0x4041c4, tmp);
1012 1016
1013 gm204_grctx_generate_405b60(priv); 1017 gm204_grctx_generate_405b60(gr);
1014 1018
1015 gf100_gr_icmd(priv, oclass->icmd); 1019 gf100_gr_icmd(gr, grctx->icmd);
1016 nv_wr32(priv, 0x404154, 0x00000800); 1020 nvkm_wr32(device, 0x404154, 0x00000800);
1017 gf100_gr_mthd(priv, oclass->mthd); 1021 gf100_gr_mthd(gr, grctx->mthd);
1018 1022
1019 nv_mask(priv, 0x418e94, 0xffffffff, 0xc4230000); 1023 nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
1020 nv_mask(priv, 0x418e4c, 0xffffffff, 0x70000000); 1024 nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
1021} 1025}
1022 1026
1023struct nvkm_oclass * 1027const struct gf100_grctx_func
1024gm204_grctx_oclass = &(struct gf100_grctx_oclass) { 1028gm204_grctx = {
1025 .base.handle = NV_ENGCTX(GR, 0x24),
1026 .base.ofuncs = &(struct nvkm_ofuncs) {
1027 .ctor = gf100_gr_context_ctor,
1028 .dtor = gf100_gr_context_dtor,
1029 .init = _nvkm_gr_context_init,
1030 .fini = _nvkm_gr_context_fini,
1031 .rd32 = _nvkm_gr_context_rd32,
1032 .wr32 = _nvkm_gr_context_wr32,
1033 },
1034 .main = gm204_grctx_generate_main, 1029 .main = gm204_grctx_generate_main,
1035 .unkn = gk104_grctx_generate_unkn, 1030 .unkn = gk104_grctx_generate_unkn,
1036 .hub = gm204_grctx_pack_hub, 1031 .hub = gm204_grctx_pack_hub,
@@ -1051,4 +1046,4 @@ gm204_grctx_oclass = &(struct gf100_grctx_oclass) {
1051 .attrib_nr = 0x400, 1046 .attrib_nr = 0x400,
1052 .alpha_nr_max = 0x1800, 1047 .alpha_nr_max = 0x1800,
1053 .alpha_nr = 0x1000, 1048 .alpha_nr = 0x1000,
1054}.base; 1049};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
index 91ec41617943..d6be6034c2c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
@@ -49,17 +49,8 @@ gm206_grctx_pack_gpc[] = {
49 {} 49 {}
50}; 50};
51 51
52struct nvkm_oclass * 52const struct gf100_grctx_func
53gm206_grctx_oclass = &(struct gf100_grctx_oclass) { 53gm206_grctx = {
54 .base.handle = NV_ENGCTX(GR, 0x26),
55 .base.ofuncs = &(struct nvkm_ofuncs) {
56 .ctor = gf100_gr_context_ctor,
57 .dtor = gf100_gr_context_dtor,
58 .init = _nvkm_gr_context_init,
59 .fini = _nvkm_gr_context_fini,
60 .rd32 = _nvkm_gr_context_rd32,
61 .wr32 = _nvkm_gr_context_wr32,
62 },
63 .main = gm204_grctx_generate_main, 54 .main = gm204_grctx_generate_main,
64 .unkn = gk104_grctx_generate_unkn, 55 .unkn = gk104_grctx_generate_unkn,
65 .hub = gm204_grctx_pack_hub, 56 .hub = gm204_grctx_pack_hub,
@@ -80,4 +71,4 @@ gm206_grctx_oclass = &(struct gf100_grctx_oclass) {
80 .attrib_nr = 0x400, 71 .attrib_nr = 0x400,
81 .alpha_nr_max = 0x1800, 72 .alpha_nr_max = 0x1800,
82 .alpha_nr = 0x1000, 73 .alpha_nr = 0x1000,
83}.base; 74};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
new file mode 100644
index 000000000000..670260402538
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -0,0 +1,103 @@
1/*
2 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22#include "ctxgf100.h"
23
24static void
25gm20b_grctx_generate_r406028(struct gf100_gr *gr)
26{
27 struct nvkm_device *device = gr->base.engine.subdev.device;
28 u32 tpc_per_gpc = 0;
29 int i;
30
31 for (i = 0; i < gr->gpc_nr; i++)
32 tpc_per_gpc |= gr->tpc_nr[i] << (4 * i);
33
34 nvkm_wr32(device, 0x406028, tpc_per_gpc);
35 nvkm_wr32(device, 0x405870, tpc_per_gpc);
36}
37
38static void
39gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
40{
41 struct nvkm_device *device = gr->base.engine.subdev.device;
42 const struct gf100_grctx_func *grctx = gr->func->grctx;
43 int idle_timeout_save;
44 int i, tmp;
45
46 gf100_gr_mmio(gr, gr->fuc_sw_ctx);
47
48 gf100_gr_wait_idle(gr);
49
50 idle_timeout_save = nvkm_rd32(device, 0x404154);
51 nvkm_wr32(device, 0x404154, 0x00000000);
52
53 grctx->attrib(info);
54
55 grctx->unkn(gr);
56
57 gm204_grctx_generate_tpcid(gr);
58 gm20b_grctx_generate_r406028(gr);
59 gk104_grctx_generate_r418bb8(gr);
60
61 for (i = 0; i < 8; i++)
62 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
63
64 nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
65
66 gk104_grctx_generate_rop_active_fbps(gr);
67 nvkm_wr32(device, 0x408908, nvkm_rd32(device, 0x410108) | 0x80000000);
68
69 for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
70 tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
71 nvkm_wr32(device, 0x4041c4, tmp);
72
73 gm204_grctx_generate_405b60(gr);
74
75 gf100_gr_wait_idle(gr);
76
77 nvkm_wr32(device, 0x404154, idle_timeout_save);
78 gf100_gr_wait_idle(gr);
79
80 gf100_gr_mthd(gr, gr->fuc_method);
81 gf100_gr_wait_idle(gr);
82
83 gf100_gr_icmd(gr, gr->fuc_bundle);
84 grctx->pagepool(info);
85 grctx->bundle(info);
86}
87
88const struct gf100_grctx_func
89gm20b_grctx = {
90 .main = gm20b_grctx_generate_main,
91 .unkn = gk104_grctx_generate_unkn,
92 .bundle = gm107_grctx_generate_bundle,
93 .bundle_size = 0x1800,
94 .bundle_min_gpm_fifo_depth = 0x182,
95 .bundle_token_limit = 0x1c0,
96 .pagepool = gm107_grctx_generate_pagepool,
97 .pagepool_size = 0x8000,
98 .attrib = gm107_grctx_generate_attrib,
99 .attrib_nr_max = 0x600,
100 .attrib_nr = 0x400,
101 .alpha_nr_max = 0xc00,
102 .alpha_nr = 0x800,
103};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
index dc31462afe65..80a6b017af64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
@@ -111,7 +111,6 @@
111 111
112#include "ctxnv40.h" 112#include "ctxnv40.h"
113#include "nv40.h" 113#include "nv40.h"
114#include <core/device.h>
115 114
116/* TODO: 115/* TODO:
117 * - get vs count from 0x1540 116 * - get vs count from 0x1540
@@ -583,13 +582,13 @@ nv40_gr_construct_shader(struct nvkm_grctx *ctx)
583 582
584 offset += 0x0280/4; 583 offset += 0x0280/4;
585 for (i = 0; i < 16; i++, offset += 2) 584 for (i = 0; i < 16; i++, offset += 2)
586 nv_wo32(obj, offset * 4, 0x3f800000); 585 nvkm_wo32(obj, offset * 4, 0x3f800000);
587 586
588 for (vs = 0; vs < vs_nr; vs++, offset += vs_len) { 587 for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
589 for (i = 0; i < vs_nr_b0 * 6; i += 6) 588 for (i = 0; i < vs_nr_b0 * 6; i += 6)
590 nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001); 589 nvkm_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
591 for (i = 0; i < vs_nr_b1 * 4; i += 4) 590 for (i = 0; i < vs_nr_b1 * 4; i += 4)
592 nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000); 591 nvkm_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
593 } 592 }
594} 593}
595 594
@@ -675,7 +674,7 @@ nv40_grctx_init(struct nvkm_device *device, u32 *size)
675 struct nvkm_grctx ctx = { 674 struct nvkm_grctx ctx = {
676 .device = device, 675 .device = device,
677 .mode = NVKM_GRCTX_PROG, 676 .mode = NVKM_GRCTX_PROG,
678 .data = ctxprog, 677 .ucode = ctxprog,
679 .ctxprog_max = 256, 678 .ctxprog_max = 256,
680 }; 679 };
681 680
@@ -684,9 +683,9 @@ nv40_grctx_init(struct nvkm_device *device, u32 *size)
684 683
685 nv40_grctx_generate(&ctx); 684 nv40_grctx_generate(&ctx);
686 685
687 nv_wr32(device, 0x400324, 0); 686 nvkm_wr32(device, 0x400324, 0);
688 for (i = 0; i < ctx.ctxprog_len; i++) 687 for (i = 0; i < ctx.ctxprog_len; i++)
689 nv_wr32(device, 0x400328, ctxprog[i]); 688 nvkm_wr32(device, 0x400328, ctxprog[i]);
690 *size = ctx.ctxvals_pos * 4; 689 *size = ctx.ctxvals_pos * 4;
691 690
692 kfree(ctxprog); 691 kfree(ctxprog);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
index 8a89961956af..50e808e9f926 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
@@ -9,7 +9,8 @@ struct nvkm_grctx {
9 NVKM_GRCTX_PROG, 9 NVKM_GRCTX_PROG,
10 NVKM_GRCTX_VALS 10 NVKM_GRCTX_VALS
11 } mode; 11 } mode;
12 void *data; 12 u32 *ucode;
13 struct nvkm_gpuobj *data;
13 14
14 u32 ctxprog_max; 15 u32 ctxprog_max;
15 u32 ctxprog_len; 16 u32 ctxprog_len;
@@ -22,7 +23,7 @@ struct nvkm_grctx {
22static inline void 23static inline void
23cp_out(struct nvkm_grctx *ctx, u32 inst) 24cp_out(struct nvkm_grctx *ctx, u32 inst)
24{ 25{
25 u32 *ctxprog = ctx->data; 26 u32 *ctxprog = ctx->ucode;
26 27
27 if (ctx->mode != NVKM_GRCTX_PROG) 28 if (ctx->mode != NVKM_GRCTX_PROG)
28 return; 29 return;
@@ -56,7 +57,7 @@ cp_ctx(struct nvkm_grctx *ctx, u32 reg, u32 length)
56static inline void 57static inline void
57cp_name(struct nvkm_grctx *ctx, int name) 58cp_name(struct nvkm_grctx *ctx, int name)
58{ 59{
59 u32 *ctxprog = ctx->data; 60 u32 *ctxprog = ctx->ucode;
60 int i; 61 int i;
61 62
62 if (ctx->mode != NVKM_GRCTX_PROG) 63 if (ctx->mode != NVKM_GRCTX_PROG)
@@ -124,6 +125,6 @@ gr_def(struct nvkm_grctx *ctx, u32 reg, u32 val)
124 reg = (reg - 0x00400000) / 4; 125 reg = (reg - 0x00400000) / 4;
125 reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base; 126 reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
126 127
127 nv_wo32(ctx->data, reg * 4, val); 128 nvkm_wo32(ctx->data, reg * 4, val);
128} 129}
129#endif 130#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
index 9c9528d2cd90..1e13278cf306 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
@@ -107,7 +107,6 @@
107 107
108#include "ctxnv40.h" 108#include "ctxnv40.h"
109 109
110#include <core/device.h>
111#include <subdev/fb.h> 110#include <subdev/fb.h>
112 111
113#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf) 112#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
@@ -269,7 +268,7 @@ nv50_grctx_init(struct nvkm_device *device, u32 *size)
269 struct nvkm_grctx ctx = { 268 struct nvkm_grctx ctx = {
270 .device = device, 269 .device = device,
271 .mode = NVKM_GRCTX_PROG, 270 .mode = NVKM_GRCTX_PROG,
272 .data = ctxprog, 271 .ucode = ctxprog,
273 .ctxprog_max = 512, 272 .ctxprog_max = 512,
274 }; 273 };
275 274
@@ -277,9 +276,9 @@ nv50_grctx_init(struct nvkm_device *device, u32 *size)
277 return -ENOMEM; 276 return -ENOMEM;
278 nv50_grctx_generate(&ctx); 277 nv50_grctx_generate(&ctx);
279 278
280 nv_wr32(device, 0x400324, 0); 279 nvkm_wr32(device, 0x400324, 0);
281 for (i = 0; i < ctx.ctxprog_len; i++) 280 for (i = 0; i < ctx.ctxprog_len; i++)
282 nv_wr32(device, 0x400328, ctxprog[i]); 281 nvkm_wr32(device, 0x400328, ctxprog[i]);
283 *size = ctx.ctxvals_pos * 4; 282 *size = ctx.ctxvals_pos * 4;
284 kfree(ctxprog); 283 kfree(ctxprog);
285 return 0; 284 return 0;
@@ -299,7 +298,7 @@ nv50_gr_construct_mmio(struct nvkm_grctx *ctx)
299 struct nvkm_device *device = ctx->device; 298 struct nvkm_device *device = ctx->device;
300 int i, j; 299 int i, j;
301 int offset, base; 300 int offset, base;
302 u32 units = nv_rd32 (ctx->device, 0x1540); 301 u32 units = nvkm_rd32(device, 0x1540);
303 302
304 /* 0800: DISPATCH */ 303 /* 0800: DISPATCH */
305 cp_ctx(ctx, 0x400808, 7); 304 cp_ctx(ctx, 0x400808, 7);
@@ -570,7 +569,7 @@ nv50_gr_construct_mmio(struct nvkm_grctx *ctx)
570 else if (device->chipset < 0xa0) 569 else if (device->chipset < 0xa0)
571 gr_def(ctx, 0x407d08, 0x00390040); 570 gr_def(ctx, 0x407d08, 0x00390040);
572 else { 571 else {
573 if (nvkm_fb(device)->ram->type != NV_MEM_TYPE_GDDR5) 572 if (device->fb->ram->type != NVKM_RAM_TYPE_GDDR5)
574 gr_def(ctx, 0x407d08, 0x003d0040); 573 gr_def(ctx, 0x407d08, 0x003d0040);
575 else 574 else
576 gr_def(ctx, 0x407d08, 0x003c0040); 575 gr_def(ctx, 0x407d08, 0x003c0040);
@@ -784,9 +783,10 @@ nv50_gr_construct_mmio(struct nvkm_grctx *ctx)
784static void 783static void
785dd_emit(struct nvkm_grctx *ctx, int num, u32 val) { 784dd_emit(struct nvkm_grctx *ctx, int num, u32 val) {
786 int i; 785 int i;
787 if (val && ctx->mode == NVKM_GRCTX_VALS) 786 if (val && ctx->mode == NVKM_GRCTX_VALS) {
788 for (i = 0; i < num; i++) 787 for (i = 0; i < num; i++)
789 nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val); 788 nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
789 }
790 ctx->ctxvals_pos += num; 790 ctx->ctxvals_pos += num;
791} 791}
792 792
@@ -1156,9 +1156,10 @@ nv50_gr_construct_mmio_ddata(struct nvkm_grctx *ctx)
1156static void 1156static void
1157xf_emit(struct nvkm_grctx *ctx, int num, u32 val) { 1157xf_emit(struct nvkm_grctx *ctx, int num, u32 val) {
1158 int i; 1158 int i;
1159 if (val && ctx->mode == NVKM_GRCTX_VALS) 1159 if (val && ctx->mode == NVKM_GRCTX_VALS) {
1160 for (i = 0; i < num; i++) 1160 for (i = 0; i < num; i++)
1161 nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val); 1161 nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
1162 }
1162 ctx->ctxvals_pos += num << 3; 1163 ctx->ctxvals_pos += num << 3;
1163} 1164}
1164 1165
@@ -1190,7 +1191,7 @@ nv50_gr_construct_xfer1(struct nvkm_grctx *ctx)
1190 int i; 1191 int i;
1191 int offset; 1192 int offset;
1192 int size = 0; 1193 int size = 0;
1193 u32 units = nv_rd32 (ctx->device, 0x1540); 1194 u32 units = nvkm_rd32(device, 0x1540);
1194 1195
1195 offset = (ctx->ctxvals_pos+0x3f)&~0x3f; 1196 offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
1196 ctx->ctxvals_base = offset; 1197 ctx->ctxvals_base = offset;
@@ -3273,7 +3274,7 @@ nv50_gr_construct_xfer2(struct nvkm_grctx *ctx)
3273 struct nvkm_device *device = ctx->device; 3274 struct nvkm_device *device = ctx->device;
3274 int i; 3275 int i;
3275 u32 offset; 3276 u32 offset;
3276 u32 units = nv_rd32 (ctx->device, 0x1540); 3277 u32 units = nvkm_rd32(device, 0x1540);
3277 int size = 0; 3278 int size = 0;
3278 3279
3279 offset = (ctx->ctxvals_pos+0x3f)&~0x3f; 3280 offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
new file mode 100644
index 000000000000..ce913300539f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
@@ -0,0 +1,196 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "nv50.h"
25
26#include <subdev/timer.h>
27
28static const struct nvkm_bitfield nv50_gr_status[] = {
29 { 0x00000001, "BUSY" }, /* set when any bit is set */
30 { 0x00000002, "DISPATCH" },
31 { 0x00000004, "UNK2" },
32 { 0x00000008, "UNK3" },
33 { 0x00000010, "UNK4" },
34 { 0x00000020, "UNK5" },
35 { 0x00000040, "M2MF" },
36 { 0x00000080, "UNK7" },
37 { 0x00000100, "CTXPROG" },
38 { 0x00000200, "VFETCH" },
39 { 0x00000400, "CCACHE_PREGEOM" },
40 { 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
41 { 0x00001000, "VCLIP" },
42 { 0x00002000, "RATTR_APLANE" },
43 { 0x00004000, "TRAST" },
44 { 0x00008000, "CLIPID" },
45 { 0x00010000, "ZCULL" },
46 { 0x00020000, "ENG2D" },
47 { 0x00040000, "RMASK" },
48 { 0x00080000, "TPC_RAST" },
49 { 0x00100000, "TPC_PROP" },
50 { 0x00200000, "TPC_TEX" },
51 { 0x00400000, "TPC_GEOM" },
52 { 0x00800000, "TPC_MP" },
53 { 0x01000000, "ROP" },
54 {}
55};
56
57static const struct nvkm_bitfield
58nv50_gr_vstatus_0[] = {
59 { 0x01, "VFETCH" },
60 { 0x02, "CCACHE" },
61 { 0x04, "PREGEOM" },
62 { 0x08, "POSTGEOM" },
63 { 0x10, "VATTR" },
64 { 0x20, "STRMOUT" },
65 { 0x40, "VCLIP" },
66 {}
67};
68
69static const struct nvkm_bitfield
70nv50_gr_vstatus_1[] = {
71 { 0x01, "TPC_RAST" },
72 { 0x02, "TPC_PROP" },
73 { 0x04, "TPC_TEX" },
74 { 0x08, "TPC_GEOM" },
75 { 0x10, "TPC_MP" },
76 {}
77};
78
79static const struct nvkm_bitfield
80nv50_gr_vstatus_2[] = {
81 { 0x01, "RATTR" },
82 { 0x02, "APLANE" },
83 { 0x04, "TRAST" },
84 { 0x08, "CLIPID" },
85 { 0x10, "ZCULL" },
86 { 0x20, "ENG2D" },
87 { 0x40, "RMASK" },
88 { 0x80, "ROP" },
89 {}
90};
91
92static void
93nvkm_gr_vstatus_print(struct nv50_gr *gr, int r,
94 const struct nvkm_bitfield *units, u32 status)
95{
96 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
97 u32 stat = status;
98 u8 mask = 0x00;
99 char msg[64];
100 int i;
101
102 for (i = 0; units[i].name && status; i++) {
103 if ((status & 7) == 1)
104 mask |= (1 << i);
105 status >>= 3;
106 }
107
108 nvkm_snprintbf(msg, sizeof(msg), units, mask);
109 nvkm_error(subdev, "PGRAPH_VSTATUS%d: %08x [%s]\n", r, stat, msg);
110}
111
112int
113g84_gr_tlb_flush(struct nvkm_gr *base)
114{
115 struct nv50_gr *gr = nv50_gr(base);
116 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
117 struct nvkm_device *device = subdev->device;
118 struct nvkm_timer *tmr = device->timer;
119 bool idle, timeout = false;
120 unsigned long flags;
121 char status[128];
122 u64 start;
123 u32 tmp;
124
125 spin_lock_irqsave(&gr->lock, flags);
126 nvkm_mask(device, 0x400500, 0x00000001, 0x00000000);
127
128 start = nvkm_timer_read(tmr);
129 do {
130 idle = true;
131
132 for (tmp = nvkm_rd32(device, 0x400380); tmp && idle; tmp >>= 3) {
133 if ((tmp & 7) == 1)
134 idle = false;
135 }
136
137 for (tmp = nvkm_rd32(device, 0x400384); tmp && idle; tmp >>= 3) {
138 if ((tmp & 7) == 1)
139 idle = false;
140 }
141
142 for (tmp = nvkm_rd32(device, 0x400388); tmp && idle; tmp >>= 3) {
143 if ((tmp & 7) == 1)
144 idle = false;
145 }
146 } while (!idle &&
147 !(timeout = nvkm_timer_read(tmr) - start > 2000000000));
148
149 if (timeout) {
150 nvkm_error(subdev, "PGRAPH TLB flush idle timeout fail\n");
151
152 tmp = nvkm_rd32(device, 0x400700);
153 nvkm_snprintbf(status, sizeof(status), nv50_gr_status, tmp);
154 nvkm_error(subdev, "PGRAPH_STATUS %08x [%s]\n", tmp, status);
155
156 nvkm_gr_vstatus_print(gr, 0, nv50_gr_vstatus_0,
157 nvkm_rd32(device, 0x400380));
158 nvkm_gr_vstatus_print(gr, 1, nv50_gr_vstatus_1,
159 nvkm_rd32(device, 0x400384));
160 nvkm_gr_vstatus_print(gr, 2, nv50_gr_vstatus_2,
161 nvkm_rd32(device, 0x400388));
162 }
163
164
165 nvkm_wr32(device, 0x100c80, 0x00000001);
166 nvkm_msec(device, 2000,
167 if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
168 break;
169 );
170 nvkm_mask(device, 0x400500, 0x00000001, 0x00000001);
171 spin_unlock_irqrestore(&gr->lock, flags);
172 return timeout ? -EBUSY : 0;
173}
174
175static const struct nvkm_gr_func
176g84_gr = {
177 .init = nv50_gr_init,
178 .intr = nv50_gr_intr,
179 .chan_new = nv50_gr_chan_new,
180 .tlb_flush = g84_gr_tlb_flush,
181 .units = nv50_gr_units,
182 .sclass = {
183 { -1, -1, 0x0030, &nv50_gr_object },
184 { -1, -1, 0x502d, &nv50_gr_object },
185 { -1, -1, 0x5039, &nv50_gr_object },
186 { -1, -1, 0x50c0, &nv50_gr_object },
187 { -1, -1, 0x8297, &nv50_gr_object },
188 {}
189 }
190};
191
192int
193g84_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
194{
195 return nv50_gr_new_(&g84_gr, device, index, pgr);
196}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index ca11ddb6ed46..f1358a564e3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -26,13 +26,12 @@
26#include "fuc/os.h" 26#include "fuc/os.h"
27 27
28#include <core/client.h> 28#include <core/client.h>
29#include <core/device.h>
30#include <core/handle.h>
31#include <core/option.h> 29#include <core/option.h>
32#include <engine/fifo.h>
33#include <subdev/fb.h> 30#include <subdev/fb.h>
34#include <subdev/mc.h> 31#include <subdev/mc.h>
32#include <subdev/pmu.h>
35#include <subdev/timer.h> 33#include <subdev/timer.h>
34#include <engine/fifo.h>
36 35
37#include <nvif/class.h> 36#include <nvif/class.h>
38#include <nvif/unpack.h> 37#include <nvif/unpack.h>
@@ -42,35 +41,36 @@
42 ******************************************************************************/ 41 ******************************************************************************/
43 42
44static void 43static void
45gf100_gr_zbc_clear_color(struct gf100_gr_priv *priv, int zbc) 44gf100_gr_zbc_clear_color(struct gf100_gr *gr, int zbc)
46{ 45{
47 if (priv->zbc_color[zbc].format) { 46 struct nvkm_device *device = gr->base.engine.subdev.device;
48 nv_wr32(priv, 0x405804, priv->zbc_color[zbc].ds[0]); 47 if (gr->zbc_color[zbc].format) {
49 nv_wr32(priv, 0x405808, priv->zbc_color[zbc].ds[1]); 48 nvkm_wr32(device, 0x405804, gr->zbc_color[zbc].ds[0]);
50 nv_wr32(priv, 0x40580c, priv->zbc_color[zbc].ds[2]); 49 nvkm_wr32(device, 0x405808, gr->zbc_color[zbc].ds[1]);
51 nv_wr32(priv, 0x405810, priv->zbc_color[zbc].ds[3]); 50 nvkm_wr32(device, 0x40580c, gr->zbc_color[zbc].ds[2]);
52 } 51 nvkm_wr32(device, 0x405810, gr->zbc_color[zbc].ds[3]);
53 nv_wr32(priv, 0x405814, priv->zbc_color[zbc].format); 52 }
54 nv_wr32(priv, 0x405820, zbc); 53 nvkm_wr32(device, 0x405814, gr->zbc_color[zbc].format);
55 nv_wr32(priv, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */ 54 nvkm_wr32(device, 0x405820, zbc);
55 nvkm_wr32(device, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */
56} 56}
57 57
58static int 58static int
59gf100_gr_zbc_color_get(struct gf100_gr_priv *priv, int format, 59gf100_gr_zbc_color_get(struct gf100_gr *gr, int format,
60 const u32 ds[4], const u32 l2[4]) 60 const u32 ds[4], const u32 l2[4])
61{ 61{
62 struct nvkm_ltc *ltc = nvkm_ltc(priv); 62 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
63 int zbc = -ENOSPC, i; 63 int zbc = -ENOSPC, i;
64 64
65 for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) { 65 for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
66 if (priv->zbc_color[i].format) { 66 if (gr->zbc_color[i].format) {
67 if (priv->zbc_color[i].format != format) 67 if (gr->zbc_color[i].format != format)
68 continue; 68 continue;
69 if (memcmp(priv->zbc_color[i].ds, ds, sizeof( 69 if (memcmp(gr->zbc_color[i].ds, ds, sizeof(
70 priv->zbc_color[i].ds))) 70 gr->zbc_color[i].ds)))
71 continue; 71 continue;
72 if (memcmp(priv->zbc_color[i].l2, l2, sizeof( 72 if (memcmp(gr->zbc_color[i].l2, l2, sizeof(
73 priv->zbc_color[i].l2))) { 73 gr->zbc_color[i].l2))) {
74 WARN_ON(1); 74 WARN_ON(1);
75 return -EINVAL; 75 return -EINVAL;
76 } 76 }
@@ -83,38 +83,39 @@ gf100_gr_zbc_color_get(struct gf100_gr_priv *priv, int format,
83 if (zbc < 0) 83 if (zbc < 0)
84 return zbc; 84 return zbc;
85 85
86 memcpy(priv->zbc_color[zbc].ds, ds, sizeof(priv->zbc_color[zbc].ds)); 86 memcpy(gr->zbc_color[zbc].ds, ds, sizeof(gr->zbc_color[zbc].ds));
87 memcpy(priv->zbc_color[zbc].l2, l2, sizeof(priv->zbc_color[zbc].l2)); 87 memcpy(gr->zbc_color[zbc].l2, l2, sizeof(gr->zbc_color[zbc].l2));
88 priv->zbc_color[zbc].format = format; 88 gr->zbc_color[zbc].format = format;
89 ltc->zbc_color_get(ltc, zbc, l2); 89 nvkm_ltc_zbc_color_get(ltc, zbc, l2);
90 gf100_gr_zbc_clear_color(priv, zbc); 90 gf100_gr_zbc_clear_color(gr, zbc);
91 return zbc; 91 return zbc;
92} 92}
93 93
94static void 94static void
95gf100_gr_zbc_clear_depth(struct gf100_gr_priv *priv, int zbc) 95gf100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc)
96{ 96{
97 if (priv->zbc_depth[zbc].format) 97 struct nvkm_device *device = gr->base.engine.subdev.device;
98 nv_wr32(priv, 0x405818, priv->zbc_depth[zbc].ds); 98 if (gr->zbc_depth[zbc].format)
99 nv_wr32(priv, 0x40581c, priv->zbc_depth[zbc].format); 99 nvkm_wr32(device, 0x405818, gr->zbc_depth[zbc].ds);
100 nv_wr32(priv, 0x405820, zbc); 100 nvkm_wr32(device, 0x40581c, gr->zbc_depth[zbc].format);
101 nv_wr32(priv, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */ 101 nvkm_wr32(device, 0x405820, zbc);
102 nvkm_wr32(device, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */
102} 103}
103 104
104static int 105static int
105gf100_gr_zbc_depth_get(struct gf100_gr_priv *priv, int format, 106gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
106 const u32 ds, const u32 l2) 107 const u32 ds, const u32 l2)
107{ 108{
108 struct nvkm_ltc *ltc = nvkm_ltc(priv); 109 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
109 int zbc = -ENOSPC, i; 110 int zbc = -ENOSPC, i;
110 111
111 for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) { 112 for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
112 if (priv->zbc_depth[i].format) { 113 if (gr->zbc_depth[i].format) {
113 if (priv->zbc_depth[i].format != format) 114 if (gr->zbc_depth[i].format != format)
114 continue; 115 continue;
115 if (priv->zbc_depth[i].ds != ds) 116 if (gr->zbc_depth[i].ds != ds)
116 continue; 117 continue;
117 if (priv->zbc_depth[i].l2 != l2) { 118 if (gr->zbc_depth[i].l2 != l2) {
118 WARN_ON(1); 119 WARN_ON(1);
119 return -EINVAL; 120 return -EINVAL;
120 } 121 }
@@ -127,11 +128,11 @@ gf100_gr_zbc_depth_get(struct gf100_gr_priv *priv, int format,
127 if (zbc < 0) 128 if (zbc < 0)
128 return zbc; 129 return zbc;
129 130
130 priv->zbc_depth[zbc].format = format; 131 gr->zbc_depth[zbc].format = format;
131 priv->zbc_depth[zbc].ds = ds; 132 gr->zbc_depth[zbc].ds = ds;
132 priv->zbc_depth[zbc].l2 = l2; 133 gr->zbc_depth[zbc].l2 = l2;
133 ltc->zbc_depth_get(ltc, zbc, l2); 134 nvkm_ltc_zbc_depth_get(ltc, zbc, l2);
134 gf100_gr_zbc_clear_depth(priv, zbc); 135 gf100_gr_zbc_clear_depth(gr, zbc);
135 return zbc; 136 return zbc;
136} 137}
137 138
@@ -142,7 +143,7 @@ gf100_gr_zbc_depth_get(struct gf100_gr_priv *priv, int format,
142static int 143static int
143gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) 144gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
144{ 145{
145 struct gf100_gr_priv *priv = (void *)object->engine; 146 struct gf100_gr *gr = (void *)object->engine;
146 union { 147 union {
147 struct fermi_a_zbc_color_v0 v0; 148 struct fermi_a_zbc_color_v0 v0;
148 } *args = data; 149 } *args = data;
@@ -169,7 +170,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
169 case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8: 170 case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8:
170 case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10: 171 case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10:
171 case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11: 172 case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11:
172 ret = gf100_gr_zbc_color_get(priv, args->v0.format, 173 ret = gf100_gr_zbc_color_get(gr, args->v0.format,
173 args->v0.ds, 174 args->v0.ds,
174 args->v0.l2); 175 args->v0.l2);
175 if (ret >= 0) { 176 if (ret >= 0) {
@@ -188,7 +189,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
188static int 189static int
189gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size) 190gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
190{ 191{
191 struct gf100_gr_priv *priv = (void *)object->engine; 192 struct gf100_gr *gr = (void *)object->engine;
192 union { 193 union {
193 struct fermi_a_zbc_depth_v0 v0; 194 struct fermi_a_zbc_depth_v0 v0;
194 } *args = data; 195 } *args = data;
@@ -197,7 +198,7 @@ gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
197 if (nvif_unpack(args->v0, 0, 0, false)) { 198 if (nvif_unpack(args->v0, 0, 0, false)) {
198 switch (args->v0.format) { 199 switch (args->v0.format) {
199 case FERMI_A_ZBC_DEPTH_V0_FMT_FP32: 200 case FERMI_A_ZBC_DEPTH_V0_FMT_FP32:
200 ret = gf100_gr_zbc_depth_get(priv, args->v0.format, 201 ret = gf100_gr_zbc_depth_get(gr, args->v0.format,
201 args->v0.ds, 202 args->v0.ds,
202 args->v0.l2); 203 args->v0.l2);
203 return (ret >= 0) ? 0 : -ENOSPC; 204 return (ret >= 0) ? 0 : -ENOSPC;
@@ -223,106 +224,176 @@ gf100_fermi_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
223 return -EINVAL; 224 return -EINVAL;
224} 225}
225 226
226struct nvkm_ofuncs 227const struct nvkm_object_func
227gf100_fermi_ofuncs = { 228gf100_fermi = {
228 .ctor = _nvkm_object_ctor,
229 .dtor = nvkm_object_destroy,
230 .init = nvkm_object_init,
231 .fini = nvkm_object_fini,
232 .mthd = gf100_fermi_mthd, 229 .mthd = gf100_fermi_mthd,
233}; 230};
234 231
235static int 232static void
236gf100_gr_set_shader_exceptions(struct nvkm_object *object, u32 mthd, 233gf100_gr_mthd_set_shader_exceptions(struct nvkm_device *device, u32 data)
237 void *pdata, u32 size)
238{ 234{
239 struct gf100_gr_priv *priv = (void *)object->engine; 235 nvkm_wr32(device, 0x419e44, data ? 0xffffffff : 0x00000000);
240 if (size >= sizeof(u32)) { 236 nvkm_wr32(device, 0x419e4c, data ? 0xffffffff : 0x00000000);
241 u32 data = *(u32 *)pdata ? 0xffffffff : 0x00000000; 237}
242 nv_wr32(priv, 0x419e44, data); 238
243 nv_wr32(priv, 0x419e4c, data); 239static bool
244 return 0; 240gf100_gr_mthd_sw(struct nvkm_device *device, u16 class, u32 mthd, u32 data)
241{
242 switch (class & 0x00ff) {
243 case 0x97:
244 case 0xc0:
245 switch (mthd) {
246 case 0x1528:
247 gf100_gr_mthd_set_shader_exceptions(device, data);
248 return true;
249 default:
250 break;
251 }
252 break;
253 default:
254 break;
245 } 255 }
246 return -EINVAL; 256 return false;
247} 257}
248 258
249struct nvkm_omthds 259static int
250gf100_gr_9097_omthds[] = { 260gf100_gr_object_get(struct nvkm_gr *base, int index, struct nvkm_sclass *sclass)
251 { 0x1528, 0x1528, gf100_gr_set_shader_exceptions }, 261{
252 {} 262 struct gf100_gr *gr = gf100_gr(base);
253}; 263 int c = 0;
254 264
255struct nvkm_omthds 265 while (gr->func->sclass[c].oclass) {
256gf100_gr_90c0_omthds[] = { 266 if (c++ == index) {
257 { 0x1528, 0x1528, gf100_gr_set_shader_exceptions }, 267 *sclass = gr->func->sclass[index];
258 {} 268 return index;
259}; 269 }
270 }
260 271
261struct nvkm_oclass 272 return c;
262gf100_gr_sclass[] = { 273}
263 { FERMI_TWOD_A, &nvkm_object_ofuncs },
264 { FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
265 { FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
266 { FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
267 {}
268};
269 274
270/******************************************************************************* 275/*******************************************************************************
271 * PGRAPH context 276 * PGRAPH context
272 ******************************************************************************/ 277 ******************************************************************************/
273 278
274int 279static int
275gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 280gf100_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
276 struct nvkm_oclass *oclass, void *args, u32 size, 281 int align, struct nvkm_gpuobj **pgpuobj)
277 struct nvkm_object **pobject)
278{ 282{
279 struct nvkm_vm *vm = nvkm_client(parent)->vm; 283 struct gf100_gr_chan *chan = gf100_gr_chan(object);
280 struct gf100_gr_priv *priv = (void *)engine; 284 struct gf100_gr *gr = chan->gr;
281 struct gf100_gr_data *data = priv->mmio_data;
282 struct gf100_gr_mmio *mmio = priv->mmio_list;
283 struct gf100_gr_chan *chan;
284 int ret, i; 285 int ret, i;
285 286
286 /* allocate memory for context, and fill with default values */ 287 ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
287 ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 288 align, false, parent, pgpuobj);
288 priv->size, 0x100,
289 NVOBJ_FLAG_ZERO_ALLOC, &chan);
290 *pobject = nv_object(chan);
291 if (ret) 289 if (ret)
292 return ret; 290 return ret;
293 291
292 nvkm_kmap(*pgpuobj);
293 for (i = 0; i < gr->size; i += 4)
294 nvkm_wo32(*pgpuobj, i, gr->data[i / 4]);
295
296 if (!gr->firmware) {
297 nvkm_wo32(*pgpuobj, 0x00, chan->mmio_nr / 2);
298 nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma.offset >> 8);
299 } else {
300 nvkm_wo32(*pgpuobj, 0xf4, 0);
301 nvkm_wo32(*pgpuobj, 0xf8, 0);
302 nvkm_wo32(*pgpuobj, 0x10, chan->mmio_nr / 2);
303 nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma.offset));
304 nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma.offset));
305 nvkm_wo32(*pgpuobj, 0x1c, 1);
306 nvkm_wo32(*pgpuobj, 0x20, 0);
307 nvkm_wo32(*pgpuobj, 0x28, 0);
308 nvkm_wo32(*pgpuobj, 0x2c, 0);
309 }
310 nvkm_done(*pgpuobj);
311 return 0;
312}
313
314static void *
315gf100_gr_chan_dtor(struct nvkm_object *object)
316{
317 struct gf100_gr_chan *chan = gf100_gr_chan(object);
318 int i;
319
320 for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
321 if (chan->data[i].vma.node) {
322 nvkm_vm_unmap(&chan->data[i].vma);
323 nvkm_vm_put(&chan->data[i].vma);
324 }
325 nvkm_memory_del(&chan->data[i].mem);
326 }
327
328 if (chan->mmio_vma.node) {
329 nvkm_vm_unmap(&chan->mmio_vma);
330 nvkm_vm_put(&chan->mmio_vma);
331 }
332 nvkm_memory_del(&chan->mmio);
333 return chan;
334}
335
336static const struct nvkm_object_func
337gf100_gr_chan = {
338 .dtor = gf100_gr_chan_dtor,
339 .bind = gf100_gr_chan_bind,
340};
341
342static int
343gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
344 const struct nvkm_oclass *oclass,
345 struct nvkm_object **pobject)
346{
347 struct gf100_gr *gr = gf100_gr(base);
348 struct gf100_gr_data *data = gr->mmio_data;
349 struct gf100_gr_mmio *mmio = gr->mmio_list;
350 struct gf100_gr_chan *chan;
351 struct nvkm_device *device = gr->base.engine.subdev.device;
352 int ret, i;
353
354 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
355 return -ENOMEM;
356 nvkm_object_ctor(&gf100_gr_chan, oclass, &chan->object);
357 chan->gr = gr;
358 *pobject = &chan->object;
359
294 /* allocate memory for a "mmio list" buffer that's used by the HUB 360 /* allocate memory for a "mmio list" buffer that's used by the HUB
295 * fuc to modify some per-context register settings on first load 361 * fuc to modify some per-context register settings on first load
296 * of the context. 362 * of the context.
297 */ 363 */
298 ret = nvkm_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0, 364 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x100,
299 &chan->mmio); 365 false, &chan->mmio);
300 if (ret) 366 if (ret)
301 return ret; 367 return ret;
302 368
303 ret = nvkm_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm, 369 ret = nvkm_vm_get(fifoch->vm, 0x1000, 12, NV_MEM_ACCESS_RW |
304 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS, 370 NV_MEM_ACCESS_SYS, &chan->mmio_vma);
305 &chan->mmio_vma);
306 if (ret) 371 if (ret)
307 return ret; 372 return ret;
308 373
374 nvkm_memory_map(chan->mmio, &chan->mmio_vma, 0);
375
309 /* allocate buffers referenced by mmio list */ 376 /* allocate buffers referenced by mmio list */
310 for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) { 377 for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) {
311 ret = nvkm_gpuobj_new(nv_object(chan), NULL, data->size, 378 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
312 data->align, 0, &chan->data[i].mem); 379 data->size, data->align, false,
380 &chan->data[i].mem);
313 if (ret) 381 if (ret)
314 return ret; 382 return ret;
315 383
316 ret = nvkm_gpuobj_map_vm(chan->data[i].mem, vm, data->access, 384 ret = nvkm_vm_get(fifoch->vm,
317 &chan->data[i].vma); 385 nvkm_memory_size(chan->data[i].mem), 12,
386 data->access, &chan->data[i].vma);
318 if (ret) 387 if (ret)
319 return ret; 388 return ret;
320 389
390 nvkm_memory_map(chan->data[i].mem, &chan->data[i].vma, 0);
321 data++; 391 data++;
322 } 392 }
323 393
324 /* finally, fill in the mmio list and point the context at it */ 394 /* finally, fill in the mmio list and point the context at it */
325 for (i = 0; mmio->addr && i < ARRAY_SIZE(priv->mmio_list); i++) { 395 nvkm_kmap(chan->mmio);
396 for (i = 0; mmio->addr && i < ARRAY_SIZE(gr->mmio_list); i++) {
326 u32 addr = mmio->addr; 397 u32 addr = mmio->addr;
327 u32 data = mmio->data; 398 u32 data = mmio->data;
328 399
@@ -331,49 +402,14 @@ gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
331 data |= info >> mmio->shift; 402 data |= info >> mmio->shift;
332 } 403 }
333 404
334 nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr); 405 nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
335 nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data); 406 nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
336 mmio++; 407 mmio++;
337 } 408 }
338 409 nvkm_done(chan->mmio);
339 for (i = 0; i < priv->size; i += 4)
340 nv_wo32(chan, i, priv->data[i / 4]);
341
342 if (!priv->firmware) {
343 nv_wo32(chan, 0x00, chan->mmio_nr / 2);
344 nv_wo32(chan, 0x04, chan->mmio_vma.offset >> 8);
345 } else {
346 nv_wo32(chan, 0xf4, 0);
347 nv_wo32(chan, 0xf8, 0);
348 nv_wo32(chan, 0x10, chan->mmio_nr / 2);
349 nv_wo32(chan, 0x14, lower_32_bits(chan->mmio_vma.offset));
350 nv_wo32(chan, 0x18, upper_32_bits(chan->mmio_vma.offset));
351 nv_wo32(chan, 0x1c, 1);
352 nv_wo32(chan, 0x20, 0);
353 nv_wo32(chan, 0x28, 0);
354 nv_wo32(chan, 0x2c, 0);
355 }
356
357 return 0; 410 return 0;
358} 411}
359 412
360void
361gf100_gr_context_dtor(struct nvkm_object *object)
362{
363 struct gf100_gr_chan *chan = (void *)object;
364 int i;
365
366 for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
367 nvkm_gpuobj_unmap(&chan->data[i].vma);
368 nvkm_gpuobj_ref(NULL, &chan->data[i].mem);
369 }
370
371 nvkm_gpuobj_unmap(&chan->mmio_vma);
372 nvkm_gpuobj_ref(NULL, &chan->mmio);
373
374 nvkm_gr_context_destroy(&chan->base);
375}
376
377/******************************************************************************* 413/*******************************************************************************
378 * PGRAPH register lists 414 * PGRAPH register lists
379 ******************************************************************************/ 415 ******************************************************************************/
@@ -635,7 +671,7 @@ gf100_gr_pack_mmio[] = {
635 ******************************************************************************/ 671 ******************************************************************************/
636 672
637void 673void
638gf100_gr_zbc_init(struct gf100_gr_priv *priv) 674gf100_gr_zbc_init(struct gf100_gr *gr)
639{ 675{
640 const u32 zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 676 const u32 zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
641 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; 677 0x00000000, 0x00000000, 0x00000000, 0x00000000 };
@@ -645,22 +681,22 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
645 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; 681 0x00000000, 0x00000000, 0x00000000, 0x00000000 };
646 const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 682 const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
647 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 }; 683 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };
648 struct nvkm_ltc *ltc = nvkm_ltc(priv); 684 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
649 int index; 685 int index;
650 686
651 if (!priv->zbc_color[0].format) { 687 if (!gr->zbc_color[0].format) {
652 gf100_gr_zbc_color_get(priv, 1, & zero[0], &zero[4]); 688 gf100_gr_zbc_color_get(gr, 1, & zero[0], &zero[4]);
653 gf100_gr_zbc_color_get(priv, 2, & one[0], &one[4]); 689 gf100_gr_zbc_color_get(gr, 2, & one[0], &one[4]);
654 gf100_gr_zbc_color_get(priv, 4, &f32_0[0], &f32_0[4]); 690 gf100_gr_zbc_color_get(gr, 4, &f32_0[0], &f32_0[4]);
655 gf100_gr_zbc_color_get(priv, 4, &f32_1[0], &f32_1[4]); 691 gf100_gr_zbc_color_get(gr, 4, &f32_1[0], &f32_1[4]);
656 gf100_gr_zbc_depth_get(priv, 1, 0x00000000, 0x00000000); 692 gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000);
657 gf100_gr_zbc_depth_get(priv, 1, 0x3f800000, 0x3f800000); 693 gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000);
658 } 694 }
659 695
660 for (index = ltc->zbc_min; index <= ltc->zbc_max; index++) 696 for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
661 gf100_gr_zbc_clear_color(priv, index); 697 gf100_gr_zbc_clear_color(gr, index);
662 for (index = ltc->zbc_min; index <= ltc->zbc_max; index++) 698 for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
663 gf100_gr_zbc_clear_depth(priv, index); 699 gf100_gr_zbc_clear_depth(gr, index);
664} 700}
665 701
666/** 702/**
@@ -669,8 +705,10 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
669 * progress. 705 * progress.
670 */ 706 */
671int 707int
672gf100_gr_wait_idle(struct gf100_gr_priv *priv) 708gf100_gr_wait_idle(struct gf100_gr *gr)
673{ 709{
710 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
711 struct nvkm_device *device = subdev->device;
674 unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000); 712 unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
675 bool gr_enabled, ctxsw_active, gr_busy; 713 bool gr_enabled, ctxsw_active, gr_busy;
676 714
@@ -679,24 +717,26 @@ gf100_gr_wait_idle(struct gf100_gr_priv *priv)
679 * required to make sure FIFO_ENGINE_STATUS (0x2640) is 717 * required to make sure FIFO_ENGINE_STATUS (0x2640) is
680 * up-to-date 718 * up-to-date
681 */ 719 */
682 nv_rd32(priv, 0x400700); 720 nvkm_rd32(device, 0x400700);
683 721
684 gr_enabled = nv_rd32(priv, 0x200) & 0x1000; 722 gr_enabled = nvkm_rd32(device, 0x200) & 0x1000;
685 ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000; 723 ctxsw_active = nvkm_rd32(device, 0x2640) & 0x8000;
686 gr_busy = nv_rd32(priv, 0x40060c) & 0x1; 724 gr_busy = nvkm_rd32(device, 0x40060c) & 0x1;
687 725
688 if (!gr_enabled || (!gr_busy && !ctxsw_active)) 726 if (!gr_enabled || (!gr_busy && !ctxsw_active))
689 return 0; 727 return 0;
690 } while (time_before(jiffies, end_jiffies)); 728 } while (time_before(jiffies, end_jiffies));
691 729
692 nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n", 730 nvkm_error(subdev,
693 gr_enabled, ctxsw_active, gr_busy); 731 "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
732 gr_enabled, ctxsw_active, gr_busy);
694 return -EAGAIN; 733 return -EAGAIN;
695} 734}
696 735
697void 736void
698gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p) 737gf100_gr_mmio(struct gf100_gr *gr, const struct gf100_gr_pack *p)
699{ 738{
739 struct nvkm_device *device = gr->base.engine.subdev.device;
700 const struct gf100_gr_pack *pack; 740 const struct gf100_gr_pack *pack;
701 const struct gf100_gr_init *init; 741 const struct gf100_gr_init *init;
702 742
@@ -704,49 +744,54 @@ gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
704 u32 next = init->addr + init->count * init->pitch; 744 u32 next = init->addr + init->count * init->pitch;
705 u32 addr = init->addr; 745 u32 addr = init->addr;
706 while (addr < next) { 746 while (addr < next) {
707 nv_wr32(priv, addr, init->data); 747 nvkm_wr32(device, addr, init->data);
708 addr += init->pitch; 748 addr += init->pitch;
709 } 749 }
710 } 750 }
711} 751}
712 752
713void 753void
714gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p) 754gf100_gr_icmd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
715{ 755{
756 struct nvkm_device *device = gr->base.engine.subdev.device;
716 const struct gf100_gr_pack *pack; 757 const struct gf100_gr_pack *pack;
717 const struct gf100_gr_init *init; 758 const struct gf100_gr_init *init;
718 u32 data = 0; 759 u32 data = 0;
719 760
720 nv_wr32(priv, 0x400208, 0x80000000); 761 nvkm_wr32(device, 0x400208, 0x80000000);
721 762
722 pack_for_each_init(init, pack, p) { 763 pack_for_each_init(init, pack, p) {
723 u32 next = init->addr + init->count * init->pitch; 764 u32 next = init->addr + init->count * init->pitch;
724 u32 addr = init->addr; 765 u32 addr = init->addr;
725 766
726 if ((pack == p && init == p->init) || data != init->data) { 767 if ((pack == p && init == p->init) || data != init->data) {
727 nv_wr32(priv, 0x400204, init->data); 768 nvkm_wr32(device, 0x400204, init->data);
728 data = init->data; 769 data = init->data;
729 } 770 }
730 771
731 while (addr < next) { 772 while (addr < next) {
732 nv_wr32(priv, 0x400200, addr); 773 nvkm_wr32(device, 0x400200, addr);
733 /** 774 /**
734 * Wait for GR to go idle after submitting a 775 * Wait for GR to go idle after submitting a
735 * GO_IDLE bundle 776 * GO_IDLE bundle
736 */ 777 */
737 if ((addr & 0xffff) == 0xe100) 778 if ((addr & 0xffff) == 0xe100)
738 gf100_gr_wait_idle(priv); 779 gf100_gr_wait_idle(gr);
739 nv_wait(priv, 0x400700, 0x00000004, 0x00000000); 780 nvkm_msec(device, 2000,
781 if (!(nvkm_rd32(device, 0x400700) & 0x00000004))
782 break;
783 );
740 addr += init->pitch; 784 addr += init->pitch;
741 } 785 }
742 } 786 }
743 787
744 nv_wr32(priv, 0x400208, 0x00000000); 788 nvkm_wr32(device, 0x400208, 0x00000000);
745} 789}
746 790
747void 791void
748gf100_gr_mthd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p) 792gf100_gr_mthd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
749{ 793{
794 struct nvkm_device *device = gr->base.engine.subdev.device;
750 const struct gf100_gr_pack *pack; 795 const struct gf100_gr_pack *pack;
751 const struct gf100_gr_init *init; 796 const struct gf100_gr_init *init;
752 u32 data = 0; 797 u32 data = 0;
@@ -757,79 +802,75 @@ gf100_gr_mthd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
757 u32 addr = init->addr; 802 u32 addr = init->addr;
758 803
759 if ((pack == p && init == p->init) || data != init->data) { 804 if ((pack == p && init == p->init) || data != init->data) {
760 nv_wr32(priv, 0x40448c, init->data); 805 nvkm_wr32(device, 0x40448c, init->data);
761 data = init->data; 806 data = init->data;
762 } 807 }
763 808
764 while (addr < next) { 809 while (addr < next) {
765 nv_wr32(priv, 0x404488, ctrl | (addr << 14)); 810 nvkm_wr32(device, 0x404488, ctrl | (addr << 14));
766 addr += init->pitch; 811 addr += init->pitch;
767 } 812 }
768 } 813 }
769} 814}
770 815
771u64 816u64
772gf100_gr_units(struct nvkm_gr *gr) 817gf100_gr_units(struct nvkm_gr *base)
773{ 818{
774 struct gf100_gr_priv *priv = (void *)gr; 819 struct gf100_gr *gr = gf100_gr(base);
775 u64 cfg; 820 u64 cfg;
776 821
777 cfg = (u32)priv->gpc_nr; 822 cfg = (u32)gr->gpc_nr;
778 cfg |= (u32)priv->tpc_total << 8; 823 cfg |= (u32)gr->tpc_total << 8;
779 cfg |= (u64)priv->rop_nr << 32; 824 cfg |= (u64)gr->rop_nr << 32;
780 825
781 return cfg; 826 return cfg;
782} 827}
783 828
784static const struct nvkm_enum gk104_sked_error[] = { 829static const struct nvkm_bitfield gk104_sked_error[] = {
785 { 7, "CONSTANT_BUFFER_SIZE" }, 830 { 0x00000080, "CONSTANT_BUFFER_SIZE" },
786 { 9, "LOCAL_MEMORY_SIZE_POS" }, 831 { 0x00000200, "LOCAL_MEMORY_SIZE_POS" },
787 { 10, "LOCAL_MEMORY_SIZE_NEG" }, 832 { 0x00000400, "LOCAL_MEMORY_SIZE_NEG" },
788 { 11, "WARP_CSTACK_SIZE" }, 833 { 0x00000800, "WARP_CSTACK_SIZE" },
789 { 12, "TOTAL_TEMP_SIZE" }, 834 { 0x00001000, "TOTAL_TEMP_SIZE" },
790 { 13, "REGISTER_COUNT" }, 835 { 0x00002000, "REGISTER_COUNT" },
791 { 18, "TOTAL_THREADS" }, 836 { 0x00040000, "TOTAL_THREADS" },
792 { 20, "PROGRAM_OFFSET" }, 837 { 0x00100000, "PROGRAM_OFFSET" },
793 { 21, "SHARED_MEMORY_SIZE" }, 838 { 0x00200000, "SHARED_MEMORY_SIZE" },
794 { 25, "SHARED_CONFIG_TOO_SMALL" }, 839 { 0x02000000, "SHARED_CONFIG_TOO_SMALL" },
795 { 26, "TOTAL_REGISTER_COUNT" }, 840 { 0x04000000, "TOTAL_REGISTER_COUNT" },
796 {} 841 {}
797}; 842};
798 843
799static const struct nvkm_enum gf100_gpc_rop_error[] = { 844static const struct nvkm_bitfield gf100_gpc_rop_error[] = {
800 { 1, "RT_PITCH_OVERRUN" }, 845 { 0x00000002, "RT_PITCH_OVERRUN" },
801 { 4, "RT_WIDTH_OVERRUN" }, 846 { 0x00000010, "RT_WIDTH_OVERRUN" },
802 { 5, "RT_HEIGHT_OVERRUN" }, 847 { 0x00000020, "RT_HEIGHT_OVERRUN" },
803 { 7, "ZETA_STORAGE_TYPE_MISMATCH" }, 848 { 0x00000080, "ZETA_STORAGE_TYPE_MISMATCH" },
804 { 8, "RT_STORAGE_TYPE_MISMATCH" }, 849 { 0x00000100, "RT_STORAGE_TYPE_MISMATCH" },
805 { 10, "RT_LINEAR_MISMATCH" }, 850 { 0x00000400, "RT_LINEAR_MISMATCH" },
806 {} 851 {}
807}; 852};
808 853
809static void 854static void
810gf100_gr_trap_gpc_rop(struct gf100_gr_priv *priv, int gpc) 855gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
811{ 856{
857 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
858 struct nvkm_device *device = subdev->device;
859 char error[128];
812 u32 trap[4]; 860 u32 trap[4];
813 int i;
814 861
815 trap[0] = nv_rd32(priv, GPC_UNIT(gpc, 0x0420)); 862 trap[0] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0420)) & 0x3fffffff;
816 trap[1] = nv_rd32(priv, GPC_UNIT(gpc, 0x0434)); 863 trap[1] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0434));
817 trap[2] = nv_rd32(priv, GPC_UNIT(gpc, 0x0438)); 864 trap[2] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0438));
818 trap[3] = nv_rd32(priv, GPC_UNIT(gpc, 0x043c)); 865 trap[3] = nvkm_rd32(device, GPC_UNIT(gpc, 0x043c));
819 866
820 nv_error(priv, "GPC%d/PROP trap:", gpc); 867 nvkm_snprintbf(error, sizeof(error), gf100_gpc_rop_error, trap[0]);
821 for (i = 0; i <= 29; ++i) {
822 if (!(trap[0] & (1 << i)))
823 continue;
824 pr_cont(" ");
825 nvkm_enum_print(gf100_gpc_rop_error, i);
826 }
827 pr_cont("\n");
828 868
829 nv_error(priv, "x = %u, y = %u, format = %x, storage type = %x\n", 869 nvkm_error(subdev, "GPC%d/PROP trap: %08x [%s] x = %u, y = %u, "
830 trap[1] & 0xffff, trap[1] >> 16, (trap[2] >> 8) & 0x3f, 870 "format = %x, storage type = %x\n",
831 trap[3] & 0xff); 871 gpc, trap[0], error, trap[1] & 0xffff, trap[1] >> 16,
832 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000); 872 (trap[2] >> 8) & 0x3f, trap[3] & 0xff);
873 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
833} 874}
834 875
835static const struct nvkm_enum gf100_mp_warp_error[] = { 876static const struct nvkm_enum gf100_mp_warp_error[] = {
@@ -852,401 +893,418 @@ static const struct nvkm_bitfield gf100_mp_global_error[] = {
852}; 893};
853 894
854static void 895static void
855gf100_gr_trap_mp(struct gf100_gr_priv *priv, int gpc, int tpc) 896gf100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
856{ 897{
857 u32 werr = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x648)); 898 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
858 u32 gerr = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x650)); 899 struct nvkm_device *device = subdev->device;
859 900 u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x648));
860 nv_error(priv, "GPC%i/TPC%i/MP trap:", gpc, tpc); 901 u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x650));
861 nvkm_bitfield_print(gf100_mp_global_error, gerr); 902 const struct nvkm_enum *warp;
862 if (werr) { 903 char glob[128];
863 pr_cont(" "); 904
864 nvkm_enum_print(gf100_mp_warp_error, werr & 0xffff); 905 nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);
865 } 906 warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff);
866 pr_cont("\n"); 907
867 908 nvkm_error(subdev, "GPC%i/TPC%i/MP trap: "
868 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x648), 0x00000000); 909 "global %08x [%s] warp %04x [%s]\n",
869 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x650), gerr); 910 gpc, tpc, gerr, glob, werr, warp ? warp->name : "");
911
912 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x648), 0x00000000);
913 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x650), gerr);
870} 914}
871 915
872static void 916static void
873gf100_gr_trap_tpc(struct gf100_gr_priv *priv, int gpc, int tpc) 917gf100_gr_trap_tpc(struct gf100_gr *gr, int gpc, int tpc)
874{ 918{
875 u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0508)); 919 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
920 struct nvkm_device *device = subdev->device;
921 u32 stat = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0508));
876 922
877 if (stat & 0x00000001) { 923 if (stat & 0x00000001) {
878 u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0224)); 924 u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0224));
879 nv_error(priv, "GPC%d/TPC%d/TEX: 0x%08x\n", gpc, tpc, trap); 925 nvkm_error(subdev, "GPC%d/TPC%d/TEX: %08x\n", gpc, tpc, trap);
880 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000); 926 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000);
881 stat &= ~0x00000001; 927 stat &= ~0x00000001;
882 } 928 }
883 929
884 if (stat & 0x00000002) { 930 if (stat & 0x00000002) {
885 gf100_gr_trap_mp(priv, gpc, tpc); 931 gf100_gr_trap_mp(gr, gpc, tpc);
886 stat &= ~0x00000002; 932 stat &= ~0x00000002;
887 } 933 }
888 934
889 if (stat & 0x00000004) { 935 if (stat & 0x00000004) {
890 u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0084)); 936 u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0084));
891 nv_error(priv, "GPC%d/TPC%d/POLY: 0x%08x\n", gpc, tpc, trap); 937 nvkm_error(subdev, "GPC%d/TPC%d/POLY: %08x\n", gpc, tpc, trap);
892 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000); 938 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000);
893 stat &= ~0x00000004; 939 stat &= ~0x00000004;
894 } 940 }
895 941
896 if (stat & 0x00000008) { 942 if (stat & 0x00000008) {
897 u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x048c)); 943 u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x048c));
898 nv_error(priv, "GPC%d/TPC%d/L1C: 0x%08x\n", gpc, tpc, trap); 944 nvkm_error(subdev, "GPC%d/TPC%d/L1C: %08x\n", gpc, tpc, trap);
899 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000); 945 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000);
900 stat &= ~0x00000008; 946 stat &= ~0x00000008;
901 } 947 }
902 948
903 if (stat) { 949 if (stat) {
904 nv_error(priv, "GPC%d/TPC%d/0x%08x: unknown\n", gpc, tpc, stat); 950 nvkm_error(subdev, "GPC%d/TPC%d/%08x: unknown\n", gpc, tpc, stat);
905 } 951 }
906} 952}
907 953
908static void 954static void
909gf100_gr_trap_gpc(struct gf100_gr_priv *priv, int gpc) 955gf100_gr_trap_gpc(struct gf100_gr *gr, int gpc)
910{ 956{
911 u32 stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90)); 957 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
958 struct nvkm_device *device = subdev->device;
959 u32 stat = nvkm_rd32(device, GPC_UNIT(gpc, 0x2c90));
912 int tpc; 960 int tpc;
913 961
914 if (stat & 0x00000001) { 962 if (stat & 0x00000001) {
915 gf100_gr_trap_gpc_rop(priv, gpc); 963 gf100_gr_trap_gpc_rop(gr, gpc);
916 stat &= ~0x00000001; 964 stat &= ~0x00000001;
917 } 965 }
918 966
919 if (stat & 0x00000002) { 967 if (stat & 0x00000002) {
920 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900)); 968 u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0900));
921 nv_error(priv, "GPC%d/ZCULL: 0x%08x\n", gpc, trap); 969 nvkm_error(subdev, "GPC%d/ZCULL: %08x\n", gpc, trap);
922 nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000); 970 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
923 stat &= ~0x00000002; 971 stat &= ~0x00000002;
924 } 972 }
925 973
926 if (stat & 0x00000004) { 974 if (stat & 0x00000004) {
927 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028)); 975 u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x1028));
928 nv_error(priv, "GPC%d/CCACHE: 0x%08x\n", gpc, trap); 976 nvkm_error(subdev, "GPC%d/CCACHE: %08x\n", gpc, trap);
929 nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000); 977 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
930 stat &= ~0x00000004; 978 stat &= ~0x00000004;
931 } 979 }
932 980
933 if (stat & 0x00000008) { 981 if (stat & 0x00000008) {
934 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824)); 982 u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0824));
935 nv_error(priv, "GPC%d/ESETUP: 0x%08x\n", gpc, trap); 983 nvkm_error(subdev, "GPC%d/ESETUP: %08x\n", gpc, trap);
936 nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000); 984 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
937 stat &= ~0x00000009; 985 stat &= ~0x00000009;
938 } 986 }
939 987
940 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { 988 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
941 u32 mask = 0x00010000 << tpc; 989 u32 mask = 0x00010000 << tpc;
942 if (stat & mask) { 990 if (stat & mask) {
943 gf100_gr_trap_tpc(priv, gpc, tpc); 991 gf100_gr_trap_tpc(gr, gpc, tpc);
944 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), mask); 992 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), mask);
945 stat &= ~mask; 993 stat &= ~mask;
946 } 994 }
947 } 995 }
948 996
949 if (stat) { 997 if (stat) {
950 nv_error(priv, "GPC%d/0x%08x: unknown\n", gpc, stat); 998 nvkm_error(subdev, "GPC%d/%08x: unknown\n", gpc, stat);
951 } 999 }
952} 1000}
953 1001
954static void 1002static void
955gf100_gr_trap_intr(struct gf100_gr_priv *priv) 1003gf100_gr_trap_intr(struct gf100_gr *gr)
956{ 1004{
957 u32 trap = nv_rd32(priv, 0x400108); 1005 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
958 int rop, gpc, i; 1006 struct nvkm_device *device = subdev->device;
1007 u32 trap = nvkm_rd32(device, 0x400108);
1008 int rop, gpc;
959 1009
960 if (trap & 0x00000001) { 1010 if (trap & 0x00000001) {
961 u32 stat = nv_rd32(priv, 0x404000); 1011 u32 stat = nvkm_rd32(device, 0x404000);
962 nv_error(priv, "DISPATCH 0x%08x\n", stat); 1012 nvkm_error(subdev, "DISPATCH %08x\n", stat);
963 nv_wr32(priv, 0x404000, 0xc0000000); 1013 nvkm_wr32(device, 0x404000, 0xc0000000);
964 nv_wr32(priv, 0x400108, 0x00000001); 1014 nvkm_wr32(device, 0x400108, 0x00000001);
965 trap &= ~0x00000001; 1015 trap &= ~0x00000001;
966 } 1016 }
967 1017
968 if (trap & 0x00000002) { 1018 if (trap & 0x00000002) {
969 u32 stat = nv_rd32(priv, 0x404600); 1019 u32 stat = nvkm_rd32(device, 0x404600);
970 nv_error(priv, "M2MF 0x%08x\n", stat); 1020 nvkm_error(subdev, "M2MF %08x\n", stat);
971 nv_wr32(priv, 0x404600, 0xc0000000); 1021 nvkm_wr32(device, 0x404600, 0xc0000000);
972 nv_wr32(priv, 0x400108, 0x00000002); 1022 nvkm_wr32(device, 0x400108, 0x00000002);
973 trap &= ~0x00000002; 1023 trap &= ~0x00000002;
974 } 1024 }
975 1025
976 if (trap & 0x00000008) { 1026 if (trap & 0x00000008) {
977 u32 stat = nv_rd32(priv, 0x408030); 1027 u32 stat = nvkm_rd32(device, 0x408030);
978 nv_error(priv, "CCACHE 0x%08x\n", stat); 1028 nvkm_error(subdev, "CCACHE %08x\n", stat);
979 nv_wr32(priv, 0x408030, 0xc0000000); 1029 nvkm_wr32(device, 0x408030, 0xc0000000);
980 nv_wr32(priv, 0x400108, 0x00000008); 1030 nvkm_wr32(device, 0x400108, 0x00000008);
981 trap &= ~0x00000008; 1031 trap &= ~0x00000008;
982 } 1032 }
983 1033
984 if (trap & 0x00000010) { 1034 if (trap & 0x00000010) {
985 u32 stat = nv_rd32(priv, 0x405840); 1035 u32 stat = nvkm_rd32(device, 0x405840);
986 nv_error(priv, "SHADER 0x%08x\n", stat); 1036 nvkm_error(subdev, "SHADER %08x\n", stat);
987 nv_wr32(priv, 0x405840, 0xc0000000); 1037 nvkm_wr32(device, 0x405840, 0xc0000000);
988 nv_wr32(priv, 0x400108, 0x00000010); 1038 nvkm_wr32(device, 0x400108, 0x00000010);
989 trap &= ~0x00000010; 1039 trap &= ~0x00000010;
990 } 1040 }
991 1041
992 if (trap & 0x00000040) { 1042 if (trap & 0x00000040) {
993 u32 stat = nv_rd32(priv, 0x40601c); 1043 u32 stat = nvkm_rd32(device, 0x40601c);
994 nv_error(priv, "UNK6 0x%08x\n", stat); 1044 nvkm_error(subdev, "UNK6 %08x\n", stat);
995 nv_wr32(priv, 0x40601c, 0xc0000000); 1045 nvkm_wr32(device, 0x40601c, 0xc0000000);
996 nv_wr32(priv, 0x400108, 0x00000040); 1046 nvkm_wr32(device, 0x400108, 0x00000040);
997 trap &= ~0x00000040; 1047 trap &= ~0x00000040;
998 } 1048 }
999 1049
1000 if (trap & 0x00000080) { 1050 if (trap & 0x00000080) {
1001 u32 stat = nv_rd32(priv, 0x404490); 1051 u32 stat = nvkm_rd32(device, 0x404490);
1002 nv_error(priv, "MACRO 0x%08x\n", stat); 1052 nvkm_error(subdev, "MACRO %08x\n", stat);
1003 nv_wr32(priv, 0x404490, 0xc0000000); 1053 nvkm_wr32(device, 0x404490, 0xc0000000);
1004 nv_wr32(priv, 0x400108, 0x00000080); 1054 nvkm_wr32(device, 0x400108, 0x00000080);
1005 trap &= ~0x00000080; 1055 trap &= ~0x00000080;
1006 } 1056 }
1007 1057
1008 if (trap & 0x00000100) { 1058 if (trap & 0x00000100) {
1009 u32 stat = nv_rd32(priv, 0x407020); 1059 u32 stat = nvkm_rd32(device, 0x407020) & 0x3fffffff;
1060 char sked[128];
1010 1061
1011 nv_error(priv, "SKED:"); 1062 nvkm_snprintbf(sked, sizeof(sked), gk104_sked_error, stat);
1012 for (i = 0; i <= 29; ++i) { 1063 nvkm_error(subdev, "SKED: %08x [%s]\n", stat, sked);
1013 if (!(stat & (1 << i)))
1014 continue;
1015 pr_cont(" ");
1016 nvkm_enum_print(gk104_sked_error, i);
1017 }
1018 pr_cont("\n");
1019 1064
1020 if (stat & 0x3fffffff) 1065 if (stat)
1021 nv_wr32(priv, 0x407020, 0x40000000); 1066 nvkm_wr32(device, 0x407020, 0x40000000);
1022 nv_wr32(priv, 0x400108, 0x00000100); 1067 nvkm_wr32(device, 0x400108, 0x00000100);
1023 trap &= ~0x00000100; 1068 trap &= ~0x00000100;
1024 } 1069 }
1025 1070
1026 if (trap & 0x01000000) { 1071 if (trap & 0x01000000) {
1027 u32 stat = nv_rd32(priv, 0x400118); 1072 u32 stat = nvkm_rd32(device, 0x400118);
1028 for (gpc = 0; stat && gpc < priv->gpc_nr; gpc++) { 1073 for (gpc = 0; stat && gpc < gr->gpc_nr; gpc++) {
1029 u32 mask = 0x00000001 << gpc; 1074 u32 mask = 0x00000001 << gpc;
1030 if (stat & mask) { 1075 if (stat & mask) {
1031 gf100_gr_trap_gpc(priv, gpc); 1076 gf100_gr_trap_gpc(gr, gpc);
1032 nv_wr32(priv, 0x400118, mask); 1077 nvkm_wr32(device, 0x400118, mask);
1033 stat &= ~mask; 1078 stat &= ~mask;
1034 } 1079 }
1035 } 1080 }
1036 nv_wr32(priv, 0x400108, 0x01000000); 1081 nvkm_wr32(device, 0x400108, 0x01000000);
1037 trap &= ~0x01000000; 1082 trap &= ~0x01000000;
1038 } 1083 }
1039 1084
1040 if (trap & 0x02000000) { 1085 if (trap & 0x02000000) {
1041 for (rop = 0; rop < priv->rop_nr; rop++) { 1086 for (rop = 0; rop < gr->rop_nr; rop++) {
1042 u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070)); 1087 u32 statz = nvkm_rd32(device, ROP_UNIT(rop, 0x070));
1043 u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144)); 1088 u32 statc = nvkm_rd32(device, ROP_UNIT(rop, 0x144));
1044 nv_error(priv, "ROP%d 0x%08x 0x%08x\n", 1089 nvkm_error(subdev, "ROP%d %08x %08x\n",
1045 rop, statz, statc); 1090 rop, statz, statc);
1046 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000); 1091 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
1047 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000); 1092 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
1048 } 1093 }
1049 nv_wr32(priv, 0x400108, 0x02000000); 1094 nvkm_wr32(device, 0x400108, 0x02000000);
1050 trap &= ~0x02000000; 1095 trap &= ~0x02000000;
1051 } 1096 }
1052 1097
1053 if (trap) { 1098 if (trap) {
1054 nv_error(priv, "TRAP UNHANDLED 0x%08x\n", trap); 1099 nvkm_error(subdev, "TRAP UNHANDLED %08x\n", trap);
1055 nv_wr32(priv, 0x400108, trap); 1100 nvkm_wr32(device, 0x400108, trap);
1056 } 1101 }
1057} 1102}
1058 1103
1059static void 1104static void
1060gf100_gr_ctxctl_debug_unit(struct gf100_gr_priv *priv, u32 base) 1105gf100_gr_ctxctl_debug_unit(struct gf100_gr *gr, u32 base)
1061{ 1106{
1062 nv_error(priv, "%06x - done 0x%08x\n", base, 1107 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
1063 nv_rd32(priv, base + 0x400)); 1108 struct nvkm_device *device = subdev->device;
1064 nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base, 1109 nvkm_error(subdev, "%06x - done %08x\n", base,
1065 nv_rd32(priv, base + 0x800), nv_rd32(priv, base + 0x804), 1110 nvkm_rd32(device, base + 0x400));
1066 nv_rd32(priv, base + 0x808), nv_rd32(priv, base + 0x80c)); 1111 nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base,
1067 nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base, 1112 nvkm_rd32(device, base + 0x800),
1068 nv_rd32(priv, base + 0x810), nv_rd32(priv, base + 0x814), 1113 nvkm_rd32(device, base + 0x804),
1069 nv_rd32(priv, base + 0x818), nv_rd32(priv, base + 0x81c)); 1114 nvkm_rd32(device, base + 0x808),
1115 nvkm_rd32(device, base + 0x80c));
1116 nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base,
1117 nvkm_rd32(device, base + 0x810),
1118 nvkm_rd32(device, base + 0x814),
1119 nvkm_rd32(device, base + 0x818),
1120 nvkm_rd32(device, base + 0x81c));
1070} 1121}
1071 1122
1072void 1123void
1073gf100_gr_ctxctl_debug(struct gf100_gr_priv *priv) 1124gf100_gr_ctxctl_debug(struct gf100_gr *gr)
1074{ 1125{
1075 u32 gpcnr = nv_rd32(priv, 0x409604) & 0xffff; 1126 struct nvkm_device *device = gr->base.engine.subdev.device;
1127 u32 gpcnr = nvkm_rd32(device, 0x409604) & 0xffff;
1076 u32 gpc; 1128 u32 gpc;
1077 1129
1078 gf100_gr_ctxctl_debug_unit(priv, 0x409000); 1130 gf100_gr_ctxctl_debug_unit(gr, 0x409000);
1079 for (gpc = 0; gpc < gpcnr; gpc++) 1131 for (gpc = 0; gpc < gpcnr; gpc++)
1080 gf100_gr_ctxctl_debug_unit(priv, 0x502000 + (gpc * 0x8000)); 1132 gf100_gr_ctxctl_debug_unit(gr, 0x502000 + (gpc * 0x8000));
1081} 1133}
1082 1134
1083static void 1135static void
1084gf100_gr_ctxctl_isr(struct gf100_gr_priv *priv) 1136gf100_gr_ctxctl_isr(struct gf100_gr *gr)
1085{ 1137{
1086 u32 stat = nv_rd32(priv, 0x409c18); 1138 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
1139 struct nvkm_device *device = subdev->device;
1140 u32 stat = nvkm_rd32(device, 0x409c18);
1087 1141
1088 if (stat & 0x00000001) { 1142 if (stat & 0x00000001) {
1089 u32 code = nv_rd32(priv, 0x409814); 1143 u32 code = nvkm_rd32(device, 0x409814);
1090 if (code == E_BAD_FWMTHD) { 1144 if (code == E_BAD_FWMTHD) {
1091 u32 class = nv_rd32(priv, 0x409808); 1145 u32 class = nvkm_rd32(device, 0x409808);
1092 u32 addr = nv_rd32(priv, 0x40980c); 1146 u32 addr = nvkm_rd32(device, 0x40980c);
1093 u32 subc = (addr & 0x00070000) >> 16; 1147 u32 subc = (addr & 0x00070000) >> 16;
1094 u32 mthd = (addr & 0x00003ffc); 1148 u32 mthd = (addr & 0x00003ffc);
1095 u32 data = nv_rd32(priv, 0x409810); 1149 u32 data = nvkm_rd32(device, 0x409810);
1096 1150
1097 nv_error(priv, "FECS MTHD subc %d class 0x%04x " 1151 nvkm_error(subdev, "FECS MTHD subc %d class %04x "
1098 "mthd 0x%04x data 0x%08x\n", 1152 "mthd %04x data %08x\n",
1099 subc, class, mthd, data); 1153 subc, class, mthd, data);
1100 1154
1101 nv_wr32(priv, 0x409c20, 0x00000001); 1155 nvkm_wr32(device, 0x409c20, 0x00000001);
1102 stat &= ~0x00000001; 1156 stat &= ~0x00000001;
1103 } else { 1157 } else {
1104 nv_error(priv, "FECS ucode error %d\n", code); 1158 nvkm_error(subdev, "FECS ucode error %d\n", code);
1105 } 1159 }
1106 } 1160 }
1107 1161
1108 if (stat & 0x00080000) { 1162 if (stat & 0x00080000) {
1109 nv_error(priv, "FECS watchdog timeout\n"); 1163 nvkm_error(subdev, "FECS watchdog timeout\n");
1110 gf100_gr_ctxctl_debug(priv); 1164 gf100_gr_ctxctl_debug(gr);
1111 nv_wr32(priv, 0x409c20, 0x00080000); 1165 nvkm_wr32(device, 0x409c20, 0x00080000);
1112 stat &= ~0x00080000; 1166 stat &= ~0x00080000;
1113 } 1167 }
1114 1168
1115 if (stat) { 1169 if (stat) {
1116 nv_error(priv, "FECS 0x%08x\n", stat); 1170 nvkm_error(subdev, "FECS %08x\n", stat);
1117 gf100_gr_ctxctl_debug(priv); 1171 gf100_gr_ctxctl_debug(gr);
1118 nv_wr32(priv, 0x409c20, stat); 1172 nvkm_wr32(device, 0x409c20, stat);
1119 } 1173 }
1120} 1174}
1121 1175
1122static void 1176static void
1123gf100_gr_intr(struct nvkm_subdev *subdev) 1177gf100_gr_intr(struct nvkm_gr *base)
1124{ 1178{
1125 struct nvkm_fifo *pfifo = nvkm_fifo(subdev); 1179 struct gf100_gr *gr = gf100_gr(base);
1126 struct nvkm_engine *engine = nv_engine(subdev); 1180 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
1127 struct nvkm_object *engctx; 1181 struct nvkm_device *device = subdev->device;
1128 struct nvkm_handle *handle; 1182 struct nvkm_fifo_chan *chan;
1129 struct gf100_gr_priv *priv = (void *)subdev; 1183 unsigned long flags;
1130 u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff; 1184 u64 inst = nvkm_rd32(device, 0x409b00) & 0x0fffffff;
1131 u32 stat = nv_rd32(priv, 0x400100); 1185 u32 stat = nvkm_rd32(device, 0x400100);
1132 u32 addr = nv_rd32(priv, 0x400704); 1186 u32 addr = nvkm_rd32(device, 0x400704);
1133 u32 mthd = (addr & 0x00003ffc); 1187 u32 mthd = (addr & 0x00003ffc);
1134 u32 subc = (addr & 0x00070000) >> 16; 1188 u32 subc = (addr & 0x00070000) >> 16;
1135 u32 data = nv_rd32(priv, 0x400708); 1189 u32 data = nvkm_rd32(device, 0x400708);
1136 u32 code = nv_rd32(priv, 0x400110); 1190 u32 code = nvkm_rd32(device, 0x400110);
1137 u32 class; 1191 u32 class;
1138 int chid; 1192 const char *name = "unknown";
1193 int chid = -1;
1194
1195 chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
1196 if (chan) {
1197 name = chan->object.client->name;
1198 chid = chan->chid;
1199 }
1139 1200
1140 if (nv_device(priv)->card_type < NV_E0 || subc < 4) 1201 if (device->card_type < NV_E0 || subc < 4)
1141 class = nv_rd32(priv, 0x404200 + (subc * 4)); 1202 class = nvkm_rd32(device, 0x404200 + (subc * 4));
1142 else 1203 else
1143 class = 0x0000; 1204 class = 0x0000;
1144 1205
1145 engctx = nvkm_engctx_get(engine, inst);
1146 chid = pfifo->chid(pfifo, engctx);
1147
1148 if (stat & 0x00000001) { 1206 if (stat & 0x00000001) {
1149 /* 1207 /*
1150 * notifier interrupt, only needed for cyclestats 1208 * notifier interrupt, only needed for cyclestats
1151 * can be safely ignored 1209 * can be safely ignored
1152 */ 1210 */
1153 nv_wr32(priv, 0x400100, 0x00000001); 1211 nvkm_wr32(device, 0x400100, 0x00000001);
1154 stat &= ~0x00000001; 1212 stat &= ~0x00000001;
1155 } 1213 }
1156 1214
1157 if (stat & 0x00000010) { 1215 if (stat & 0x00000010) {
1158 handle = nvkm_handle_get_class(engctx, class); 1216 if (!gf100_gr_mthd_sw(device, class, mthd, data)) {
1159 if (!handle || nv_call(handle->object, mthd, data)) { 1217 nvkm_error(subdev, "ILLEGAL_MTHD ch %d [%010llx %s] "
1160 nv_error(priv, 1218 "subc %d class %04x mthd %04x data %08x\n",
1161 "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", 1219 chid, inst << 12, name, subc,
1162 chid, inst << 12, nvkm_client_name(engctx), 1220 class, mthd, data);
1163 subc, class, mthd, data);
1164 } 1221 }
1165 nvkm_handle_put(handle); 1222 nvkm_wr32(device, 0x400100, 0x00000010);
1166 nv_wr32(priv, 0x400100, 0x00000010);
1167 stat &= ~0x00000010; 1223 stat &= ~0x00000010;
1168 } 1224 }
1169 1225
1170 if (stat & 0x00000020) { 1226 if (stat & 0x00000020) {
1171 nv_error(priv, 1227 nvkm_error(subdev, "ILLEGAL_CLASS ch %d [%010llx %s] "
1172 "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", 1228 "subc %d class %04x mthd %04x data %08x\n",
1173 chid, inst << 12, nvkm_client_name(engctx), subc, 1229 chid, inst << 12, name, subc, class, mthd, data);
1174 class, mthd, data); 1230 nvkm_wr32(device, 0x400100, 0x00000020);
1175 nv_wr32(priv, 0x400100, 0x00000020);
1176 stat &= ~0x00000020; 1231 stat &= ~0x00000020;
1177 } 1232 }
1178 1233
1179 if (stat & 0x00100000) { 1234 if (stat & 0x00100000) {
1180 nv_error(priv, "DATA_ERROR ["); 1235 const struct nvkm_enum *en =
1181 nvkm_enum_print(nv50_data_error_names, code); 1236 nvkm_enum_find(nv50_data_error_names, code);
1182 pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", 1237 nvkm_error(subdev, "DATA_ERROR %08x [%s] ch %d [%010llx %s] "
1183 chid, inst << 12, nvkm_client_name(engctx), subc, 1238 "subc %d class %04x mthd %04x data %08x\n",
1184 class, mthd, data); 1239 code, en ? en->name : "", chid, inst << 12,
1185 nv_wr32(priv, 0x400100, 0x00100000); 1240 name, subc, class, mthd, data);
1241 nvkm_wr32(device, 0x400100, 0x00100000);
1186 stat &= ~0x00100000; 1242 stat &= ~0x00100000;
1187 } 1243 }
1188 1244
1189 if (stat & 0x00200000) { 1245 if (stat & 0x00200000) {
1190 nv_error(priv, "TRAP ch %d [0x%010llx %s]\n", chid, inst << 12, 1246 nvkm_error(subdev, "TRAP ch %d [%010llx %s]\n",
1191 nvkm_client_name(engctx)); 1247 chid, inst << 12, name);
1192 gf100_gr_trap_intr(priv); 1248 gf100_gr_trap_intr(gr);
1193 nv_wr32(priv, 0x400100, 0x00200000); 1249 nvkm_wr32(device, 0x400100, 0x00200000);
1194 stat &= ~0x00200000; 1250 stat &= ~0x00200000;
1195 } 1251 }
1196 1252
1197 if (stat & 0x00080000) { 1253 if (stat & 0x00080000) {
1198 gf100_gr_ctxctl_isr(priv); 1254 gf100_gr_ctxctl_isr(gr);
1199 nv_wr32(priv, 0x400100, 0x00080000); 1255 nvkm_wr32(device, 0x400100, 0x00080000);
1200 stat &= ~0x00080000; 1256 stat &= ~0x00080000;
1201 } 1257 }
1202 1258
1203 if (stat) { 1259 if (stat) {
1204 nv_error(priv, "unknown stat 0x%08x\n", stat); 1260 nvkm_error(subdev, "intr %08x\n", stat);
1205 nv_wr32(priv, 0x400100, stat); 1261 nvkm_wr32(device, 0x400100, stat);
1206 } 1262 }
1207 1263
1208 nv_wr32(priv, 0x400500, 0x00010001); 1264 nvkm_wr32(device, 0x400500, 0x00010001);
1209 nvkm_engctx_put(engctx); 1265 nvkm_fifo_chan_put(device->fifo, flags, &chan);
1210} 1266}
1211 1267
1212void 1268void
1213gf100_gr_init_fw(struct gf100_gr_priv *priv, u32 fuc_base, 1269gf100_gr_init_fw(struct gf100_gr *gr, u32 fuc_base,
1214 struct gf100_gr_fuc *code, struct gf100_gr_fuc *data) 1270 struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
1215{ 1271{
1272 struct nvkm_device *device = gr->base.engine.subdev.device;
1216 int i; 1273 int i;
1217 1274
1218 nv_wr32(priv, fuc_base + 0x01c0, 0x01000000); 1275 nvkm_wr32(device, fuc_base + 0x01c0, 0x01000000);
1219 for (i = 0; i < data->size / 4; i++) 1276 for (i = 0; i < data->size / 4; i++)
1220 nv_wr32(priv, fuc_base + 0x01c4, data->data[i]); 1277 nvkm_wr32(device, fuc_base + 0x01c4, data->data[i]);
1221 1278
1222 nv_wr32(priv, fuc_base + 0x0180, 0x01000000); 1279 nvkm_wr32(device, fuc_base + 0x0180, 0x01000000);
1223 for (i = 0; i < code->size / 4; i++) { 1280 for (i = 0; i < code->size / 4; i++) {
1224 if ((i & 0x3f) == 0) 1281 if ((i & 0x3f) == 0)
1225 nv_wr32(priv, fuc_base + 0x0188, i >> 6); 1282 nvkm_wr32(device, fuc_base + 0x0188, i >> 6);
1226 nv_wr32(priv, fuc_base + 0x0184, code->data[i]); 1283 nvkm_wr32(device, fuc_base + 0x0184, code->data[i]);
1227 } 1284 }
1228 1285
1229 /* code must be padded to 0x40 words */ 1286 /* code must be padded to 0x40 words */
1230 for (; i & 0x3f; i++) 1287 for (; i & 0x3f; i++)
1231 nv_wr32(priv, fuc_base + 0x0184, 0); 1288 nvkm_wr32(device, fuc_base + 0x0184, 0);
1232} 1289}
1233 1290
1234static void 1291static void
1235gf100_gr_init_csdata(struct gf100_gr_priv *priv, 1292gf100_gr_init_csdata(struct gf100_gr *gr,
1236 const struct gf100_gr_pack *pack, 1293 const struct gf100_gr_pack *pack,
1237 u32 falcon, u32 starstar, u32 base) 1294 u32 falcon, u32 starstar, u32 base)
1238{ 1295{
1296 struct nvkm_device *device = gr->base.engine.subdev.device;
1239 const struct gf100_gr_pack *iter; 1297 const struct gf100_gr_pack *iter;
1240 const struct gf100_gr_init *init; 1298 const struct gf100_gr_init *init;
1241 u32 addr = ~0, prev = ~0, xfer = 0; 1299 u32 addr = ~0, prev = ~0, xfer = 0;
1242 u32 star, temp; 1300 u32 star, temp;
1243 1301
1244 nv_wr32(priv, falcon + 0x01c0, 0x02000000 + starstar); 1302 nvkm_wr32(device, falcon + 0x01c0, 0x02000000 + starstar);
1245 star = nv_rd32(priv, falcon + 0x01c4); 1303 star = nvkm_rd32(device, falcon + 0x01c4);
1246 temp = nv_rd32(priv, falcon + 0x01c4); 1304 temp = nvkm_rd32(device, falcon + 0x01c4);
1247 if (temp > star) 1305 if (temp > star)
1248 star = temp; 1306 star = temp;
1249 nv_wr32(priv, falcon + 0x01c0, 0x01000000 + star); 1307 nvkm_wr32(device, falcon + 0x01c0, 0x01000000 + star);
1250 1308
1251 pack_for_each_init(init, iter, pack) { 1309 pack_for_each_init(init, iter, pack) {
1252 u32 head = init->addr - base; 1310 u32 head = init->addr - base;
@@ -1255,7 +1313,7 @@ gf100_gr_init_csdata(struct gf100_gr_priv *priv,
1255 if (head != prev + 4 || xfer >= 32) { 1313 if (head != prev + 4 || xfer >= 32) {
1256 if (xfer) { 1314 if (xfer) {
1257 u32 data = ((--xfer << 26) | addr); 1315 u32 data = ((--xfer << 26) | addr);
1258 nv_wr32(priv, falcon + 0x01c4, data); 1316 nvkm_wr32(device, falcon + 0x01c4, data);
1259 star += 4; 1317 star += 4;
1260 } 1318 }
1261 addr = head; 1319 addr = head;
@@ -1267,157 +1325,166 @@ gf100_gr_init_csdata(struct gf100_gr_priv *priv,
1267 } 1325 }
1268 } 1326 }
1269 1327
1270 nv_wr32(priv, falcon + 0x01c4, (--xfer << 26) | addr); 1328 nvkm_wr32(device, falcon + 0x01c4, (--xfer << 26) | addr);
1271 nv_wr32(priv, falcon + 0x01c0, 0x01000004 + starstar); 1329 nvkm_wr32(device, falcon + 0x01c0, 0x01000004 + starstar);
1272 nv_wr32(priv, falcon + 0x01c4, star + 4); 1330 nvkm_wr32(device, falcon + 0x01c4, star + 4);
1273} 1331}
1274 1332
1275int 1333int
1276gf100_gr_init_ctxctl(struct gf100_gr_priv *priv) 1334gf100_gr_init_ctxctl(struct gf100_gr *gr)
1277{ 1335{
1278 struct gf100_gr_oclass *oclass = (void *)nv_object(priv)->oclass; 1336 const struct gf100_grctx_func *grctx = gr->func->grctx;
1279 struct gf100_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass; 1337 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
1338 struct nvkm_device *device = subdev->device;
1280 int i; 1339 int i;
1281 1340
1282 if (priv->firmware) { 1341 if (gr->firmware) {
1283 /* load fuc microcode */ 1342 /* load fuc microcode */
1284 nvkm_mc(priv)->unk260(nvkm_mc(priv), 0); 1343 nvkm_mc_unk260(device->mc, 0);
1285 gf100_gr_init_fw(priv, 0x409000, &priv->fuc409c, 1344 gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c, &gr->fuc409d);
1286 &priv->fuc409d); 1345 gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac, &gr->fuc41ad);
1287 gf100_gr_init_fw(priv, 0x41a000, &priv->fuc41ac, 1346 nvkm_mc_unk260(device->mc, 1);
1288 &priv->fuc41ad);
1289 nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
1290 1347
1291 /* start both of them running */ 1348 /* start both of them running */
1292 nv_wr32(priv, 0x409840, 0xffffffff); 1349 nvkm_wr32(device, 0x409840, 0xffffffff);
1293 nv_wr32(priv, 0x41a10c, 0x00000000); 1350 nvkm_wr32(device, 0x41a10c, 0x00000000);
1294 nv_wr32(priv, 0x40910c, 0x00000000); 1351 nvkm_wr32(device, 0x40910c, 0x00000000);
1295 nv_wr32(priv, 0x41a100, 0x00000002); 1352 nvkm_wr32(device, 0x41a100, 0x00000002);
1296 nv_wr32(priv, 0x409100, 0x00000002); 1353 nvkm_wr32(device, 0x409100, 0x00000002);
1297 if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001)) 1354 if (nvkm_msec(device, 2000,
1298 nv_warn(priv, "0x409800 wait failed\n"); 1355 if (nvkm_rd32(device, 0x409800) & 0x00000001)
1299 1356 break;
1300 nv_wr32(priv, 0x409840, 0xffffffff); 1357 ) < 0)
1301 nv_wr32(priv, 0x409500, 0x7fffffff);
1302 nv_wr32(priv, 0x409504, 0x00000021);
1303
1304 nv_wr32(priv, 0x409840, 0xffffffff);
1305 nv_wr32(priv, 0x409500, 0x00000000);
1306 nv_wr32(priv, 0x409504, 0x00000010);
1307 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
1308 nv_error(priv, "fuc09 req 0x10 timeout\n");
1309 return -EBUSY; 1358 return -EBUSY;
1310 }
1311 priv->size = nv_rd32(priv, 0x409800);
1312 1359
1313 nv_wr32(priv, 0x409840, 0xffffffff); 1360 nvkm_wr32(device, 0x409840, 0xffffffff);
1314 nv_wr32(priv, 0x409500, 0x00000000); 1361 nvkm_wr32(device, 0x409500, 0x7fffffff);
1315 nv_wr32(priv, 0x409504, 0x00000016); 1362 nvkm_wr32(device, 0x409504, 0x00000021);
1316 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) { 1363
1317 nv_error(priv, "fuc09 req 0x16 timeout\n"); 1364 nvkm_wr32(device, 0x409840, 0xffffffff);
1365 nvkm_wr32(device, 0x409500, 0x00000000);
1366 nvkm_wr32(device, 0x409504, 0x00000010);
1367 if (nvkm_msec(device, 2000,
1368 if ((gr->size = nvkm_rd32(device, 0x409800)))
1369 break;
1370 ) < 0)
1318 return -EBUSY; 1371 return -EBUSY;
1319 }
1320 1372
1321 nv_wr32(priv, 0x409840, 0xffffffff); 1373 nvkm_wr32(device, 0x409840, 0xffffffff);
1322 nv_wr32(priv, 0x409500, 0x00000000); 1374 nvkm_wr32(device, 0x409500, 0x00000000);
1323 nv_wr32(priv, 0x409504, 0x00000025); 1375 nvkm_wr32(device, 0x409504, 0x00000016);
1324 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) { 1376 if (nvkm_msec(device, 2000,
1325 nv_error(priv, "fuc09 req 0x25 timeout\n"); 1377 if (nvkm_rd32(device, 0x409800))
1378 break;
1379 ) < 0)
1380 return -EBUSY;
1381
1382 nvkm_wr32(device, 0x409840, 0xffffffff);
1383 nvkm_wr32(device, 0x409500, 0x00000000);
1384 nvkm_wr32(device, 0x409504, 0x00000025);
1385 if (nvkm_msec(device, 2000,
1386 if (nvkm_rd32(device, 0x409800))
1387 break;
1388 ) < 0)
1326 return -EBUSY; 1389 return -EBUSY;
1327 }
1328 1390
1329 if (nv_device(priv)->chipset >= 0xe0) { 1391 if (device->chipset >= 0xe0) {
1330 nv_wr32(priv, 0x409800, 0x00000000); 1392 nvkm_wr32(device, 0x409800, 0x00000000);
1331 nv_wr32(priv, 0x409500, 0x00000001); 1393 nvkm_wr32(device, 0x409500, 0x00000001);
1332 nv_wr32(priv, 0x409504, 0x00000030); 1394 nvkm_wr32(device, 0x409504, 0x00000030);
1333 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) { 1395 if (nvkm_msec(device, 2000,
1334 nv_error(priv, "fuc09 req 0x30 timeout\n"); 1396 if (nvkm_rd32(device, 0x409800))
1397 break;
1398 ) < 0)
1335 return -EBUSY; 1399 return -EBUSY;
1336 }
1337 1400
1338 nv_wr32(priv, 0x409810, 0xb00095c8); 1401 nvkm_wr32(device, 0x409810, 0xb00095c8);
1339 nv_wr32(priv, 0x409800, 0x00000000); 1402 nvkm_wr32(device, 0x409800, 0x00000000);
1340 nv_wr32(priv, 0x409500, 0x00000001); 1403 nvkm_wr32(device, 0x409500, 0x00000001);
1341 nv_wr32(priv, 0x409504, 0x00000031); 1404 nvkm_wr32(device, 0x409504, 0x00000031);
1342 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) { 1405 if (nvkm_msec(device, 2000,
1343 nv_error(priv, "fuc09 req 0x31 timeout\n"); 1406 if (nvkm_rd32(device, 0x409800))
1407 break;
1408 ) < 0)
1344 return -EBUSY; 1409 return -EBUSY;
1345 }
1346 1410
1347 nv_wr32(priv, 0x409810, 0x00080420); 1411 nvkm_wr32(device, 0x409810, 0x00080420);
1348 nv_wr32(priv, 0x409800, 0x00000000); 1412 nvkm_wr32(device, 0x409800, 0x00000000);
1349 nv_wr32(priv, 0x409500, 0x00000001); 1413 nvkm_wr32(device, 0x409500, 0x00000001);
1350 nv_wr32(priv, 0x409504, 0x00000032); 1414 nvkm_wr32(device, 0x409504, 0x00000032);
1351 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) { 1415 if (nvkm_msec(device, 2000,
1352 nv_error(priv, "fuc09 req 0x32 timeout\n"); 1416 if (nvkm_rd32(device, 0x409800))
1417 break;
1418 ) < 0)
1353 return -EBUSY; 1419 return -EBUSY;
1354 }
1355 1420
1356 nv_wr32(priv, 0x409614, 0x00000070); 1421 nvkm_wr32(device, 0x409614, 0x00000070);
1357 nv_wr32(priv, 0x409614, 0x00000770); 1422 nvkm_wr32(device, 0x409614, 0x00000770);
1358 nv_wr32(priv, 0x40802c, 0x00000001); 1423 nvkm_wr32(device, 0x40802c, 0x00000001);
1359 } 1424 }
1360 1425
1361 if (priv->data == NULL) { 1426 if (gr->data == NULL) {
1362 int ret = gf100_grctx_generate(priv); 1427 int ret = gf100_grctx_generate(gr);
1363 if (ret) { 1428 if (ret) {
1364 nv_error(priv, "failed to construct context\n"); 1429 nvkm_error(subdev, "failed to construct context\n");
1365 return ret; 1430 return ret;
1366 } 1431 }
1367 } 1432 }
1368 1433
1369 return 0; 1434 return 0;
1370 } else 1435 } else
1371 if (!oclass->fecs.ucode) { 1436 if (!gr->func->fecs.ucode) {
1372 return -ENOSYS; 1437 return -ENOSYS;
1373 } 1438 }
1374 1439
1375 /* load HUB microcode */ 1440 /* load HUB microcode */
1376 nvkm_mc(priv)->unk260(nvkm_mc(priv), 0); 1441 nvkm_mc_unk260(device->mc, 0);
1377 nv_wr32(priv, 0x4091c0, 0x01000000); 1442 nvkm_wr32(device, 0x4091c0, 0x01000000);
1378 for (i = 0; i < oclass->fecs.ucode->data.size / 4; i++) 1443 for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++)
1379 nv_wr32(priv, 0x4091c4, oclass->fecs.ucode->data.data[i]); 1444 nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]);
1380 1445
1381 nv_wr32(priv, 0x409180, 0x01000000); 1446 nvkm_wr32(device, 0x409180, 0x01000000);
1382 for (i = 0; i < oclass->fecs.ucode->code.size / 4; i++) { 1447 for (i = 0; i < gr->func->fecs.ucode->code.size / 4; i++) {
1383 if ((i & 0x3f) == 0) 1448 if ((i & 0x3f) == 0)
1384 nv_wr32(priv, 0x409188, i >> 6); 1449 nvkm_wr32(device, 0x409188, i >> 6);
1385 nv_wr32(priv, 0x409184, oclass->fecs.ucode->code.data[i]); 1450 nvkm_wr32(device, 0x409184, gr->func->fecs.ucode->code.data[i]);
1386 } 1451 }
1387 1452
1388 /* load GPC microcode */ 1453 /* load GPC microcode */
1389 nv_wr32(priv, 0x41a1c0, 0x01000000); 1454 nvkm_wr32(device, 0x41a1c0, 0x01000000);
1390 for (i = 0; i < oclass->gpccs.ucode->data.size / 4; i++) 1455 for (i = 0; i < gr->func->gpccs.ucode->data.size / 4; i++)
1391 nv_wr32(priv, 0x41a1c4, oclass->gpccs.ucode->data.data[i]); 1456 nvkm_wr32(device, 0x41a1c4, gr->func->gpccs.ucode->data.data[i]);
1392 1457
1393 nv_wr32(priv, 0x41a180, 0x01000000); 1458 nvkm_wr32(device, 0x41a180, 0x01000000);
1394 for (i = 0; i < oclass->gpccs.ucode->code.size / 4; i++) { 1459 for (i = 0; i < gr->func->gpccs.ucode->code.size / 4; i++) {
1395 if ((i & 0x3f) == 0) 1460 if ((i & 0x3f) == 0)
1396 nv_wr32(priv, 0x41a188, i >> 6); 1461 nvkm_wr32(device, 0x41a188, i >> 6);
1397 nv_wr32(priv, 0x41a184, oclass->gpccs.ucode->code.data[i]); 1462 nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]);
1398 } 1463 }
1399 nvkm_mc(priv)->unk260(nvkm_mc(priv), 1); 1464 nvkm_mc_unk260(device->mc, 1);
1400 1465
1401 /* load register lists */ 1466 /* load register lists */
1402 gf100_gr_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000); 1467 gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
1403 gf100_gr_init_csdata(priv, cclass->gpc, 0x41a000, 0x000, 0x418000); 1468 gf100_gr_init_csdata(gr, grctx->gpc, 0x41a000, 0x000, 0x418000);
1404 gf100_gr_init_csdata(priv, cclass->tpc, 0x41a000, 0x004, 0x419800); 1469 gf100_gr_init_csdata(gr, grctx->tpc, 0x41a000, 0x004, 0x419800);
1405 gf100_gr_init_csdata(priv, cclass->ppc, 0x41a000, 0x008, 0x41be00); 1470 gf100_gr_init_csdata(gr, grctx->ppc, 0x41a000, 0x008, 0x41be00);
1406 1471
1407 /* start HUB ucode running, it'll init the GPCs */ 1472 /* start HUB ucode running, it'll init the GPCs */
1408 nv_wr32(priv, 0x40910c, 0x00000000); 1473 nvkm_wr32(device, 0x40910c, 0x00000000);
1409 nv_wr32(priv, 0x409100, 0x00000002); 1474 nvkm_wr32(device, 0x409100, 0x00000002);
1410 if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) { 1475 if (nvkm_msec(device, 2000,
1411 nv_error(priv, "HUB_INIT timed out\n"); 1476 if (nvkm_rd32(device, 0x409800) & 0x80000000)
1412 gf100_gr_ctxctl_debug(priv); 1477 break;
1478 ) < 0) {
1479 gf100_gr_ctxctl_debug(gr);
1413 return -EBUSY; 1480 return -EBUSY;
1414 } 1481 }
1415 1482
1416 priv->size = nv_rd32(priv, 0x409804); 1483 gr->size = nvkm_rd32(device, 0x409804);
1417 if (priv->data == NULL) { 1484 if (gr->data == NULL) {
1418 int ret = gf100_grctx_generate(priv); 1485 int ret = gf100_grctx_generate(gr);
1419 if (ret) { 1486 if (ret) {
1420 nv_error(priv, "failed to construct context\n"); 1487 nvkm_error(subdev, "failed to construct context\n");
1421 return ret; 1488 return ret;
1422 } 1489 }
1423 } 1490 }
@@ -1425,143 +1492,160 @@ gf100_gr_init_ctxctl(struct gf100_gr_priv *priv)
1425 return 0; 1492 return 0;
1426} 1493}
1427 1494
1428int 1495static int
1429gf100_gr_init(struct nvkm_object *object) 1496gf100_gr_oneinit(struct nvkm_gr *base)
1430{ 1497{
1431 struct gf100_gr_oclass *oclass = (void *)object->oclass; 1498 struct gf100_gr *gr = gf100_gr(base);
1432 struct gf100_gr_priv *priv = (void *)object; 1499 struct nvkm_device *device = gr->base.engine.subdev.device;
1433 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total); 1500 int ret, i, j;
1434 u32 data[TPC_MAX / 8] = {};
1435 u8 tpcnr[GPC_MAX];
1436 int gpc, tpc, rop;
1437 int ret, i;
1438 1501
1439 ret = nvkm_gr_init(&priv->base); 1502 nvkm_pmu_pgob(device->pmu, false);
1503
1504 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
1505 &gr->unk4188b4);
1440 if (ret) 1506 if (ret)
1441 return ret; 1507 return ret;
1442 1508
1443 nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000); 1509 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
1444 nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000); 1510 &gr->unk4188b8);
1445 nv_wr32(priv, GPC_BCAST(0x0888), 0x00000000); 1511 if (ret)
1446 nv_wr32(priv, GPC_BCAST(0x088c), 0x00000000); 1512 return ret;
1447 nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
1448 nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
1449 nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
1450 nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
1451
1452 gf100_gr_mmio(priv, oclass->mmio);
1453
1454 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
1455 for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
1456 do {
1457 gpc = (gpc + 1) % priv->gpc_nr;
1458 } while (!tpcnr[gpc]);
1459 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
1460
1461 data[i / 8] |= tpc << ((i % 8) * 4);
1462 }
1463
1464 nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
1465 nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
1466 nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
1467 nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
1468
1469 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
1470 nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
1471 priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
1472 nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
1473 priv->tpc_total);
1474 nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
1475 }
1476 1513
1477 if (nv_device(priv)->chipset != 0xd7) 1514 nvkm_kmap(gr->unk4188b4);
1478 nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918); 1515 for (i = 0; i < 0x1000; i += 4)
1479 else 1516 nvkm_wo32(gr->unk4188b4, i, 0x00000010);
1480 nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918); 1517 nvkm_done(gr->unk4188b4);
1481 1518
1482 nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800)); 1519 nvkm_kmap(gr->unk4188b8);
1483 1520 for (i = 0; i < 0x1000; i += 4)
1484 nv_wr32(priv, 0x400500, 0x00010001); 1521 nvkm_wo32(gr->unk4188b8, i, 0x00000010);
1485 1522 nvkm_done(gr->unk4188b8);
1486 nv_wr32(priv, 0x400100, 0xffffffff); 1523
1487 nv_wr32(priv, 0x40013c, 0xffffffff); 1524 gr->rop_nr = (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16;
1488 1525 gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f;
1489 nv_wr32(priv, 0x409c24, 0x000f0000); 1526 for (i = 0; i < gr->gpc_nr; i++) {
1490 nv_wr32(priv, 0x404000, 0xc0000000); 1527 gr->tpc_nr[i] = nvkm_rd32(device, GPC_UNIT(i, 0x2608));
1491 nv_wr32(priv, 0x404600, 0xc0000000); 1528 gr->tpc_total += gr->tpc_nr[i];
1492 nv_wr32(priv, 0x408030, 0xc0000000); 1529 gr->ppc_nr[i] = gr->func->ppc_nr;
1493 nv_wr32(priv, 0x40601c, 0xc0000000); 1530 for (j = 0; j < gr->ppc_nr[i]; j++) {
1494 nv_wr32(priv, 0x404490, 0xc0000000); 1531 u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
1495 nv_wr32(priv, 0x406018, 0xc0000000); 1532 gr->ppc_tpc_nr[i][j] = hweight8(mask);
1496 nv_wr32(priv, 0x405840, 0xc0000000);
1497 nv_wr32(priv, 0x405844, 0x00ffffff);
1498 nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
1499 nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
1500
1501 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
1502 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
1503 nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
1504 nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
1505 nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
1506 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
1507 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
1508 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
1509 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
1510 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
1511 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
1512 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
1513 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
1514 } 1533 }
1515 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
1516 nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
1517 } 1534 }
1518 1535
1519 for (rop = 0; rop < priv->rop_nr; rop++) { 1536 /*XXX: these need figuring out... though it might not even matter */
1520 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000); 1537 switch (device->chipset) {
1521 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000); 1538 case 0xc0:
1522 nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff); 1539 if (gr->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
1523 nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff); 1540 gr->magic_not_rop_nr = 0x07;
1541 } else
1542 if (gr->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
1543 gr->magic_not_rop_nr = 0x05;
1544 } else
1545 if (gr->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
1546 gr->magic_not_rop_nr = 0x06;
1547 }
1548 break;
1549 case 0xc3: /* 450, 4/0/0/0, 2 */
1550 gr->magic_not_rop_nr = 0x03;
1551 break;
1552 case 0xc4: /* 460, 3/4/0/0, 4 */
1553 gr->magic_not_rop_nr = 0x01;
1554 break;
1555 case 0xc1: /* 2/0/0/0, 1 */
1556 gr->magic_not_rop_nr = 0x01;
1557 break;
1558 case 0xc8: /* 4/4/3/4, 5 */
1559 gr->magic_not_rop_nr = 0x06;
1560 break;
1561 case 0xce: /* 4/4/0/0, 4 */
1562 gr->magic_not_rop_nr = 0x03;
1563 break;
1564 case 0xcf: /* 4/0/0/0, 3 */
1565 gr->magic_not_rop_nr = 0x03;
1566 break;
1567 case 0xd7:
1568 case 0xd9: /* 1/0/0/0, 1 */
1569 case 0xea: /* gk20a */
1570 case 0x12b: /* gm20b */
1571 gr->magic_not_rop_nr = 0x01;
1572 break;
1524 } 1573 }
1525 1574
1526 nv_wr32(priv, 0x400108, 0xffffffff); 1575 return 0;
1527 nv_wr32(priv, 0x400138, 0xffffffff); 1576}
1528 nv_wr32(priv, 0x400118, 0xffffffff);
1529 nv_wr32(priv, 0x400130, 0xffffffff);
1530 nv_wr32(priv, 0x40011c, 0xffffffff);
1531 nv_wr32(priv, 0x400134, 0xffffffff);
1532
1533 nv_wr32(priv, 0x400054, 0x34ce3464);
1534
1535 gf100_gr_zbc_init(priv);
1536 1577
1537 return gf100_gr_init_ctxctl(priv); 1578int
1579gf100_gr_init_(struct nvkm_gr *base)
1580{
1581 struct gf100_gr *gr = gf100_gr(base);
1582 nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
1583 return gr->func->init(gr);
1538} 1584}
1539 1585
1540static void 1586void
1541gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc) 1587gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
1542{ 1588{
1543 kfree(fuc->data); 1589 kfree(fuc->data);
1544 fuc->data = NULL; 1590 fuc->data = NULL;
1545} 1591}
1546 1592
1593void *
1594gf100_gr_dtor(struct nvkm_gr *base)
1595{
1596 struct gf100_gr *gr = gf100_gr(base);
1597
1598 if (gr->func->dtor)
1599 gr->func->dtor(gr);
1600 kfree(gr->data);
1601
1602 gf100_gr_dtor_fw(&gr->fuc409c);
1603 gf100_gr_dtor_fw(&gr->fuc409d);
1604 gf100_gr_dtor_fw(&gr->fuc41ac);
1605 gf100_gr_dtor_fw(&gr->fuc41ad);
1606
1607 nvkm_memory_del(&gr->unk4188b8);
1608 nvkm_memory_del(&gr->unk4188b4);
1609 return gr;
1610}
1611
1612static const struct nvkm_gr_func
1613gf100_gr_ = {
1614 .dtor = gf100_gr_dtor,
1615 .oneinit = gf100_gr_oneinit,
1616 .init = gf100_gr_init_,
1617 .intr = gf100_gr_intr,
1618 .units = gf100_gr_units,
1619 .chan_new = gf100_gr_chan_new,
1620 .object_get = gf100_gr_object_get,
1621};
1622
1547int 1623int
1548gf100_gr_ctor_fw(struct gf100_gr_priv *priv, const char *fwname, 1624gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
1549 struct gf100_gr_fuc *fuc) 1625 struct gf100_gr_fuc *fuc)
1550{ 1626{
1551 struct nvkm_device *device = nv_device(priv); 1627 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
1628 struct nvkm_device *device = subdev->device;
1552 const struct firmware *fw; 1629 const struct firmware *fw;
1553 char f[32]; 1630 char f[64];
1631 char cname[16];
1554 int ret; 1632 int ret;
1633 int i;
1634
1635 /* Convert device name to lowercase */
1636 strncpy(cname, device->chip->name, sizeof(cname));
1637 cname[sizeof(cname) - 1] = '\0';
1638 i = strlen(cname);
1639 while (i) {
1640 --i;
1641 cname[i] = tolower(cname[i]);
1642 }
1555 1643
1556 snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname); 1644 snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
1557 ret = request_firmware(&fw, f, nv_device_base(device)); 1645 ret = request_firmware(&fw, f, device->dev);
1558 if (ret) { 1646 if (ret) {
1559 snprintf(f, sizeof(f), "nouveau/%s", fwname); 1647 nvkm_error(subdev, "failed to load %s\n", fwname);
1560 ret = request_firmware(&fw, f, nv_device_base(device)); 1648 return ret;
1561 if (ret) {
1562 nv_error(priv, "failed to load %s\n", fwname);
1563 return ret;
1564 }
1565 } 1649 }
1566 1650
1567 fuc->size = fw->size; 1651 fuc->size = fw->size;
@@ -1570,126 +1654,150 @@ gf100_gr_ctor_fw(struct gf100_gr_priv *priv, const char *fwname,
1570 return (fuc->data != NULL) ? 0 : -ENOMEM; 1654 return (fuc->data != NULL) ? 0 : -ENOMEM;
1571} 1655}
1572 1656
1573void 1657int
1574gf100_gr_dtor(struct nvkm_object *object) 1658gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
1659 int index, struct gf100_gr *gr)
1575{ 1660{
1576 struct gf100_gr_priv *priv = (void *)object; 1661 int ret;
1577 1662
1578 kfree(priv->data); 1663 gr->func = func;
1664 gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
1665 func->fecs.ucode == NULL);
1579 1666
1580 gf100_gr_dtor_fw(&priv->fuc409c); 1667 ret = nvkm_gr_ctor(&gf100_gr_, device, index, 0x08001000,
1581 gf100_gr_dtor_fw(&priv->fuc409d); 1668 gr->firmware || func->fecs.ucode != NULL,
1582 gf100_gr_dtor_fw(&priv->fuc41ac); 1669 &gr->base);
1583 gf100_gr_dtor_fw(&priv->fuc41ad); 1670 if (ret)
1671 return ret;
1584 1672
1585 nvkm_gpuobj_ref(NULL, &priv->unk4188b8); 1673 if (gr->firmware) {
1586 nvkm_gpuobj_ref(NULL, &priv->unk4188b4); 1674 nvkm_info(&gr->base.engine.subdev, "using external firmware\n");
1675 if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
1676 gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
1677 gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
1678 gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
1679 return -ENODEV;
1680 }
1587 1681
1588 nvkm_gr_destroy(&priv->base); 1682 return 0;
1589} 1683}
1590 1684
1591int 1685int
1592gf100_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 1686gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
1593 struct nvkm_oclass *bclass, void *data, u32 size, 1687 int index, struct nvkm_gr **pgr)
1594 struct nvkm_object **pobject)
1595{ 1688{
1596 struct gf100_gr_oclass *oclass = (void *)bclass; 1689 struct gf100_gr *gr;
1597 struct nvkm_device *device = nv_device(parent); 1690 if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
1598 struct gf100_gr_priv *priv; 1691 return -ENOMEM;
1599 bool use_ext_fw, enable; 1692 *pgr = &gr->base;
1600 int ret, i, j; 1693 return gf100_gr_ctor(func, device, index, gr);
1694}
1601 1695
1602 use_ext_fw = nvkm_boolopt(device->cfgopt, "NvGrUseFW", 1696int
1603 oclass->fecs.ucode == NULL); 1697gf100_gr_init(struct gf100_gr *gr)
1604 enable = use_ext_fw || oclass->fecs.ucode != NULL; 1698{
1699 struct nvkm_device *device = gr->base.engine.subdev.device;
1700 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
1701 u32 data[TPC_MAX / 8] = {};
1702 u8 tpcnr[GPC_MAX];
1703 int gpc, tpc, rop;
1704 int i;
1605 1705
1606 ret = nvkm_gr_create(parent, engine, bclass, enable, &priv); 1706 nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
1607 *pobject = nv_object(priv); 1707 nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
1608 if (ret) 1708 nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
1609 return ret; 1709 nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
1710 nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
1711 nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
1712 nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
1713 nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
1610 1714
1611 nv_subdev(priv)->unit = 0x08001000; 1715 gf100_gr_mmio(gr, gr->func->mmio);
1612 nv_subdev(priv)->intr = gf100_gr_intr;
1613 1716
1614 priv->base.units = gf100_gr_units; 1717 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
1718 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
1719 do {
1720 gpc = (gpc + 1) % gr->gpc_nr;
1721 } while (!tpcnr[gpc]);
1722 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
1615 1723
1616 if (use_ext_fw) { 1724 data[i / 8] |= tpc << ((i % 8) * 4);
1617 nv_info(priv, "using external firmware\n");
1618 if (gf100_gr_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
1619 gf100_gr_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
1620 gf100_gr_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
1621 gf100_gr_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
1622 return -ENODEV;
1623 priv->firmware = true;
1624 } 1725 }
1625 1726
1626 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0, 1727 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
1627 &priv->unk4188b4); 1728 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
1628 if (ret) 1729 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
1629 return ret; 1730 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
1630
1631 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
1632 &priv->unk4188b8);
1633 if (ret)
1634 return ret;
1635 1731
1636 for (i = 0; i < 0x1000; i += 4) { 1732 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
1637 nv_wo32(priv->unk4188b4, i, 0x00000010); 1733 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
1638 nv_wo32(priv->unk4188b8, i, 0x00000010); 1734 gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
1735 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
1736 gr->tpc_total);
1737 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
1639 } 1738 }
1640 1739
1641 priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16; 1740 if (device->chipset != 0xd7)
1642 priv->gpc_nr = nv_rd32(priv, 0x409604) & 0x0000001f; 1741 nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918);
1643 for (i = 0; i < priv->gpc_nr; i++) { 1742 else
1644 priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608)); 1743 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
1645 priv->tpc_total += priv->tpc_nr[i]; 1744
1646 priv->ppc_nr[i] = oclass->ppc_nr; 1745 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
1647 for (j = 0; j < priv->ppc_nr[i]; j++) { 1746
1648 u8 mask = nv_rd32(priv, GPC_UNIT(i, 0x0c30 + (j * 4))); 1747 nvkm_wr32(device, 0x400500, 0x00010001);
1649 priv->ppc_tpc_nr[i][j] = hweight8(mask); 1748
1749 nvkm_wr32(device, 0x400100, 0xffffffff);
1750 nvkm_wr32(device, 0x40013c, 0xffffffff);
1751
1752 nvkm_wr32(device, 0x409c24, 0x000f0000);
1753 nvkm_wr32(device, 0x404000, 0xc0000000);
1754 nvkm_wr32(device, 0x404600, 0xc0000000);
1755 nvkm_wr32(device, 0x408030, 0xc0000000);
1756 nvkm_wr32(device, 0x40601c, 0xc0000000);
1757 nvkm_wr32(device, 0x404490, 0xc0000000);
1758 nvkm_wr32(device, 0x406018, 0xc0000000);
1759 nvkm_wr32(device, 0x405840, 0xc0000000);
1760 nvkm_wr32(device, 0x405844, 0x00ffffff);
1761 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
1762 nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
1763
1764 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
1765 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
1766 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
1767 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
1768 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
1769 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
1770 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
1771 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
1772 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
1773 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
1774 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
1775 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
1776 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
1650 } 1777 }
1778 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
1779 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
1651 } 1780 }
1652 1781
1653 /*XXX: these need figuring out... though it might not even matter */ 1782 for (rop = 0; rop < gr->rop_nr; rop++) {
1654 switch (nv_device(priv)->chipset) { 1783 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
1655 case 0xc0: 1784 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
1656 if (priv->tpc_total == 11) { /* 465, 3/4/4/0, 4 */ 1785 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
1657 priv->magic_not_rop_nr = 0x07; 1786 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
1658 } else
1659 if (priv->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
1660 priv->magic_not_rop_nr = 0x05;
1661 } else
1662 if (priv->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
1663 priv->magic_not_rop_nr = 0x06;
1664 }
1665 break;
1666 case 0xc3: /* 450, 4/0/0/0, 2 */
1667 priv->magic_not_rop_nr = 0x03;
1668 break;
1669 case 0xc4: /* 460, 3/4/0/0, 4 */
1670 priv->magic_not_rop_nr = 0x01;
1671 break;
1672 case 0xc1: /* 2/0/0/0, 1 */
1673 priv->magic_not_rop_nr = 0x01;
1674 break;
1675 case 0xc8: /* 4/4/3/4, 5 */
1676 priv->magic_not_rop_nr = 0x06;
1677 break;
1678 case 0xce: /* 4/4/0/0, 4 */
1679 priv->magic_not_rop_nr = 0x03;
1680 break;
1681 case 0xcf: /* 4/0/0/0, 3 */
1682 priv->magic_not_rop_nr = 0x03;
1683 break;
1684 case 0xd7:
1685 case 0xd9: /* 1/0/0/0, 1 */
1686 priv->magic_not_rop_nr = 0x01;
1687 break;
1688 } 1787 }
1689 1788
1690 nv_engine(priv)->cclass = *oclass->cclass; 1789 nvkm_wr32(device, 0x400108, 0xffffffff);
1691 nv_engine(priv)->sclass = oclass->sclass; 1790 nvkm_wr32(device, 0x400138, 0xffffffff);
1692 return 0; 1791 nvkm_wr32(device, 0x400118, 0xffffffff);
1792 nvkm_wr32(device, 0x400130, 0xffffffff);
1793 nvkm_wr32(device, 0x40011c, 0xffffffff);
1794 nvkm_wr32(device, 0x400134, 0xffffffff);
1795
1796 nvkm_wr32(device, 0x400054, 0x34ce3464);
1797
1798 gf100_gr_zbc_init(gr);
1799
1800 return gf100_gr_init_ctxctl(gr);
1693} 1801}
1694 1802
1695#include "fuc/hubgf100.fuc3.h" 1803#include "fuc/hubgf100.fuc3.h"
@@ -1712,18 +1820,24 @@ gf100_gr_gpccs_ucode = {
1712 .data.size = sizeof(gf100_grgpc_data), 1820 .data.size = sizeof(gf100_grgpc_data),
1713}; 1821};
1714 1822
1715struct nvkm_oclass * 1823static const struct gf100_gr_func
1716gf100_gr_oclass = &(struct gf100_gr_oclass) { 1824gf100_gr = {
1717 .base.handle = NV_ENGINE(GR, 0xc0), 1825 .init = gf100_gr_init,
1718 .base.ofuncs = &(struct nvkm_ofuncs) {
1719 .ctor = gf100_gr_ctor,
1720 .dtor = gf100_gr_dtor,
1721 .init = gf100_gr_init,
1722 .fini = _nvkm_gr_fini,
1723 },
1724 .cclass = &gf100_grctx_oclass,
1725 .sclass = gf100_gr_sclass,
1726 .mmio = gf100_gr_pack_mmio, 1826 .mmio = gf100_gr_pack_mmio,
1727 .fecs.ucode = &gf100_gr_fecs_ucode, 1827 .fecs.ucode = &gf100_gr_fecs_ucode,
1728 .gpccs.ucode = &gf100_gr_gpccs_ucode, 1828 .gpccs.ucode = &gf100_gr_gpccs_ucode,
1729}.base; 1829 .grctx = &gf100_grctx,
1830 .sclass = {
1831 { -1, -1, FERMI_TWOD_A },
1832 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
1833 { -1, -1, FERMI_A, &gf100_fermi },
1834 { -1, -1, FERMI_COMPUTE_A },
1835 {}
1836 }
1837};
1838
1839int
1840gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
1841{
1842 return gf100_gr_new_(&gf100_gr, device, index, pgr);
1843}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index c9533fdac4fc..4611961b1187 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -21,11 +21,14 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#ifndef __NVC0_GR_H__ 24#ifndef __GF100_GR_H__
25#define __NVC0_GR_H__ 25#define __GF100_GR_H__
26#include <engine/gr.h> 26#define gf100_gr(p) container_of((p), struct gf100_gr, base)
27#include "priv.h"
27 28
29#include <core/gpuobj.h>
28#include <subdev/ltc.h> 30#include <subdev/ltc.h>
31#include <subdev/mmu.h>
29 32
30#define GPC_MAX 32 33#define GPC_MAX 32
31#define TPC_MAX (GPC_MAX * 8) 34#define TPC_MAX (GPC_MAX * 8)
@@ -67,7 +70,8 @@ struct gf100_gr_zbc_depth {
67 u32 l2; 70 u32 l2;
68}; 71};
69 72
70struct gf100_gr_priv { 73struct gf100_gr {
74 const struct gf100_gr_func *func;
71 struct nvkm_gr base; 75 struct nvkm_gr base;
72 76
73 struct gf100_gr_fuc fuc409c; 77 struct gf100_gr_fuc fuc409c;
@@ -76,6 +80,15 @@ struct gf100_gr_priv {
76 struct gf100_gr_fuc fuc41ad; 80 struct gf100_gr_fuc fuc41ad;
77 bool firmware; 81 bool firmware;
78 82
83 /*
84 * Used if the register packs are loaded from NVIDIA fw instead of
85 * using hardcoded arrays.
86 */
87 struct gf100_gr_pack *fuc_sw_nonctx;
88 struct gf100_gr_pack *fuc_sw_ctx;
89 struct gf100_gr_pack *fuc_bundle;
90 struct gf100_gr_pack *fuc_method;
91
79 struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT]; 92 struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT];
80 struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT]; 93 struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
81 94
@@ -86,8 +99,8 @@ struct gf100_gr_priv {
86 u8 ppc_nr[GPC_MAX]; 99 u8 ppc_nr[GPC_MAX];
87 u8 ppc_tpc_nr[GPC_MAX][4]; 100 u8 ppc_tpc_nr[GPC_MAX][4];
88 101
89 struct nvkm_gpuobj *unk4188b4; 102 struct nvkm_memory *unk4188b4;
90 struct nvkm_gpuobj *unk4188b8; 103 struct nvkm_memory *unk4188b8;
91 104
92 struct gf100_gr_data mmio_data[4]; 105 struct gf100_gr_data mmio_data[4];
93 struct gf100_gr_mmio mmio_list[4096/8]; 106 struct gf100_gr_mmio mmio_list[4096/8];
@@ -97,48 +110,65 @@ struct gf100_gr_priv {
97 u8 magic_not_rop_nr; 110 u8 magic_not_rop_nr;
98}; 111};
99 112
113int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *,
114 int, struct gf100_gr *);
115int gf100_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
116 int, struct nvkm_gr **);
117void *gf100_gr_dtor(struct nvkm_gr *);
118
119struct gf100_gr_func {
120 void (*dtor)(struct gf100_gr *);
121 int (*init)(struct gf100_gr *);
122 void (*init_gpc_mmu)(struct gf100_gr *);
123 void (*set_hww_esr_report_mask)(struct gf100_gr *);
124 const struct gf100_gr_pack *mmio;
125 struct {
126 struct gf100_gr_ucode *ucode;
127 } fecs;
128 struct {
129 struct gf100_gr_ucode *ucode;
130 } gpccs;
131 int ppc_nr;
132 const struct gf100_grctx_func *grctx;
133 struct nvkm_sclass sclass[];
134};
135
136int gf100_gr_init(struct gf100_gr *);
137
138int gk104_gr_init(struct gf100_gr *);
139
140int gk20a_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
141 int, struct nvkm_gr **);
142void gk20a_gr_dtor(struct gf100_gr *);
143int gk20a_gr_init(struct gf100_gr *);
144
145int gm204_gr_init(struct gf100_gr *);
146
147#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
148
100struct gf100_gr_chan { 149struct gf100_gr_chan {
101 struct nvkm_gr_chan base; 150 struct nvkm_object object;
151 struct gf100_gr *gr;
102 152
103 struct nvkm_gpuobj *mmio; 153 struct nvkm_memory *mmio;
104 struct nvkm_vma mmio_vma; 154 struct nvkm_vma mmio_vma;
105 int mmio_nr; 155 int mmio_nr;
156
106 struct { 157 struct {
107 struct nvkm_gpuobj *mem; 158 struct nvkm_memory *mem;
108 struct nvkm_vma vma; 159 struct nvkm_vma vma;
109 } data[4]; 160 } data[4];
110}; 161};
111 162
112int gf100_gr_context_ctor(struct nvkm_object *, struct nvkm_object *, 163void gf100_gr_ctxctl_debug(struct gf100_gr *);
113 struct nvkm_oclass *, void *, u32,
114 struct nvkm_object **);
115void gf100_gr_context_dtor(struct nvkm_object *);
116
117void gf100_gr_ctxctl_debug(struct gf100_gr_priv *);
118 164
165void gf100_gr_dtor_fw(struct gf100_gr_fuc *);
166int gf100_gr_ctor_fw(struct gf100_gr *, const char *,
167 struct gf100_gr_fuc *);
119u64 gf100_gr_units(struct nvkm_gr *); 168u64 gf100_gr_units(struct nvkm_gr *);
120int gf100_gr_ctor(struct nvkm_object *, struct nvkm_object *, 169void gf100_gr_zbc_init(struct gf100_gr *);
121 struct nvkm_oclass *, void *data, u32 size,
122 struct nvkm_object **);
123void gf100_gr_dtor(struct nvkm_object *);
124int gf100_gr_init(struct nvkm_object *);
125void gf100_gr_zbc_init(struct gf100_gr_priv *);
126
127int gk104_gr_ctor(struct nvkm_object *, struct nvkm_object *,
128 struct nvkm_oclass *, void *data, u32 size,
129 struct nvkm_object **);
130int gk104_gr_init(struct nvkm_object *);
131
132int gm204_gr_init(struct nvkm_object *);
133 170
134extern struct nvkm_ofuncs gf100_fermi_ofuncs; 171extern const struct nvkm_object_func gf100_fermi;
135
136extern struct nvkm_oclass gf100_gr_sclass[];
137extern struct nvkm_omthds gf100_gr_9097_omthds[];
138extern struct nvkm_omthds gf100_gr_90c0_omthds[];
139extern struct nvkm_oclass gf110_gr_sclass[];
140extern struct nvkm_oclass gk110_gr_sclass[];
141extern struct nvkm_oclass gm204_gr_sclass[];
142 172
143struct gf100_gr_init { 173struct gf100_gr_init {
144 u32 addr; 174 u32 addr;
@@ -167,25 +197,11 @@ extern struct gf100_gr_ucode gf100_gr_gpccs_ucode;
167extern struct gf100_gr_ucode gk110_gr_fecs_ucode; 197extern struct gf100_gr_ucode gk110_gr_fecs_ucode;
168extern struct gf100_gr_ucode gk110_gr_gpccs_ucode; 198extern struct gf100_gr_ucode gk110_gr_gpccs_ucode;
169 199
170struct gf100_gr_oclass { 200int gf100_gr_wait_idle(struct gf100_gr *);
171 struct nvkm_oclass base; 201void gf100_gr_mmio(struct gf100_gr *, const struct gf100_gr_pack *);
172 struct nvkm_oclass **cclass; 202void gf100_gr_icmd(struct gf100_gr *, const struct gf100_gr_pack *);
173 struct nvkm_oclass *sclass; 203void gf100_gr_mthd(struct gf100_gr *, const struct gf100_gr_pack *);
174 const struct gf100_gr_pack *mmio; 204int gf100_gr_init_ctxctl(struct gf100_gr *);
175 struct {
176 struct gf100_gr_ucode *ucode;
177 } fecs;
178 struct {
179 struct gf100_gr_ucode *ucode;
180 } gpccs;
181 int ppc_nr;
182};
183
184int gf100_gr_wait_idle(struct gf100_gr_priv *);
185void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
186void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
187void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
188int gf100_gr_init_ctxctl(struct gf100_gr_priv *);
189 205
190/* register init value lists */ 206/* register init value lists */
191 207
@@ -261,7 +277,7 @@ extern const struct gf100_gr_init gm107_gr_init_tex_0[];
261extern const struct gf100_gr_init gm107_gr_init_l1c_0[]; 277extern const struct gf100_gr_init gm107_gr_init_l1c_0[];
262extern const struct gf100_gr_init gm107_gr_init_wwdx_0[]; 278extern const struct gf100_gr_init gm107_gr_init_wwdx_0[];
263extern const struct gf100_gr_init gm107_gr_init_cbm_0[]; 279extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
264void gm107_gr_init_bios(struct gf100_gr_priv *); 280void gm107_gr_init_bios(struct gf100_gr *);
265 281
266extern const struct gf100_gr_pack gm204_gr_pack_mmio[]; 282extern const struct gf100_gr_pack gm204_gr_pack_mmio[];
267#endif 283#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 20d3b85db3b5..8f253e0a22f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -24,6 +24,8 @@
24#include "gf100.h" 24#include "gf100.h"
25#include "ctxgf100.h" 25#include "ctxgf100.h"
26 26
27#include <nvif/class.h>
28
27/******************************************************************************* 29/*******************************************************************************
28 * PGRAPH register lists 30 * PGRAPH register lists
29 ******************************************************************************/ 31 ******************************************************************************/
@@ -110,18 +112,24 @@ gf104_gr_pack_mmio[] = {
110 * PGRAPH engine/subdev functions 112 * PGRAPH engine/subdev functions
111 ******************************************************************************/ 113 ******************************************************************************/
112 114
113struct nvkm_oclass * 115static const struct gf100_gr_func
114gf104_gr_oclass = &(struct gf100_gr_oclass) { 116gf104_gr = {
115 .base.handle = NV_ENGINE(GR, 0xc3), 117 .init = gf100_gr_init,
116 .base.ofuncs = &(struct nvkm_ofuncs) {
117 .ctor = gf100_gr_ctor,
118 .dtor = gf100_gr_dtor,
119 .init = gf100_gr_init,
120 .fini = _nvkm_gr_fini,
121 },
122 .cclass = &gf104_grctx_oclass,
123 .sclass = gf100_gr_sclass,
124 .mmio = gf104_gr_pack_mmio, 118 .mmio = gf104_gr_pack_mmio,
125 .fecs.ucode = &gf100_gr_fecs_ucode, 119 .fecs.ucode = &gf100_gr_fecs_ucode,
126 .gpccs.ucode = &gf100_gr_gpccs_ucode, 120 .gpccs.ucode = &gf100_gr_gpccs_ucode,
127}.base; 121 .grctx = &gf104_grctx,
122 .sclass = {
123 { -1, -1, FERMI_TWOD_A },
124 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
125 { -1, -1, FERMI_A, &gf100_fermi },
126 { -1, -1, FERMI_COMPUTE_A },
127 {}
128 }
129};
130
131int
132gf104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
133{
134 return gf100_gr_new_(&gf104_gr, device, index, pgr);
135}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 8df73421c78c..815a5aafa245 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -27,20 +27,6 @@
27#include <nvif/class.h> 27#include <nvif/class.h>
28 28
29/******************************************************************************* 29/*******************************************************************************
30 * Graphics object classes
31 ******************************************************************************/
32
33static struct nvkm_oclass
34gf108_gr_sclass[] = {
35 { FERMI_TWOD_A, &nvkm_object_ofuncs },
36 { FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
37 { FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
38 { FERMI_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
39 { FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
40 {}
41};
42
43/*******************************************************************************
44 * PGRAPH register lists 30 * PGRAPH register lists
45 ******************************************************************************/ 31 ******************************************************************************/
46 32
@@ -117,18 +103,25 @@ gf108_gr_pack_mmio[] = {
117 * PGRAPH engine/subdev functions 103 * PGRAPH engine/subdev functions
118 ******************************************************************************/ 104 ******************************************************************************/
119 105
120struct nvkm_oclass * 106static const struct gf100_gr_func
121gf108_gr_oclass = &(struct gf100_gr_oclass) { 107gf108_gr = {
122 .base.handle = NV_ENGINE(GR, 0xc1), 108 .init = gf100_gr_init,
123 .base.ofuncs = &(struct nvkm_ofuncs) {
124 .ctor = gf100_gr_ctor,
125 .dtor = gf100_gr_dtor,
126 .init = gf100_gr_init,
127 .fini = _nvkm_gr_fini,
128 },
129 .cclass = &gf108_grctx_oclass,
130 .sclass = gf108_gr_sclass,
131 .mmio = gf108_gr_pack_mmio, 109 .mmio = gf108_gr_pack_mmio,
132 .fecs.ucode = &gf100_gr_fecs_ucode, 110 .fecs.ucode = &gf100_gr_fecs_ucode,
133 .gpccs.ucode = &gf100_gr_gpccs_ucode, 111 .gpccs.ucode = &gf100_gr_gpccs_ucode,
134}.base; 112 .grctx = &gf108_grctx,
113 .sclass = {
114 { -1, -1, FERMI_TWOD_A },
115 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
116 { -1, -1, FERMI_A, &gf100_fermi },
117 { -1, -1, FERMI_B, &gf100_fermi },
118 { -1, -1, FERMI_COMPUTE_A },
119 {}
120 }
121};
122
123int
124gf108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
125{
126 return gf100_gr_new_(&gf108_gr, device, index, pgr);
127}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index ef76e2dd1d31..d13187409d68 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -27,21 +27,6 @@
27#include <nvif/class.h> 27#include <nvif/class.h>
28 28
29/******************************************************************************* 29/*******************************************************************************
30 * Graphics object classes
31 ******************************************************************************/
32
33struct nvkm_oclass
34gf110_gr_sclass[] = {
35 { FERMI_TWOD_A, &nvkm_object_ofuncs },
36 { FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
37 { FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
38 { FERMI_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
39 { FERMI_C, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
40 { FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
41 {}
42};
43
44/*******************************************************************************
45 * PGRAPH register lists 30 * PGRAPH register lists
46 ******************************************************************************/ 31 ******************************************************************************/
47 32
@@ -99,18 +84,26 @@ gf110_gr_pack_mmio[] = {
99 * PGRAPH engine/subdev functions 84 * PGRAPH engine/subdev functions
100 ******************************************************************************/ 85 ******************************************************************************/
101 86
102struct nvkm_oclass * 87static const struct gf100_gr_func
103gf110_gr_oclass = &(struct gf100_gr_oclass) { 88gf110_gr = {
104 .base.handle = NV_ENGINE(GR, 0xc8), 89 .init = gf100_gr_init,
105 .base.ofuncs = &(struct nvkm_ofuncs) {
106 .ctor = gf100_gr_ctor,
107 .dtor = gf100_gr_dtor,
108 .init = gf100_gr_init,
109 .fini = _nvkm_gr_fini,
110 },
111 .cclass = &gf110_grctx_oclass,
112 .sclass = gf110_gr_sclass,
113 .mmio = gf110_gr_pack_mmio, 90 .mmio = gf110_gr_pack_mmio,
114 .fecs.ucode = &gf100_gr_fecs_ucode, 91 .fecs.ucode = &gf100_gr_fecs_ucode,
115 .gpccs.ucode = &gf100_gr_gpccs_ucode, 92 .gpccs.ucode = &gf100_gr_gpccs_ucode,
116}.base; 93 .grctx = &gf110_grctx,
94 .sclass = {
95 { -1, -1, FERMI_TWOD_A },
96 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
97 { -1, -1, FERMI_A, &gf100_fermi },
98 { -1, -1, FERMI_B, &gf100_fermi },
99 { -1, -1, FERMI_C, &gf100_fermi },
100 { -1, -1, FERMI_COMPUTE_A },
101 {}
102 }
103};
104
105int
106gf110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
107{
108 return gf100_gr_new_(&gf110_gr, device, index, pgr);
109}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 871ac5f806f6..28483d8bf3d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -24,6 +24,8 @@
24#include "gf100.h" 24#include "gf100.h"
25#include "ctxgf100.h" 25#include "ctxgf100.h"
26 26
27#include <nvif/class.h>
28
27/******************************************************************************* 29/*******************************************************************************
28 * PGRAPH register lists 30 * PGRAPH register lists
29 ******************************************************************************/ 31 ******************************************************************************/
@@ -118,19 +120,27 @@ gf117_gr_gpccs_ucode = {
118 .data.size = sizeof(gf117_grgpc_data), 120 .data.size = sizeof(gf117_grgpc_data),
119}; 121};
120 122
121struct nvkm_oclass * 123static const struct gf100_gr_func
122gf117_gr_oclass = &(struct gf100_gr_oclass) { 124gf117_gr = {
123 .base.handle = NV_ENGINE(GR, 0xd7), 125 .init = gf100_gr_init,
124 .base.ofuncs = &(struct nvkm_ofuncs) {
125 .ctor = gf100_gr_ctor,
126 .dtor = gf100_gr_dtor,
127 .init = gf100_gr_init,
128 .fini = _nvkm_gr_fini,
129 },
130 .cclass = &gf117_grctx_oclass,
131 .sclass = gf110_gr_sclass,
132 .mmio = gf117_gr_pack_mmio, 126 .mmio = gf117_gr_pack_mmio,
133 .fecs.ucode = &gf117_gr_fecs_ucode, 127 .fecs.ucode = &gf117_gr_fecs_ucode,
134 .gpccs.ucode = &gf117_gr_gpccs_ucode, 128 .gpccs.ucode = &gf117_gr_gpccs_ucode,
135 .ppc_nr = 1, 129 .ppc_nr = 1,
136}.base; 130 .grctx = &gf117_grctx,
131 .sclass = {
132 { -1, -1, FERMI_TWOD_A },
133 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
134 { -1, -1, FERMI_A, &gf100_fermi },
135 { -1, -1, FERMI_B, &gf100_fermi },
136 { -1, -1, FERMI_C, &gf100_fermi },
137 { -1, -1, FERMI_COMPUTE_A },
138 {}
139 }
140};
141
142int
143gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
144{
145 return gf100_gr_new_(&gf117_gr, device, index, pgr);
146}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index e6dd651e2636..9811a72e0313 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -24,6 +24,8 @@
24#include "gf100.h" 24#include "gf100.h"
25#include "ctxgf100.h" 25#include "ctxgf100.h"
26 26
27#include <nvif/class.h>
28
27/******************************************************************************* 29/*******************************************************************************
28 * PGRAPH register lists 30 * PGRAPH register lists
29 ******************************************************************************/ 31 ******************************************************************************/
@@ -173,18 +175,26 @@ gf119_gr_pack_mmio[] = {
173 * PGRAPH engine/subdev functions 175 * PGRAPH engine/subdev functions
174 ******************************************************************************/ 176 ******************************************************************************/
175 177
176struct nvkm_oclass * 178static const struct gf100_gr_func
177gf119_gr_oclass = &(struct gf100_gr_oclass) { 179gf119_gr = {
178 .base.handle = NV_ENGINE(GR, 0xd9), 180 .init = gf100_gr_init,
179 .base.ofuncs = &(struct nvkm_ofuncs) {
180 .ctor = gf100_gr_ctor,
181 .dtor = gf100_gr_dtor,
182 .init = gf100_gr_init,
183 .fini = _nvkm_gr_fini,
184 },
185 .cclass = &gf119_grctx_oclass,
186 .sclass = gf110_gr_sclass,
187 .mmio = gf119_gr_pack_mmio, 181 .mmio = gf119_gr_pack_mmio,
188 .fecs.ucode = &gf100_gr_fecs_ucode, 182 .fecs.ucode = &gf100_gr_fecs_ucode,
189 .gpccs.ucode = &gf100_gr_gpccs_ucode, 183 .gpccs.ucode = &gf100_gr_gpccs_ucode,
190}.base; 184 .grctx = &gf119_grctx,
185 .sclass = {
186 { -1, -1, FERMI_TWOD_A },
187 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
188 { -1, -1, FERMI_A, &gf100_fermi },
189 { -1, -1, FERMI_B, &gf100_fermi },
190 { -1, -1, FERMI_C, &gf100_fermi },
191 { -1, -1, FERMI_COMPUTE_A },
192 {}
193 }
194};
195
196int
197gf119_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
198{
199 return gf100_gr_new_(&gf119_gr, device, index, pgr);
200}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 46f7844eca70..abf54928a1a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -24,24 +24,9 @@
24#include "gf100.h" 24#include "gf100.h"
25#include "ctxgf100.h" 25#include "ctxgf100.h"
26 26
27#include <subdev/pmu.h>
28
29#include <nvif/class.h> 27#include <nvif/class.h>
30 28
31/******************************************************************************* 29/*******************************************************************************
32 * Graphics object classes
33 ******************************************************************************/
34
35static struct nvkm_oclass
36gk104_gr_sclass[] = {
37 { FERMI_TWOD_A, &nvkm_object_ofuncs },
38 { KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
39 { KEPLER_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
40 { KEPLER_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
41 {}
42};
43
44/*******************************************************************************
45 * PGRAPH register lists 30 * PGRAPH register lists
46 ******************************************************************************/ 31 ******************************************************************************/
47 32
@@ -193,132 +178,112 @@ gk104_gr_pack_mmio[] = {
193 ******************************************************************************/ 178 ******************************************************************************/
194 179
195int 180int
196gk104_gr_init(struct nvkm_object *object) 181gk104_gr_init(struct gf100_gr *gr)
197{ 182{
198 struct gf100_gr_oclass *oclass = (void *)object->oclass; 183 struct nvkm_device *device = gr->base.engine.subdev.device;
199 struct gf100_gr_priv *priv = (void *)object; 184 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
200 struct nvkm_pmu *pmu = nvkm_pmu(priv);
201 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
202 u32 data[TPC_MAX / 8] = {}; 185 u32 data[TPC_MAX / 8] = {};
203 u8 tpcnr[GPC_MAX]; 186 u8 tpcnr[GPC_MAX];
204 int gpc, tpc, rop; 187 int gpc, tpc, rop;
205 int ret, i; 188 int i;
206
207 if (pmu)
208 pmu->pgob(pmu, false);
209 189
210 ret = nvkm_gr_init(&priv->base); 190 nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
211 if (ret) 191 nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
212 return ret; 192 nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
193 nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
194 nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
195 nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
196 nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
197 nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
213 198
214 nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000); 199 gf100_gr_mmio(gr, gr->func->mmio);
215 nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
216 nv_wr32(priv, GPC_BCAST(0x0888), 0x00000000);
217 nv_wr32(priv, GPC_BCAST(0x088c), 0x00000000);
218 nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
219 nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
220 nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
221 nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
222 200
223 gf100_gr_mmio(priv, oclass->mmio); 201 nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
224
225 nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
226 202
227 memset(data, 0x00, sizeof(data)); 203 memset(data, 0x00, sizeof(data));
228 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); 204 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
229 for (i = 0, gpc = -1; i < priv->tpc_total; i++) { 205 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
230 do { 206 do {
231 gpc = (gpc + 1) % priv->gpc_nr; 207 gpc = (gpc + 1) % gr->gpc_nr;
232 } while (!tpcnr[gpc]); 208 } while (!tpcnr[gpc]);
233 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--; 209 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
234 210
235 data[i / 8] |= tpc << ((i % 8) * 4); 211 data[i / 8] |= tpc << ((i % 8) * 4);
236 } 212 }
237 213
238 nv_wr32(priv, GPC_BCAST(0x0980), data[0]); 214 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
239 nv_wr32(priv, GPC_BCAST(0x0984), data[1]); 215 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
240 nv_wr32(priv, GPC_BCAST(0x0988), data[2]); 216 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
241 nv_wr32(priv, GPC_BCAST(0x098c), data[3]); 217 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
242 218
243 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 219 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
244 nv_wr32(priv, GPC_UNIT(gpc, 0x0914), 220 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
245 priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]); 221 gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
246 nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | 222 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
247 priv->tpc_total); 223 gr->tpc_total);
248 nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918); 224 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
249 } 225 }
250 226
251 nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918); 227 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
252 nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800)); 228 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
253 229
254 nv_wr32(priv, 0x400500, 0x00010001); 230 nvkm_wr32(device, 0x400500, 0x00010001);
255 231
256 nv_wr32(priv, 0x400100, 0xffffffff); 232 nvkm_wr32(device, 0x400100, 0xffffffff);
257 nv_wr32(priv, 0x40013c, 0xffffffff); 233 nvkm_wr32(device, 0x40013c, 0xffffffff);
258 234
259 nv_wr32(priv, 0x409ffc, 0x00000000); 235 nvkm_wr32(device, 0x409ffc, 0x00000000);
260 nv_wr32(priv, 0x409c14, 0x00003e3e); 236 nvkm_wr32(device, 0x409c14, 0x00003e3e);
261 nv_wr32(priv, 0x409c24, 0x000f0001); 237 nvkm_wr32(device, 0x409c24, 0x000f0001);
262 nv_wr32(priv, 0x404000, 0xc0000000); 238 nvkm_wr32(device, 0x404000, 0xc0000000);
263 nv_wr32(priv, 0x404600, 0xc0000000); 239 nvkm_wr32(device, 0x404600, 0xc0000000);
264 nv_wr32(priv, 0x408030, 0xc0000000); 240 nvkm_wr32(device, 0x408030, 0xc0000000);
265 nv_wr32(priv, 0x404490, 0xc0000000); 241 nvkm_wr32(device, 0x404490, 0xc0000000);
266 nv_wr32(priv, 0x406018, 0xc0000000); 242 nvkm_wr32(device, 0x406018, 0xc0000000);
267 nv_wr32(priv, 0x407020, 0x40000000); 243 nvkm_wr32(device, 0x407020, 0x40000000);
268 nv_wr32(priv, 0x405840, 0xc0000000); 244 nvkm_wr32(device, 0x405840, 0xc0000000);
269 nv_wr32(priv, 0x405844, 0x00ffffff); 245 nvkm_wr32(device, 0x405844, 0x00ffffff);
270 nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008); 246 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
271 nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000); 247 nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
272 248
273 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 249 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
274 nv_wr32(priv, GPC_UNIT(gpc, 0x3038), 0xc0000000); 250 nvkm_wr32(device, GPC_UNIT(gpc, 0x3038), 0xc0000000);
275 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000); 251 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
276 nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000); 252 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
277 nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000); 253 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
278 nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000); 254 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
279 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { 255 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
280 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff); 256 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
281 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff); 257 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
282 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000); 258 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
283 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000); 259 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
284 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000); 260 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
285 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe); 261 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
286 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f); 262 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
287 } 263 }
288 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff); 264 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
289 nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff); 265 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
290 } 266 }
291 267
292 for (rop = 0; rop < priv->rop_nr; rop++) { 268 for (rop = 0; rop < gr->rop_nr; rop++) {
293 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000); 269 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
294 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000); 270 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
295 nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff); 271 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
296 nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff); 272 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
297 } 273 }
298 274
299 nv_wr32(priv, 0x400108, 0xffffffff); 275 nvkm_wr32(device, 0x400108, 0xffffffff);
300 nv_wr32(priv, 0x400138, 0xffffffff); 276 nvkm_wr32(device, 0x400138, 0xffffffff);
301 nv_wr32(priv, 0x400118, 0xffffffff); 277 nvkm_wr32(device, 0x400118, 0xffffffff);
302 nv_wr32(priv, 0x400130, 0xffffffff); 278 nvkm_wr32(device, 0x400130, 0xffffffff);
303 nv_wr32(priv, 0x40011c, 0xffffffff); 279 nvkm_wr32(device, 0x40011c, 0xffffffff);
304 nv_wr32(priv, 0x400134, 0xffffffff); 280 nvkm_wr32(device, 0x400134, 0xffffffff);
305 281
306 nv_wr32(priv, 0x400054, 0x34ce3464); 282 nvkm_wr32(device, 0x400054, 0x34ce3464);
307 283
308 gf100_gr_zbc_init(priv); 284 gf100_gr_zbc_init(gr);
309 285
310 return gf100_gr_init_ctxctl(priv); 286 return gf100_gr_init_ctxctl(gr);
311}
312
313int
314gk104_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
315 struct nvkm_oclass *oclass, void *data, u32 size,
316 struct nvkm_object **pobject)
317{
318 struct nvkm_pmu *pmu = nvkm_pmu(parent);
319 if (pmu)
320 pmu->pgob(pmu, false);
321 return gf100_gr_ctor(parent, engine, oclass, data, size, pobject);
322} 287}
323 288
324#include "fuc/hubgk104.fuc3.h" 289#include "fuc/hubgk104.fuc3.h"
@@ -341,19 +306,25 @@ gk104_gr_gpccs_ucode = {
341 .data.size = sizeof(gk104_grgpc_data), 306 .data.size = sizeof(gk104_grgpc_data),
342}; 307};
343 308
344struct nvkm_oclass * 309static const struct gf100_gr_func
345gk104_gr_oclass = &(struct gf100_gr_oclass) { 310gk104_gr = {
346 .base.handle = NV_ENGINE(GR, 0xe4), 311 .init = gk104_gr_init,
347 .base.ofuncs = &(struct nvkm_ofuncs) {
348 .ctor = gk104_gr_ctor,
349 .dtor = gf100_gr_dtor,
350 .init = gk104_gr_init,
351 .fini = _nvkm_gr_fini,
352 },
353 .cclass = &gk104_grctx_oclass,
354 .sclass = gk104_gr_sclass,
355 .mmio = gk104_gr_pack_mmio, 312 .mmio = gk104_gr_pack_mmio,
356 .fecs.ucode = &gk104_gr_fecs_ucode, 313 .fecs.ucode = &gk104_gr_fecs_ucode,
357 .gpccs.ucode = &gk104_gr_gpccs_ucode, 314 .gpccs.ucode = &gk104_gr_gpccs_ucode,
358 .ppc_nr = 1, 315 .ppc_nr = 1,
359}.base; 316 .grctx = &gk104_grctx,
317 .sclass = {
318 { -1, -1, FERMI_TWOD_A },
319 { -1, -1, KEPLER_INLINE_TO_MEMORY_A },
320 { -1, -1, KEPLER_A, &gf100_fermi },
321 { -1, -1, KEPLER_COMPUTE_A },
322 {}
323 }
324};
325
326int
327gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
328{
329 return gf100_gr_new_(&gk104_gr, device, index, pgr);
330}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index f4cd8e5546af..32aa2946e7b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -29,19 +29,6 @@
29#include <nvif/class.h> 29#include <nvif/class.h>
30 30
31/******************************************************************************* 31/*******************************************************************************
32 * Graphics object classes
33 ******************************************************************************/
34
35struct nvkm_oclass
36gk110_gr_sclass[] = {
37 { FERMI_TWOD_A, &nvkm_object_ofuncs },
38 { KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
39 { KEPLER_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
40 { KEPLER_COMPUTE_B, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
41 {}
42};
43
44/*******************************************************************************
45 * PGRAPH register lists 32 * PGRAPH register lists
46 ******************************************************************************/ 33 ******************************************************************************/
47 34
@@ -193,19 +180,25 @@ gk110_gr_gpccs_ucode = {
193 .data.size = sizeof(gk110_grgpc_data), 180 .data.size = sizeof(gk110_grgpc_data),
194}; 181};
195 182
196struct nvkm_oclass * 183static const struct gf100_gr_func
197gk110_gr_oclass = &(struct gf100_gr_oclass) { 184gk110_gr = {
198 .base.handle = NV_ENGINE(GR, 0xf0), 185 .init = gk104_gr_init,
199 .base.ofuncs = &(struct nvkm_ofuncs) {
200 .ctor = gk104_gr_ctor,
201 .dtor = gf100_gr_dtor,
202 .init = gk104_gr_init,
203 .fini = _nvkm_gr_fini,
204 },
205 .cclass = &gk110_grctx_oclass,
206 .sclass = gk110_gr_sclass,
207 .mmio = gk110_gr_pack_mmio, 186 .mmio = gk110_gr_pack_mmio,
208 .fecs.ucode = &gk110_gr_fecs_ucode, 187 .fecs.ucode = &gk110_gr_fecs_ucode,
209 .gpccs.ucode = &gk110_gr_gpccs_ucode, 188 .gpccs.ucode = &gk110_gr_gpccs_ucode,
210 .ppc_nr = 2, 189 .ppc_nr = 2,
211}.base; 190 .grctx = &gk110_grctx,
191 .sclass = {
192 { -1, -1, FERMI_TWOD_A },
193 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
194 { -1, -1, KEPLER_B, &gf100_fermi },
195 { -1, -1, KEPLER_COMPUTE_B },
196 {}
197 }
198};
199
200int
201gk110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
202{
203 return gf100_gr_new_(&gk110_gr, device, index, pgr);
204}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 9ff9eab0ccaf..22f88afbf35f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -24,6 +24,8 @@
24#include "gf100.h" 24#include "gf100.h"
25#include "ctxgf100.h" 25#include "ctxgf100.h"
26 26
27#include <nvif/class.h>
28
27/******************************************************************************* 29/*******************************************************************************
28 * PGRAPH register lists 30 * PGRAPH register lists
29 ******************************************************************************/ 31 ******************************************************************************/
@@ -98,19 +100,25 @@ gk110b_gr_pack_mmio[] = {
98 * PGRAPH engine/subdev functions 100 * PGRAPH engine/subdev functions
99 ******************************************************************************/ 101 ******************************************************************************/
100 102
101struct nvkm_oclass * 103static const struct gf100_gr_func
102gk110b_gr_oclass = &(struct gf100_gr_oclass) { 104gk110b_gr = {
103 .base.handle = NV_ENGINE(GR, 0xf1), 105 .init = gk104_gr_init,
104 .base.ofuncs = &(struct nvkm_ofuncs) {
105 .ctor = gk104_gr_ctor,
106 .dtor = gf100_gr_dtor,
107 .init = gk104_gr_init,
108 .fini = _nvkm_gr_fini,
109 },
110 .cclass = &gk110b_grctx_oclass,
111 .sclass = gk110_gr_sclass,
112 .mmio = gk110b_gr_pack_mmio, 106 .mmio = gk110b_gr_pack_mmio,
113 .fecs.ucode = &gk110_gr_fecs_ucode, 107 .fecs.ucode = &gk110_gr_fecs_ucode,
114 .gpccs.ucode = &gk110_gr_gpccs_ucode, 108 .gpccs.ucode = &gk110_gr_gpccs_ucode,
115 .ppc_nr = 2, 109 .ppc_nr = 2,
116}.base; 110 .grctx = &gk110b_grctx,
111 .sclass = {
112 { -1, -1, FERMI_TWOD_A },
113 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
114 { -1, -1, KEPLER_B, &gf100_fermi },
115 { -1, -1, KEPLER_COMPUTE_B },
116 {}
117 }
118};
119
120int
121gk110b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
122{
123 return gf100_gr_new_(&gk110b_gr, device, index, pgr);
124}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 85f44a3d5d11..ee7554fc87dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -29,19 +29,6 @@
29#include <nvif/class.h> 29#include <nvif/class.h>
30 30
31/******************************************************************************* 31/*******************************************************************************
32 * Graphics object classes
33 ******************************************************************************/
34
35static struct nvkm_oclass
36gk208_gr_sclass[] = {
37 { FERMI_TWOD_A, &nvkm_object_ofuncs },
38 { KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
39 { KEPLER_B, &gf100_fermi_ofuncs },
40 { KEPLER_COMPUTE_B, &nvkm_object_ofuncs },
41 {}
42};
43
44/*******************************************************************************
45 * PGRAPH register lists 32 * PGRAPH register lists
46 ******************************************************************************/ 33 ******************************************************************************/
47 34
@@ -172,19 +159,25 @@ gk208_gr_gpccs_ucode = {
172 .data.size = sizeof(gk208_grgpc_data), 159 .data.size = sizeof(gk208_grgpc_data),
173}; 160};
174 161
175struct nvkm_oclass * 162static const struct gf100_gr_func
176gk208_gr_oclass = &(struct gf100_gr_oclass) { 163gk208_gr = {
177 .base.handle = NV_ENGINE(GR, 0x08), 164 .init = gk104_gr_init,
178 .base.ofuncs = &(struct nvkm_ofuncs) {
179 .ctor = gk104_gr_ctor,
180 .dtor = gf100_gr_dtor,
181 .init = gk104_gr_init,
182 .fini = _nvkm_gr_fini,
183 },
184 .cclass = &gk208_grctx_oclass,
185 .sclass = gk208_gr_sclass,
186 .mmio = gk208_gr_pack_mmio, 165 .mmio = gk208_gr_pack_mmio,
187 .fecs.ucode = &gk208_gr_fecs_ucode, 166 .fecs.ucode = &gk208_gr_fecs_ucode,
188 .gpccs.ucode = &gk208_gr_gpccs_ucode, 167 .gpccs.ucode = &gk208_gr_gpccs_ucode,
189 .ppc_nr = 1, 168 .ppc_nr = 1,
190}.base; 169 .grctx = &gk208_grctx,
170 .sclass = {
171 { -1, -1, FERMI_TWOD_A },
172 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
173 { -1, -1, KEPLER_B, &gf100_fermi },
174 { -1, -1, KEPLER_COMPUTE_B },
175 {}
176 }
177};
178
179int
180gk208_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
181{
182 return gf100_gr_new_(&gk208_gr, device, index, pgr);
183}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 40ff5eb9180c..b8758d3b8b51 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -22,28 +22,335 @@
22#include "gf100.h" 22#include "gf100.h"
23#include "ctxgf100.h" 23#include "ctxgf100.h"
24 24
25#include <subdev/timer.h>
26
25#include <nvif/class.h> 27#include <nvif/class.h>
26 28
27static struct nvkm_oclass 29static void
28gk20a_gr_sclass[] = { 30gk20a_gr_init_dtor(struct gf100_gr_pack *pack)
29 { FERMI_TWOD_A, &nvkm_object_ofuncs }, 31{
30 { KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs }, 32 vfree(pack);
31 { KEPLER_C, &gf100_fermi_ofuncs, gf100_gr_9097_omthds }, 33}
32 { KEPLER_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds }, 34
33 {} 35struct gk20a_fw_av
36{
37 u32 addr;
38 u32 data;
39};
40
41static struct gf100_gr_pack *
42gk20a_gr_av_to_init(struct gf100_gr_fuc *fuc)
43{
44 struct gf100_gr_init *init;
45 struct gf100_gr_pack *pack;
46 const int nent = (fuc->size / sizeof(struct gk20a_fw_av));
47 int i;
48
49 pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
50 if (!pack)
51 return ERR_PTR(-ENOMEM);
52
53 init = (void *)(pack + 2);
54
55 pack[0].init = init;
56
57 for (i = 0; i < nent; i++) {
58 struct gf100_gr_init *ent = &init[i];
59 struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc->data)[i];
60
61 ent->addr = av->addr;
62 ent->data = av->data;
63 ent->count = 1;
64 ent->pitch = 1;
65 }
66
67 return pack;
68}
69
70struct gk20a_fw_aiv
71{
72 u32 addr;
73 u32 index;
74 u32 data;
34}; 75};
35 76
36struct nvkm_oclass * 77static struct gf100_gr_pack *
37gk20a_gr_oclass = &(struct gf100_gr_oclass) { 78gk20a_gr_aiv_to_init(struct gf100_gr_fuc *fuc)
38 .base.handle = NV_ENGINE(GR, 0xea), 79{
39 .base.ofuncs = &(struct nvkm_ofuncs) { 80 struct gf100_gr_init *init;
40 .ctor = gf100_gr_ctor, 81 struct gf100_gr_pack *pack;
41 .dtor = gf100_gr_dtor, 82 const int nent = (fuc->size / sizeof(struct gk20a_fw_aiv));
42 .init = gk104_gr_init, 83 int i;
43 .fini = _nvkm_gr_fini, 84
44 }, 85 pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
45 .cclass = &gk20a_grctx_oclass, 86 if (!pack)
46 .sclass = gk20a_gr_sclass, 87 return ERR_PTR(-ENOMEM);
47 .mmio = gk104_gr_pack_mmio, 88
89 init = (void *)(pack + 2);
90
91 pack[0].init = init;
92
93 for (i = 0; i < nent; i++) {
94 struct gf100_gr_init *ent = &init[i];
95 struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc->data)[i];
96
97 ent->addr = av->addr;
98 ent->data = av->data;
99 ent->count = 1;
100 ent->pitch = 1;
101 }
102
103 return pack;
104}
105
106static struct gf100_gr_pack *
107gk20a_gr_av_to_method(struct gf100_gr_fuc *fuc)
108{
109 struct gf100_gr_init *init;
110 struct gf100_gr_pack *pack;
111 /* We don't suppose we will initialize more than 16 classes here... */
112 static const unsigned int max_classes = 16;
113 const int nent = (fuc->size / sizeof(struct gk20a_fw_av));
114 int i, classidx = 0;
115 u32 prevclass = 0;
116
117 pack = vzalloc((sizeof(*pack) * max_classes) +
118 (sizeof(*init) * (nent + 1)));
119 if (!pack)
120 return ERR_PTR(-ENOMEM);
121
122 init = (void *)(pack + max_classes);
123
124 for (i = 0; i < nent; i++) {
125 struct gf100_gr_init *ent = &init[i];
126 struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc->data)[i];
127 u32 class = av->addr & 0xffff;
128 u32 addr = (av->addr & 0xffff0000) >> 14;
129
130 if (prevclass != class) {
131 pack[classidx].init = ent;
132 pack[classidx].type = class;
133 prevclass = class;
134 if (++classidx >= max_classes) {
135 vfree(pack);
136 return ERR_PTR(-ENOSPC);
137 }
138 }
139
140 ent->addr = addr;
141 ent->data = av->data;
142 ent->count = 1;
143 ent->pitch = 1;
144 }
145
146 return pack;
147}
148
149static int
150gk20a_gr_wait_mem_scrubbing(struct gf100_gr *gr)
151{
152 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
153 struct nvkm_device *device = subdev->device;
154
155 if (nvkm_msec(device, 2000,
156 if (!(nvkm_rd32(device, 0x40910c) & 0x00000006))
157 break;
158 ) < 0) {
159 nvkm_error(subdev, "FECS mem scrubbing timeout\n");
160 return -ETIMEDOUT;
161 }
162
163 if (nvkm_msec(device, 2000,
164 if (!(nvkm_rd32(device, 0x41a10c) & 0x00000006))
165 break;
166 ) < 0) {
167 nvkm_error(subdev, "GPCCS mem scrubbing timeout\n");
168 return -ETIMEDOUT;
169 }
170
171 return 0;
172}
173
174static void
175gk20a_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
176{
177 struct nvkm_device *device = gr->base.engine.subdev.device;
178 nvkm_wr32(device, 0x419e44, 0x1ffffe);
179 nvkm_wr32(device, 0x419e4c, 0x7f);
180}
181
182int
183gk20a_gr_init(struct gf100_gr *gr)
184{
185 struct nvkm_device *device = gr->base.engine.subdev.device;
186 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
187 u32 data[TPC_MAX / 8] = {};
188 u8 tpcnr[GPC_MAX];
189 int gpc, tpc;
190 int ret, i;
191
192 /* Clear SCC RAM */
193 nvkm_wr32(device, 0x40802c, 0x1);
194
195 gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
196
197 ret = gk20a_gr_wait_mem_scrubbing(gr);
198 if (ret)
199 return ret;
200
201 ret = gf100_gr_wait_idle(gr);
202 if (ret)
203 return ret;
204
205 /* MMU debug buffer */
206 nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
207 nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
208
209 if (gr->func->init_gpc_mmu)
210 gr->func->init_gpc_mmu(gr);
211
212 /* Set the PE as stream master */
213 nvkm_mask(device, 0x503018, 0x1, 0x1);
214
215 /* Zcull init */
216 memset(data, 0x00, sizeof(data));
217 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
218 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
219 do {
220 gpc = (gpc + 1) % gr->gpc_nr;
221 } while (!tpcnr[gpc]);
222 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
223
224 data[i / 8] |= tpc << ((i % 8) * 4);
225 }
226
227 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
228 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
229 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
230 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
231
232 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
233 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
234 gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
235 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
236 gr->tpc_total);
237 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
238 }
239
240 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
241
242 /* Enable FIFO access */
243 nvkm_wr32(device, 0x400500, 0x00010001);
244
245 /* Enable interrupts */
246 nvkm_wr32(device, 0x400100, 0xffffffff);
247 nvkm_wr32(device, 0x40013c, 0xffffffff);
248
249 /* Enable FECS error interrupts */
250 nvkm_wr32(device, 0x409c24, 0x000f0000);
251
252 /* Enable hardware warning exceptions */
253 nvkm_wr32(device, 0x404000, 0xc0000000);
254 nvkm_wr32(device, 0x404600, 0xc0000000);
255
256 if (gr->func->set_hww_esr_report_mask)
257 gr->func->set_hww_esr_report_mask(gr);
258
259 /* Enable TPC exceptions per GPC */
260 nvkm_wr32(device, 0x419d0c, 0x2);
261 nvkm_wr32(device, 0x41ac94, (((1 << gr->tpc_total) - 1) & 0xff) << 16);
262
263 /* Reset and enable all exceptions */
264 nvkm_wr32(device, 0x400108, 0xffffffff);
265 nvkm_wr32(device, 0x400138, 0xffffffff);
266 nvkm_wr32(device, 0x400118, 0xffffffff);
267 nvkm_wr32(device, 0x400130, 0xffffffff);
268 nvkm_wr32(device, 0x40011c, 0xffffffff);
269 nvkm_wr32(device, 0x400134, 0xffffffff);
270
271 gf100_gr_zbc_init(gr);
272
273 return gf100_gr_init_ctxctl(gr);
274}
275
276void
277gk20a_gr_dtor(struct gf100_gr *gr)
278{
279 gk20a_gr_init_dtor(gr->fuc_method);
280 gk20a_gr_init_dtor(gr->fuc_bundle);
281 gk20a_gr_init_dtor(gr->fuc_sw_ctx);
282 gk20a_gr_init_dtor(gr->fuc_sw_nonctx);
283}
284
285int
286gk20a_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
287 int index, struct nvkm_gr **pgr)
288{
289 struct gf100_gr_fuc fuc;
290 struct gf100_gr *gr;
291 int ret;
292
293 if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
294 return -ENOMEM;
295 *pgr = &gr->base;
296
297 ret = gf100_gr_ctor(func, device, index, gr);
298 if (ret)
299 return ret;
300
301 ret = gf100_gr_ctor_fw(gr, "sw_nonctx", &fuc);
302 if (ret)
303 return ret;
304 gr->fuc_sw_nonctx = gk20a_gr_av_to_init(&fuc);
305 gf100_gr_dtor_fw(&fuc);
306 if (IS_ERR(gr->fuc_sw_nonctx))
307 return PTR_ERR(gr->fuc_sw_nonctx);
308
309 ret = gf100_gr_ctor_fw(gr, "sw_ctx", &fuc);
310 if (ret)
311 return ret;
312 gr->fuc_sw_ctx = gk20a_gr_aiv_to_init(&fuc);
313 gf100_gr_dtor_fw(&fuc);
314 if (IS_ERR(gr->fuc_sw_ctx))
315 return PTR_ERR(gr->fuc_sw_ctx);
316
317 ret = gf100_gr_ctor_fw(gr, "sw_bundle_init", &fuc);
318 if (ret)
319 return ret;
320 gr->fuc_bundle = gk20a_gr_av_to_init(&fuc);
321 gf100_gr_dtor_fw(&fuc);
322 if (IS_ERR(gr->fuc_bundle))
323 return PTR_ERR(gr->fuc_bundle);
324
325 ret = gf100_gr_ctor_fw(gr, "sw_method_init", &fuc);
326 if (ret)
327 return ret;
328 gr->fuc_method = gk20a_gr_av_to_method(&fuc);
329 gf100_gr_dtor_fw(&fuc);
330 if (IS_ERR(gr->fuc_method))
331 return PTR_ERR(gr->fuc_method);
332
333 return 0;
334}
335
336static const struct gf100_gr_func
337gk20a_gr = {
338 .dtor = gk20a_gr_dtor,
339 .init = gk20a_gr_init,
340 .set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
48 .ppc_nr = 1, 341 .ppc_nr = 1,
49}.base; 342 .grctx = &gk20a_grctx,
343 .sclass = {
344 { -1, -1, FERMI_TWOD_A },
345 { -1, -1, KEPLER_INLINE_TO_MEMORY_A },
346 { -1, -1, KEPLER_C, &gf100_fermi },
347 { -1, -1, KEPLER_COMPUTE_A },
348 {}
349 }
350};
351
352int
353gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
354{
355 return gk20a_gr_new_(&gk20a_gr, device, index, pgr);
356}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index a5ebd459bc24..56e960212e5d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -30,19 +30,6 @@
30#include <nvif/class.h> 30#include <nvif/class.h>
31 31
32/******************************************************************************* 32/*******************************************************************************
33 * Graphics object classes
34 ******************************************************************************/
35
36static struct nvkm_oclass
37gm107_gr_sclass[] = {
38 { FERMI_TWOD_A, &nvkm_object_ofuncs },
39 { KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
40 { MAXWELL_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
41 { MAXWELL_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
42 {}
43};
44
45/*******************************************************************************
46 * PGRAPH register lists 33 * PGRAPH register lists
47 ******************************************************************************/ 34 ******************************************************************************/
48 35
@@ -292,7 +279,7 @@ gm107_gr_pack_mmio[] = {
292 ******************************************************************************/ 279 ******************************************************************************/
293 280
294void 281void
295gm107_gr_init_bios(struct gf100_gr_priv *priv) 282gm107_gr_init_bios(struct gf100_gr *gr)
296{ 283{
297 static const struct { 284 static const struct {
298 u32 ctrl; 285 u32 ctrl;
@@ -304,7 +291,8 @@ gm107_gr_init_bios(struct gf100_gr_priv *priv)
304 { 0x419af0, 0x419af4 }, 291 { 0x419af0, 0x419af4 },
305 { 0x419af8, 0x419afc }, 292 { 0x419af8, 0x419afc },
306 }; 293 };
307 struct nvkm_bios *bios = nvkm_bios(priv); 294 struct nvkm_device *device = gr->base.engine.subdev.device;
295 struct nvkm_bios *bios = device->bios;
308 struct nvbios_P0260E infoE; 296 struct nvbios_P0260E infoE;
309 struct nvbios_P0260X infoX; 297 struct nvbios_P0260X infoX;
310 int E = -1, X; 298 int E = -1, X;
@@ -312,124 +300,119 @@ gm107_gr_init_bios(struct gf100_gr_priv *priv)
312 300
313 while (nvbios_P0260Ep(bios, ++E, &ver, &hdr, &infoE)) { 301 while (nvbios_P0260Ep(bios, ++E, &ver, &hdr, &infoE)) {
314 if (X = -1, E < ARRAY_SIZE(regs)) { 302 if (X = -1, E < ARRAY_SIZE(regs)) {
315 nv_wr32(priv, regs[E].ctrl, infoE.data); 303 nvkm_wr32(device, regs[E].ctrl, infoE.data);
316 while (nvbios_P0260Xp(bios, ++X, &ver, &hdr, &infoX)) 304 while (nvbios_P0260Xp(bios, ++X, &ver, &hdr, &infoX))
317 nv_wr32(priv, regs[E].data, infoX.data); 305 nvkm_wr32(device, regs[E].data, infoX.data);
318 } 306 }
319 } 307 }
320} 308}
321 309
322int 310int
323gm107_gr_init(struct nvkm_object *object) 311gm107_gr_init(struct gf100_gr *gr)
324{ 312{
325 struct gf100_gr_oclass *oclass = (void *)object->oclass; 313 struct nvkm_device *device = gr->base.engine.subdev.device;
326 struct gf100_gr_priv *priv = (void *)object; 314 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
327 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
328 u32 data[TPC_MAX / 8] = {}; 315 u32 data[TPC_MAX / 8] = {};
329 u8 tpcnr[GPC_MAX]; 316 u8 tpcnr[GPC_MAX];
330 int gpc, tpc, ppc, rop; 317 int gpc, tpc, ppc, rop;
331 int ret, i; 318 int i;
332
333 ret = nvkm_gr_init(&priv->base);
334 if (ret)
335 return ret;
336 319
337 nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000); 320 nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
338 nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000); 321 nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
339 nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000); 322 nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
340 nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8); 323 nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
341 nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8); 324 nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
342 325
343 gf100_gr_mmio(priv, oclass->mmio); 326 gf100_gr_mmio(gr, gr->func->mmio);
344 327
345 gm107_gr_init_bios(priv); 328 gm107_gr_init_bios(gr);
346 329
347 nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001); 330 nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
348 331
349 memset(data, 0x00, sizeof(data)); 332 memset(data, 0x00, sizeof(data));
350 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); 333 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
351 for (i = 0, gpc = -1; i < priv->tpc_total; i++) { 334 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
352 do { 335 do {
353 gpc = (gpc + 1) % priv->gpc_nr; 336 gpc = (gpc + 1) % gr->gpc_nr;
354 } while (!tpcnr[gpc]); 337 } while (!tpcnr[gpc]);
355 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--; 338 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
356 339
357 data[i / 8] |= tpc << ((i % 8) * 4); 340 data[i / 8] |= tpc << ((i % 8) * 4);
358 } 341 }
359 342
360 nv_wr32(priv, GPC_BCAST(0x0980), data[0]); 343 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
361 nv_wr32(priv, GPC_BCAST(0x0984), data[1]); 344 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
362 nv_wr32(priv, GPC_BCAST(0x0988), data[2]); 345 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
363 nv_wr32(priv, GPC_BCAST(0x098c), data[3]); 346 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
364 347
365 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 348 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
366 nv_wr32(priv, GPC_UNIT(gpc, 0x0914), 349 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
367 priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]); 350 gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
368 nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | 351 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
369 priv->tpc_total); 352 gr->tpc_total);
370 nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918); 353 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
371 } 354 }
372 355
373 nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918); 356 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
374 nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800)); 357 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
375 358
376 nv_wr32(priv, 0x400500, 0x00010001); 359 nvkm_wr32(device, 0x400500, 0x00010001);
377 360
378 nv_wr32(priv, 0x400100, 0xffffffff); 361 nvkm_wr32(device, 0x400100, 0xffffffff);
379 nv_wr32(priv, 0x40013c, 0xffffffff); 362 nvkm_wr32(device, 0x40013c, 0xffffffff);
380 nv_wr32(priv, 0x400124, 0x00000002); 363 nvkm_wr32(device, 0x400124, 0x00000002);
381 nv_wr32(priv, 0x409c24, 0x000e0000); 364 nvkm_wr32(device, 0x409c24, 0x000e0000);
382 365
383 nv_wr32(priv, 0x404000, 0xc0000000); 366 nvkm_wr32(device, 0x404000, 0xc0000000);
384 nv_wr32(priv, 0x404600, 0xc0000000); 367 nvkm_wr32(device, 0x404600, 0xc0000000);
385 nv_wr32(priv, 0x408030, 0xc0000000); 368 nvkm_wr32(device, 0x408030, 0xc0000000);
386 nv_wr32(priv, 0x404490, 0xc0000000); 369 nvkm_wr32(device, 0x404490, 0xc0000000);
387 nv_wr32(priv, 0x406018, 0xc0000000); 370 nvkm_wr32(device, 0x406018, 0xc0000000);
388 nv_wr32(priv, 0x407020, 0x40000000); 371 nvkm_wr32(device, 0x407020, 0x40000000);
389 nv_wr32(priv, 0x405840, 0xc0000000); 372 nvkm_wr32(device, 0x405840, 0xc0000000);
390 nv_wr32(priv, 0x405844, 0x00ffffff); 373 nvkm_wr32(device, 0x405844, 0x00ffffff);
391 nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008); 374 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
392 375
393 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 376 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
394 for (ppc = 0; ppc < 2 /* priv->ppc_nr[gpc] */; ppc++) 377 for (ppc = 0; ppc < 2 /* gr->ppc_nr[gpc] */; ppc++)
395 nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000); 378 nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
396 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000); 379 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
397 nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000); 380 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
398 nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000); 381 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
399 nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000); 382 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
400 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { 383 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
401 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff); 384 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
402 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff); 385 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
403 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000); 386 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
404 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000); 387 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
405 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000); 388 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
406 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000); 389 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
407 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe); 390 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
408 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005); 391 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
409 } 392 }
410 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff); 393 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
411 nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff); 394 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
412 } 395 }
413 396
414 for (rop = 0; rop < priv->rop_nr; rop++) { 397 for (rop = 0; rop < gr->rop_nr; rop++) {
415 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0x40000000); 398 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
416 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0x40000000); 399 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
417 nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff); 400 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
418 nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff); 401 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
419 } 402 }
420 403
421 nv_wr32(priv, 0x400108, 0xffffffff); 404 nvkm_wr32(device, 0x400108, 0xffffffff);
422 nv_wr32(priv, 0x400138, 0xffffffff); 405 nvkm_wr32(device, 0x400138, 0xffffffff);
423 nv_wr32(priv, 0x400118, 0xffffffff); 406 nvkm_wr32(device, 0x400118, 0xffffffff);
424 nv_wr32(priv, 0x400130, 0xffffffff); 407 nvkm_wr32(device, 0x400130, 0xffffffff);
425 nv_wr32(priv, 0x40011c, 0xffffffff); 408 nvkm_wr32(device, 0x40011c, 0xffffffff);
426 nv_wr32(priv, 0x400134, 0xffffffff); 409 nvkm_wr32(device, 0x400134, 0xffffffff);
427 410
428 nv_wr32(priv, 0x400054, 0x2c350f63); 411 nvkm_wr32(device, 0x400054, 0x2c350f63);
429 412
430 gf100_gr_zbc_init(priv); 413 gf100_gr_zbc_init(gr);
431 414
432 return gf100_gr_init_ctxctl(priv); 415 return gf100_gr_init_ctxctl(gr);
433} 416}
434 417
435#include "fuc/hubgm107.fuc5.h" 418#include "fuc/hubgm107.fuc5.h"
@@ -452,19 +435,25 @@ gm107_gr_gpccs_ucode = {
452 .data.size = sizeof(gm107_grgpc_data), 435 .data.size = sizeof(gm107_grgpc_data),
453}; 436};
454 437
455struct nvkm_oclass * 438static const struct gf100_gr_func
456gm107_gr_oclass = &(struct gf100_gr_oclass) { 439gm107_gr = {
457 .base.handle = NV_ENGINE(GR, 0x07), 440 .init = gm107_gr_init,
458 .base.ofuncs = &(struct nvkm_ofuncs) {
459 .ctor = gf100_gr_ctor,
460 .dtor = gf100_gr_dtor,
461 .init = gm107_gr_init,
462 .fini = _nvkm_gr_fini,
463 },
464 .cclass = &gm107_grctx_oclass,
465 .sclass = gm107_gr_sclass,
466 .mmio = gm107_gr_pack_mmio, 441 .mmio = gm107_gr_pack_mmio,
467 .fecs.ucode = &gm107_gr_fecs_ucode, 442 .fecs.ucode = &gm107_gr_fecs_ucode,
468 .gpccs.ucode = &gm107_gr_gpccs_ucode, 443 .gpccs.ucode = &gm107_gr_gpccs_ucode,
469 .ppc_nr = 2, 444 .ppc_nr = 2,
470}.base; 445 .grctx = &gm107_grctx,
446 .sclass = {
447 { -1, -1, FERMI_TWOD_A },
448 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
449 { -1, -1, MAXWELL_A, &gf100_fermi },
450 { -1, -1, MAXWELL_COMPUTE_A },
451 {}
452 }
453};
454
455int
456gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
457{
458 return gf100_gr_new_(&gm107_gr, device, index, pgr);
459}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
index fdb1dcf16a59..90381dde451a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
@@ -27,19 +27,6 @@
27#include <nvif/class.h> 27#include <nvif/class.h>
28 28
29/******************************************************************************* 29/*******************************************************************************
30 * Graphics object classes
31 ******************************************************************************/
32
33struct nvkm_oclass
34gm204_gr_sclass[] = {
35 { FERMI_TWOD_A, &nvkm_object_ofuncs },
36 { KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
37 { MAXWELL_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
38 { MAXWELL_COMPUTE_B, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
39 {}
40};
41
42/*******************************************************************************
43 * PGRAPH register lists 30 * PGRAPH register lists
44 ******************************************************************************/ 31 ******************************************************************************/
45 32
@@ -243,144 +230,144 @@ gm204_gr_data[] = {
243 ******************************************************************************/ 230 ******************************************************************************/
244 231
245static int 232static int
246gm204_gr_init_ctxctl(struct gf100_gr_priv *priv) 233gm204_gr_init_ctxctl(struct gf100_gr *gr)
247{ 234{
248 return 0; 235 return 0;
249} 236}
250 237
251int 238int
252gm204_gr_init(struct nvkm_object *object) 239gm204_gr_init(struct gf100_gr *gr)
253{ 240{
254 struct gf100_gr_oclass *oclass = (void *)object->oclass; 241 struct nvkm_device *device = gr->base.engine.subdev.device;
255 struct gf100_gr_priv *priv = (void *)object; 242 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
256 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total); 243 u32 data[TPC_MAX / 8] = {}, tmp;
257 u32 data[TPC_MAX / 8] = {};
258 u8 tpcnr[GPC_MAX]; 244 u8 tpcnr[GPC_MAX];
259 int gpc, tpc, ppc, rop; 245 int gpc, tpc, ppc, rop;
260 int ret, i; 246 int i;
261 u32 tmp;
262
263 ret = nvkm_gr_init(&priv->base);
264 if (ret)
265 return ret;
266 247
267 tmp = nv_rd32(priv, 0x100c80); /*XXX: mask? */ 248 tmp = nvkm_rd32(device, 0x100c80); /*XXX: mask? */
268 nv_wr32(priv, 0x418880, 0x00001000 | (tmp & 0x00000fff)); 249 nvkm_wr32(device, 0x418880, 0x00001000 | (tmp & 0x00000fff));
269 nv_wr32(priv, 0x418890, 0x00000000); 250 nvkm_wr32(device, 0x418890, 0x00000000);
270 nv_wr32(priv, 0x418894, 0x00000000); 251 nvkm_wr32(device, 0x418894, 0x00000000);
271 nv_wr32(priv, 0x4188b4, priv->unk4188b4->addr >> 8); 252 nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(gr->unk4188b4) >> 8);
272 nv_wr32(priv, 0x4188b8, priv->unk4188b8->addr >> 8); 253 nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(gr->unk4188b8) >> 8);
273 nv_mask(priv, 0x4188b0, 0x00040000, 0x00040000); 254 nvkm_mask(device, 0x4188b0, 0x00040000, 0x00040000);
274 255
275 /*XXX: belongs in fb */ 256 /*XXX: belongs in fb */
276 nv_wr32(priv, 0x100cc8, priv->unk4188b4->addr >> 8); 257 nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
277 nv_wr32(priv, 0x100ccc, priv->unk4188b8->addr >> 8); 258 nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
278 nv_mask(priv, 0x100cc4, 0x00040000, 0x00040000); 259 nvkm_mask(device, 0x100cc4, 0x00040000, 0x00040000);
279 260
280 gf100_gr_mmio(priv, oclass->mmio); 261 gf100_gr_mmio(gr, gr->func->mmio);
281 262
282 gm107_gr_init_bios(priv); 263 gm107_gr_init_bios(gr);
283 264
284 nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001); 265 nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
285 266
286 memset(data, 0x00, sizeof(data)); 267 memset(data, 0x00, sizeof(data));
287 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); 268 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
288 for (i = 0, gpc = -1; i < priv->tpc_total; i++) { 269 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
289 do { 270 do {
290 gpc = (gpc + 1) % priv->gpc_nr; 271 gpc = (gpc + 1) % gr->gpc_nr;
291 } while (!tpcnr[gpc]); 272 } while (!tpcnr[gpc]);
292 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--; 273 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
293 274
294 data[i / 8] |= tpc << ((i % 8) * 4); 275 data[i / 8] |= tpc << ((i % 8) * 4);
295 } 276 }
296 277
297 nv_wr32(priv, GPC_BCAST(0x0980), data[0]); 278 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
298 nv_wr32(priv, GPC_BCAST(0x0984), data[1]); 279 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
299 nv_wr32(priv, GPC_BCAST(0x0988), data[2]); 280 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
300 nv_wr32(priv, GPC_BCAST(0x098c), data[3]); 281 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
301 282
302 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 283 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
303 nv_wr32(priv, GPC_UNIT(gpc, 0x0914), 284 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
304 priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]); 285 gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
305 nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | 286 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
306 priv->tpc_total); 287 gr->tpc_total);
307 nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918); 288 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
308 } 289 }
309 290
310 nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918); 291 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
311 nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800)); 292 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
312 nv_wr32(priv, GPC_BCAST(0x033c), nv_rd32(priv, 0x100804)); 293 nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
313 294
314 nv_wr32(priv, 0x400500, 0x00010001); 295 nvkm_wr32(device, 0x400500, 0x00010001);
315 nv_wr32(priv, 0x400100, 0xffffffff); 296 nvkm_wr32(device, 0x400100, 0xffffffff);
316 nv_wr32(priv, 0x40013c, 0xffffffff); 297 nvkm_wr32(device, 0x40013c, 0xffffffff);
317 nv_wr32(priv, 0x400124, 0x00000002); 298 nvkm_wr32(device, 0x400124, 0x00000002);
318 nv_wr32(priv, 0x409c24, 0x000e0000); 299 nvkm_wr32(device, 0x409c24, 0x000e0000);
319 nv_wr32(priv, 0x405848, 0xc0000000); 300 nvkm_wr32(device, 0x405848, 0xc0000000);
320 nv_wr32(priv, 0x40584c, 0x00000001); 301 nvkm_wr32(device, 0x40584c, 0x00000001);
321 nv_wr32(priv, 0x404000, 0xc0000000); 302 nvkm_wr32(device, 0x404000, 0xc0000000);
322 nv_wr32(priv, 0x404600, 0xc0000000); 303 nvkm_wr32(device, 0x404600, 0xc0000000);
323 nv_wr32(priv, 0x408030, 0xc0000000); 304 nvkm_wr32(device, 0x408030, 0xc0000000);
324 nv_wr32(priv, 0x404490, 0xc0000000); 305 nvkm_wr32(device, 0x404490, 0xc0000000);
325 nv_wr32(priv, 0x406018, 0xc0000000); 306 nvkm_wr32(device, 0x406018, 0xc0000000);
326 nv_wr32(priv, 0x407020, 0x40000000); 307 nvkm_wr32(device, 0x407020, 0x40000000);
327 nv_wr32(priv, 0x405840, 0xc0000000); 308 nvkm_wr32(device, 0x405840, 0xc0000000);
328 nv_wr32(priv, 0x405844, 0x00ffffff); 309 nvkm_wr32(device, 0x405844, 0x00ffffff);
329 nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008); 310 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
330 311
331 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 312 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
332 for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) 313 for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++)
333 nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000); 314 nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
334 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000); 315 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
335 nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000); 316 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
336 nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000); 317 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
337 nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000); 318 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
338 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { 319 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
339 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff); 320 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
340 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff); 321 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
341 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000); 322 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
342 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000); 323 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
343 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000); 324 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
344 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000); 325 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
345 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe); 326 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
346 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005); 327 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
347 } 328 }
348 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff); 329 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
349 nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff); 330 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
350 } 331 }
351 332
352 for (rop = 0; rop < priv->rop_nr; rop++) { 333 for (rop = 0; rop < gr->rop_nr; rop++) {
353 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0x40000000); 334 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
354 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0x40000000); 335 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
355 nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff); 336 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
356 nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff); 337 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
357 } 338 }
358 339
359 nv_wr32(priv, 0x400108, 0xffffffff); 340 nvkm_wr32(device, 0x400108, 0xffffffff);
360 nv_wr32(priv, 0x400138, 0xffffffff); 341 nvkm_wr32(device, 0x400138, 0xffffffff);
361 nv_wr32(priv, 0x400118, 0xffffffff); 342 nvkm_wr32(device, 0x400118, 0xffffffff);
362 nv_wr32(priv, 0x400130, 0xffffffff); 343 nvkm_wr32(device, 0x400130, 0xffffffff);
363 nv_wr32(priv, 0x40011c, 0xffffffff); 344 nvkm_wr32(device, 0x40011c, 0xffffffff);
364 nv_wr32(priv, 0x400134, 0xffffffff); 345 nvkm_wr32(device, 0x400134, 0xffffffff);
365 346
366 nv_wr32(priv, 0x400054, 0x2c350f63); 347 nvkm_wr32(device, 0x400054, 0x2c350f63);
367 348
368 gf100_gr_zbc_init(priv); 349 gf100_gr_zbc_init(gr);
369 350
370 return gm204_gr_init_ctxctl(priv); 351 return gm204_gr_init_ctxctl(gr);
371} 352}
372 353
373struct nvkm_oclass * 354static const struct gf100_gr_func
374gm204_gr_oclass = &(struct gf100_gr_oclass) { 355gm204_gr = {
375 .base.handle = NV_ENGINE(GR, 0x24), 356 .init = gm204_gr_init,
376 .base.ofuncs = &(struct nvkm_ofuncs) {
377 .ctor = gf100_gr_ctor,
378 .dtor = gf100_gr_dtor,
379 .init = gm204_gr_init,
380 .fini = _nvkm_gr_fini,
381 },
382 .cclass = &gm204_grctx_oclass,
383 .sclass = gm204_gr_sclass,
384 .mmio = gm204_gr_pack_mmio, 357 .mmio = gm204_gr_pack_mmio,
385 .ppc_nr = 2, 358 .ppc_nr = 2,
386}.base; 359 .grctx = &gm204_grctx,
360 .sclass = {
361 { -1, -1, FERMI_TWOD_A },
362 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
363 { -1, -1, MAXWELL_B, &gf100_fermi },
364 { -1, -1, MAXWELL_COMPUTE_B },
365 {}
366 }
367};
368
369int
370gm204_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
371{
372 return gf100_gr_new_(&gm204_gr, device, index, pgr);
373}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
index 04b9733d146a..341dc560acbb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
@@ -24,17 +24,25 @@
24#include "gf100.h" 24#include "gf100.h"
25#include "ctxgf100.h" 25#include "ctxgf100.h"
26 26
27struct nvkm_oclass * 27#include <nvif/class.h>
28gm206_gr_oclass = &(struct gf100_gr_oclass) { 28
29 .base.handle = NV_ENGINE(GR, 0x26), 29static const struct gf100_gr_func
30 .base.ofuncs = &(struct nvkm_ofuncs) { 30gm206_gr = {
31 .ctor = gf100_gr_ctor, 31 .init = gm204_gr_init,
32 .dtor = gf100_gr_dtor,
33 .init = gm204_gr_init,
34 .fini = _nvkm_gr_fini,
35 },
36 .cclass = &gm206_grctx_oclass,
37 .sclass = gm204_gr_sclass,
38 .mmio = gm204_gr_pack_mmio, 32 .mmio = gm204_gr_pack_mmio,
39 .ppc_nr = 2, 33 .ppc_nr = 2,
40}.base; 34 .grctx = &gm206_grctx,
35 .sclass = {
36 { -1, -1, FERMI_TWOD_A },
37 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
38 { -1, -1, MAXWELL_B, &gf100_fermi },
39 { -1, -1, MAXWELL_COMPUTE_B },
40 {}
41 }
42};
43
44int
45gm206_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
46{
47 return gf100_gr_new_(&gm206_gr, device, index, pgr);
48}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
new file mode 100644
index 000000000000..65b6e3d1e90d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -0,0 +1,83 @@
1/*
2 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22#include "gf100.h"
23#include "ctxgf100.h"
24
25#include <subdev/timer.h>
26
27#include <nvif/class.h>
28
29static void
30gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
31{
32 struct nvkm_device *device = gr->base.engine.subdev.device;
33 u32 val;
34
35 /* TODO this needs to be removed once secure boot works */
36 if (1) {
37 nvkm_wr32(device, 0x100ce4, 0xffffffff);
38 }
39
40 /* TODO update once secure boot works */
41 val = nvkm_rd32(device, 0x100c80);
42 val &= 0xf000087f;
43 nvkm_wr32(device, 0x418880, val);
44 nvkm_wr32(device, 0x418890, 0);
45 nvkm_wr32(device, 0x418894, 0);
46
47 nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
48 nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
49 nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
50
51 nvkm_wr32(device, 0x4188ac, nvkm_rd32(device, 0x100800));
52}
53
54static void
55gm20b_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
56{
57 struct nvkm_device *device = gr->base.engine.subdev.device;
58 nvkm_wr32(device, 0x419e44, 0xdffffe);
59 nvkm_wr32(device, 0x419e4c, 0x5);
60}
61
62static const struct gf100_gr_func
63gm20b_gr = {
64 .dtor = gk20a_gr_dtor,
65 .init = gk20a_gr_init,
66 .init_gpc_mmu = gm20b_gr_init_gpc_mmu,
67 .set_hww_esr_report_mask = gm20b_gr_set_hww_esr_report_mask,
68 .ppc_nr = 1,
69 .grctx = &gm20b_grctx,
70 .sclass = {
71 { -1, -1, FERMI_TWOD_A },
72 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
73 { -1, -1, MAXWELL_B, &gf100_fermi },
74 { -1, -1, MAXWELL_COMPUTE_B },
75 {}
76 }
77};
78
79int
80gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
81{
82 return gk20a_gr_new_(&gm20b_gr, device, index, pgr);
83}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
new file mode 100644
index 000000000000..2e68919f00b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
@@ -0,0 +1,47 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "nv50.h"
25
26static const struct nvkm_gr_func
27gt200_gr = {
28 .init = nv50_gr_init,
29 .intr = nv50_gr_intr,
30 .chan_new = nv50_gr_chan_new,
31 .tlb_flush = g84_gr_tlb_flush,
32 .units = nv50_gr_units,
33 .sclass = {
34 { -1, -1, 0x0030, &nv50_gr_object },
35 { -1, -1, 0x502d, &nv50_gr_object },
36 { -1, -1, 0x5039, &nv50_gr_object },
37 { -1, -1, 0x50c0, &nv50_gr_object },
38 { -1, -1, 0x8397, &nv50_gr_object },
39 {}
40 }
41};
42
43int
44gt200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
45{
46 return nv50_gr_new_(&gt200_gr, device, index, pgr);
47}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
new file mode 100644
index 000000000000..2bf7aac360cc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
@@ -0,0 +1,48 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "nv50.h"
25
26static const struct nvkm_gr_func
27gt215_gr = {
28 .init = nv50_gr_init,
29 .intr = nv50_gr_intr,
30 .chan_new = nv50_gr_chan_new,
31 .tlb_flush = g84_gr_tlb_flush,
32 .units = nv50_gr_units,
33 .sclass = {
34 { -1, -1, 0x0030, &nv50_gr_object },
35 { -1, -1, 0x502d, &nv50_gr_object },
36 { -1, -1, 0x5039, &nv50_gr_object },
37 { -1, -1, 0x50c0, &nv50_gr_object },
38 { -1, -1, 0x8597, &nv50_gr_object },
39 { -1, -1, 0x85c0, &nv50_gr_object },
40 {}
41 }
42};
43
44int
45gt215_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
46{
47 return nv50_gr_new_(&gt215_gr, device, index, pgr);
48}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
new file mode 100644
index 000000000000..95d5219faf93
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
@@ -0,0 +1,46 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "nv50.h"
25
26static const struct nvkm_gr_func
27mcp79_gr = {
28 .init = nv50_gr_init,
29 .intr = nv50_gr_intr,
30 .chan_new = nv50_gr_chan_new,
31 .units = nv50_gr_units,
32 .sclass = {
33 { -1, -1, 0x0030, &nv50_gr_object },
34 { -1, -1, 0x502d, &nv50_gr_object },
35 { -1, -1, 0x5039, &nv50_gr_object },
36 { -1, -1, 0x50c0, &nv50_gr_object },
37 { -1, -1, 0x8397, &nv50_gr_object },
38 {}
39 }
40};
41
42int
43mcp79_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
44{
45 return nv50_gr_new_(&mcp79_gr, device, index, pgr);
46}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
new file mode 100644
index 000000000000..027b58e5976b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
@@ -0,0 +1,48 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "nv50.h"
25
26static const struct nvkm_gr_func
27mcp89_gr = {
28 .init = nv50_gr_init,
29 .intr = nv50_gr_intr,
30 .chan_new = nv50_gr_chan_new,
31 .tlb_flush = g84_gr_tlb_flush,
32 .units = nv50_gr_units,
33 .sclass = {
34 { -1, -1, 0x0030, &nv50_gr_object },
35 { -1, -1, 0x502d, &nv50_gr_object },
36 { -1, -1, 0x5039, &nv50_gr_object },
37 { -1, -1, 0x50c0, &nv50_gr_object },
38 { -1, -1, 0x85c0, &nv50_gr_object },
39 { -1, -1, 0x8697, &nv50_gr_object },
40 {}
41 }
42};
43
44int
45mcp89_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
46{
47 return nv50_gr_new_(&mcp89_gr, device, index, pgr);
48}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
index 2614510c28d0..426ba0025a8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
@@ -21,13 +21,13 @@
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE. 22 * DEALINGS IN THE SOFTWARE.
23 */ 23 */
24#include <engine/gr.h> 24#include "priv.h"
25#include "regs.h" 25#include "regs.h"
26 26
27#include <core/client.h> 27#include <core/client.h>
28#include <core/device.h> 28#include <core/gpuobj.h>
29#include <core/handle.h>
30#include <engine/fifo.h> 29#include <engine/fifo.h>
30#include <engine/fifo/chan.h>
31#include <subdev/instmem.h> 31#include <subdev/instmem.h>
32#include <subdev/timer.h> 32#include <subdev/timer.h>
33 33
@@ -346,25 +346,23 @@ nv04_gr_ctx_regs[] = {
346 NV04_PGRAPH_DEBUG_3 346 NV04_PGRAPH_DEBUG_3
347}; 347};
348 348
349struct nv04_gr_priv { 349#define nv04_gr(p) container_of((p), struct nv04_gr, base)
350
351struct nv04_gr {
350 struct nvkm_gr base; 352 struct nvkm_gr base;
351 struct nv04_gr_chan *chan[16]; 353 struct nv04_gr_chan *chan[16];
352 spinlock_t lock; 354 spinlock_t lock;
353}; 355};
354 356
357#define nv04_gr_chan(p) container_of((p), struct nv04_gr_chan, object)
358
355struct nv04_gr_chan { 359struct nv04_gr_chan {
356 struct nvkm_object base; 360 struct nvkm_object object;
361 struct nv04_gr *gr;
357 int chid; 362 int chid;
358 u32 nv04[ARRAY_SIZE(nv04_gr_ctx_regs)]; 363 u32 nv04[ARRAY_SIZE(nv04_gr_ctx_regs)];
359}; 364};
360 365
361
362static inline struct nv04_gr_priv *
363nv04_gr_priv(struct nv04_gr_chan *chan)
364{
365 return (void *)nv_object(chan)->engine;
366}
367
368/******************************************************************************* 366/*******************************************************************************
369 * Graphics object classes 367 * Graphics object classes
370 ******************************************************************************/ 368 ******************************************************************************/
@@ -444,35 +442,34 @@ nv04_gr_priv(struct nv04_gr_chan *chan)
444 */ 442 */
445 443
446static void 444static void
447nv04_gr_set_ctx1(struct nvkm_object *object, u32 mask, u32 value) 445nv04_gr_set_ctx1(struct nvkm_device *device, u32 inst, u32 mask, u32 value)
448{ 446{
449 struct nv04_gr_priv *priv = (void *)object->engine; 447 int subc = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
450 int subc = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
451 u32 tmp; 448 u32 tmp;
452 449
453 tmp = nv_ro32(object, 0x00); 450 tmp = nvkm_rd32(device, 0x700000 + inst);
454 tmp &= ~mask; 451 tmp &= ~mask;
455 tmp |= value; 452 tmp |= value;
456 nv_wo32(object, 0x00, tmp); 453 nvkm_wr32(device, 0x700000 + inst, tmp);
457 454
458 nv_wr32(priv, NV04_PGRAPH_CTX_SWITCH1, tmp); 455 nvkm_wr32(device, NV04_PGRAPH_CTX_SWITCH1, tmp);
459 nv_wr32(priv, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp); 456 nvkm_wr32(device, NV04_PGRAPH_CTX_CACHE1 + (subc << 2), tmp);
460} 457}
461 458
462static void 459static void
463nv04_gr_set_ctx_val(struct nvkm_object *object, u32 mask, u32 value) 460nv04_gr_set_ctx_val(struct nvkm_device *device, u32 inst, u32 mask, u32 value)
464{ 461{
465 int class, op, valid = 1; 462 int class, op, valid = 1;
466 u32 tmp, ctx1; 463 u32 tmp, ctx1;
467 464
468 ctx1 = nv_ro32(object, 0x00); 465 ctx1 = nvkm_rd32(device, 0x700000 + inst);
469 class = ctx1 & 0xff; 466 class = ctx1 & 0xff;
470 op = (ctx1 >> 15) & 7; 467 op = (ctx1 >> 15) & 7;
471 468
472 tmp = nv_ro32(object, 0x0c); 469 tmp = nvkm_rd32(device, 0x70000c + inst);
473 tmp &= ~mask; 470 tmp &= ~mask;
474 tmp |= value; 471 tmp |= value;
475 nv_wo32(object, 0x0c, tmp); 472 nvkm_wr32(device, 0x70000c + inst, tmp);
476 473
477 /* check for valid surf2d/surf_dst/surf_color */ 474 /* check for valid surf2d/surf_dst/surf_color */
478 if (!(tmp & 0x02000000)) 475 if (!(tmp & 0x02000000))
@@ -504,527 +501,567 @@ nv04_gr_set_ctx_val(struct nvkm_object *object, u32 mask, u32 value)
504 break; 501 break;
505 } 502 }
506 503
507 nv04_gr_set_ctx1(object, 0x01000000, valid << 24); 504 nv04_gr_set_ctx1(device, inst, 0x01000000, valid << 24);
508} 505}
509 506
510static int 507static bool
511nv04_gr_mthd_set_operation(struct nvkm_object *object, u32 mthd, 508nv04_gr_mthd_set_operation(struct nvkm_device *device, u32 inst, u32 data)
512 void *args, u32 size)
513{ 509{
514 u32 class = nv_ro32(object, 0) & 0xff; 510 u8 class = nvkm_rd32(device, 0x700000) & 0x000000ff;
515 u32 data = *(u32 *)args;
516 if (data > 5) 511 if (data > 5)
517 return 1; 512 return false;
518 /* Old versions of the objects only accept first three operations. */ 513 /* Old versions of the objects only accept first three operations. */
519 if (data > 2 && class < 0x40) 514 if (data > 2 && class < 0x40)
520 return 1; 515 return false;
521 nv04_gr_set_ctx1(object, 0x00038000, data << 15); 516 nv04_gr_set_ctx1(device, inst, 0x00038000, data << 15);
522 /* changing operation changes set of objects needed for validation */ 517 /* changing operation changes set of objects needed for validation */
523 nv04_gr_set_ctx_val(object, 0, 0); 518 nv04_gr_set_ctx_val(device, inst, 0, 0);
524 return 0; 519 return true;
525} 520}
526 521
527static int 522static bool
528nv04_gr_mthd_surf3d_clip_h(struct nvkm_object *object, u32 mthd, 523nv04_gr_mthd_surf3d_clip_h(struct nvkm_device *device, u32 inst, u32 data)
529 void *args, u32 size)
530{ 524{
531 struct nv04_gr_priv *priv = (void *)object->engine;
532 u32 data = *(u32 *)args;
533 u32 min = data & 0xffff, max; 525 u32 min = data & 0xffff, max;
534 u32 w = data >> 16; 526 u32 w = data >> 16;
535 if (min & 0x8000) 527 if (min & 0x8000)
536 /* too large */ 528 /* too large */
537 return 1; 529 return false;
538 if (w & 0x8000) 530 if (w & 0x8000)
539 /* yes, it accepts negative for some reason. */ 531 /* yes, it accepts negative for some reason. */
540 w |= 0xffff0000; 532 w |= 0xffff0000;
541 max = min + w; 533 max = min + w;
542 max &= 0x3ffff; 534 max &= 0x3ffff;
543 nv_wr32(priv, 0x40053c, min); 535 nvkm_wr32(device, 0x40053c, min);
544 nv_wr32(priv, 0x400544, max); 536 nvkm_wr32(device, 0x400544, max);
545 return 0; 537 return true;
546} 538}
547 539
548static int 540static bool
549nv04_gr_mthd_surf3d_clip_v(struct nvkm_object *object, u32 mthd, 541nv04_gr_mthd_surf3d_clip_v(struct nvkm_device *device, u32 inst, u32 data)
550 void *args, u32 size)
551{ 542{
552 struct nv04_gr_priv *priv = (void *)object->engine;
553 u32 data = *(u32 *)args;
554 u32 min = data & 0xffff, max; 543 u32 min = data & 0xffff, max;
555 u32 w = data >> 16; 544 u32 w = data >> 16;
556 if (min & 0x8000) 545 if (min & 0x8000)
557 /* too large */ 546 /* too large */
558 return 1; 547 return false;
559 if (w & 0x8000) 548 if (w & 0x8000)
560 /* yes, it accepts negative for some reason. */ 549 /* yes, it accepts negative for some reason. */
561 w |= 0xffff0000; 550 w |= 0xffff0000;
562 max = min + w; 551 max = min + w;
563 max &= 0x3ffff; 552 max &= 0x3ffff;
564 nv_wr32(priv, 0x400540, min); 553 nvkm_wr32(device, 0x400540, min);
565 nv_wr32(priv, 0x400548, max); 554 nvkm_wr32(device, 0x400548, max);
566 return 0; 555 return true;
567} 556}
568 557
569static u16 558static u8
570nv04_gr_mthd_bind_class(struct nvkm_object *object, u32 *args, u32 size) 559nv04_gr_mthd_bind_class(struct nvkm_device *device, u32 inst)
571{ 560{
572 struct nvkm_instmem *imem = nvkm_instmem(object); 561 return nvkm_rd32(device, 0x700000 + (inst << 4));
573 u32 inst = *(u32 *)args << 4;
574 return nv_ro32(imem, inst);
575} 562}
576 563
577static int 564static bool
578nv04_gr_mthd_bind_surf2d(struct nvkm_object *object, u32 mthd, 565nv04_gr_mthd_bind_surf2d(struct nvkm_device *device, u32 inst, u32 data)
579 void *args, u32 size)
580{ 566{
581 switch (nv04_gr_mthd_bind_class(object, args, size)) { 567 switch (nv04_gr_mthd_bind_class(device, data)) {
582 case 0x30: 568 case 0x30:
583 nv04_gr_set_ctx1(object, 0x00004000, 0); 569 nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
584 nv04_gr_set_ctx_val(object, 0x02000000, 0); 570 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
585 return 0; 571 return true;
586 case 0x42: 572 case 0x42:
587 nv04_gr_set_ctx1(object, 0x00004000, 0); 573 nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
588 nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000); 574 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
589 return 0; 575 return true;
590 } 576 }
591 return 1; 577 return false;
592} 578}
593 579
594static int 580static bool
595nv04_gr_mthd_bind_surf2d_swzsurf(struct nvkm_object *object, u32 mthd, 581nv04_gr_mthd_bind_surf2d_swzsurf(struct nvkm_device *device, u32 inst, u32 data)
596 void *args, u32 size)
597{ 582{
598 switch (nv04_gr_mthd_bind_class(object, args, size)) { 583 switch (nv04_gr_mthd_bind_class(device, data)) {
599 case 0x30: 584 case 0x30:
600 nv04_gr_set_ctx1(object, 0x00004000, 0); 585 nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
601 nv04_gr_set_ctx_val(object, 0x02000000, 0); 586 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
602 return 0; 587 return true;
603 case 0x42: 588 case 0x42:
604 nv04_gr_set_ctx1(object, 0x00004000, 0); 589 nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
605 nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000); 590 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
606 return 0; 591 return true;
607 case 0x52: 592 case 0x52:
608 nv04_gr_set_ctx1(object, 0x00004000, 0x00004000); 593 nv04_gr_set_ctx1(device, inst, 0x00004000, 0x00004000);
609 nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000); 594 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
610 return 0; 595 return true;
611 } 596 }
612 return 1; 597 return false;
613} 598}
614 599
615static int 600static bool
616nv01_gr_mthd_bind_patt(struct nvkm_object *object, u32 mthd, 601nv01_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data)
617 void *args, u32 size)
618{ 602{
619 switch (nv04_gr_mthd_bind_class(object, args, size)) { 603 switch (nv04_gr_mthd_bind_class(device, data)) {
620 case 0x30: 604 case 0x30:
621 nv04_gr_set_ctx_val(object, 0x08000000, 0); 605 nv04_gr_set_ctx_val(device, inst, 0x08000000, 0);
622 return 0; 606 return true;
623 case 0x18: 607 case 0x18:
624 nv04_gr_set_ctx_val(object, 0x08000000, 0x08000000); 608 nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000);
625 return 0; 609 return true;
626 } 610 }
627 return 1; 611 return false;
628} 612}
629 613
630static int 614static bool
631nv04_gr_mthd_bind_patt(struct nvkm_object *object, u32 mthd, 615nv04_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data)
632 void *args, u32 size)
633{ 616{
634 switch (nv04_gr_mthd_bind_class(object, args, size)) { 617 switch (nv04_gr_mthd_bind_class(device, data)) {
635 case 0x30: 618 case 0x30:
636 nv04_gr_set_ctx_val(object, 0x08000000, 0); 619 nv04_gr_set_ctx_val(device, inst, 0x08000000, 0);
637 return 0; 620 return true;
638 case 0x44: 621 case 0x44:
639 nv04_gr_set_ctx_val(object, 0x08000000, 0x08000000); 622 nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000);
640 return 0; 623 return true;
641 } 624 }
642 return 1; 625 return false;
643} 626}
644 627
645static int 628static bool
646nv04_gr_mthd_bind_rop(struct nvkm_object *object, u32 mthd, 629nv04_gr_mthd_bind_rop(struct nvkm_device *device, u32 inst, u32 data)
647 void *args, u32 size)
648{ 630{
649 switch (nv04_gr_mthd_bind_class(object, args, size)) { 631 switch (nv04_gr_mthd_bind_class(device, data)) {
650 case 0x30: 632 case 0x30:
651 nv04_gr_set_ctx_val(object, 0x10000000, 0); 633 nv04_gr_set_ctx_val(device, inst, 0x10000000, 0);
652 return 0; 634 return true;
653 case 0x43: 635 case 0x43:
654 nv04_gr_set_ctx_val(object, 0x10000000, 0x10000000); 636 nv04_gr_set_ctx_val(device, inst, 0x10000000, 0x10000000);
655 return 0; 637 return true;
656 } 638 }
657 return 1; 639 return false;
658} 640}
659 641
660static int 642static bool
661nv04_gr_mthd_bind_beta1(struct nvkm_object *object, u32 mthd, 643nv04_gr_mthd_bind_beta1(struct nvkm_device *device, u32 inst, u32 data)
662 void *args, u32 size)
663{ 644{
664 switch (nv04_gr_mthd_bind_class(object, args, size)) { 645 switch (nv04_gr_mthd_bind_class(device, data)) {
665 case 0x30: 646 case 0x30:
666 nv04_gr_set_ctx_val(object, 0x20000000, 0); 647 nv04_gr_set_ctx_val(device, inst, 0x20000000, 0);
667 return 0; 648 return true;
668 case 0x12: 649 case 0x12:
669 nv04_gr_set_ctx_val(object, 0x20000000, 0x20000000); 650 nv04_gr_set_ctx_val(device, inst, 0x20000000, 0x20000000);
670 return 0; 651 return true;
671 } 652 }
672 return 1; 653 return false;
673} 654}
674 655
675static int 656static bool
676nv04_gr_mthd_bind_beta4(struct nvkm_object *object, u32 mthd, 657nv04_gr_mthd_bind_beta4(struct nvkm_device *device, u32 inst, u32 data)
677 void *args, u32 size)
678{ 658{
679 switch (nv04_gr_mthd_bind_class(object, args, size)) { 659 switch (nv04_gr_mthd_bind_class(device, data)) {
680 case 0x30: 660 case 0x30:
681 nv04_gr_set_ctx_val(object, 0x40000000, 0); 661 nv04_gr_set_ctx_val(device, inst, 0x40000000, 0);
682 return 0; 662 return true;
683 case 0x72: 663 case 0x72:
684 nv04_gr_set_ctx_val(object, 0x40000000, 0x40000000); 664 nv04_gr_set_ctx_val(device, inst, 0x40000000, 0x40000000);
685 return 0; 665 return true;
686 } 666 }
687 return 1; 667 return false;
688} 668}
689 669
690static int 670static bool
691nv04_gr_mthd_bind_surf_dst(struct nvkm_object *object, u32 mthd, 671nv04_gr_mthd_bind_surf_dst(struct nvkm_device *device, u32 inst, u32 data)
692 void *args, u32 size)
693{ 672{
694 switch (nv04_gr_mthd_bind_class(object, args, size)) { 673 switch (nv04_gr_mthd_bind_class(device, data)) {
695 case 0x30: 674 case 0x30:
696 nv04_gr_set_ctx_val(object, 0x02000000, 0); 675 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
697 return 0; 676 return true;
698 case 0x58: 677 case 0x58:
699 nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000); 678 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
700 return 0; 679 return true;
701 } 680 }
702 return 1; 681 return false;
703} 682}
704 683
705static int 684static bool
706nv04_gr_mthd_bind_surf_src(struct nvkm_object *object, u32 mthd, 685nv04_gr_mthd_bind_surf_src(struct nvkm_device *device, u32 inst, u32 data)
707 void *args, u32 size)
708{ 686{
709 switch (nv04_gr_mthd_bind_class(object, args, size)) { 687 switch (nv04_gr_mthd_bind_class(device, data)) {
710 case 0x30: 688 case 0x30:
711 nv04_gr_set_ctx_val(object, 0x04000000, 0); 689 nv04_gr_set_ctx_val(device, inst, 0x04000000, 0);
712 return 0; 690 return true;
713 case 0x59: 691 case 0x59:
714 nv04_gr_set_ctx_val(object, 0x04000000, 0x04000000); 692 nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000);
715 return 0; 693 return true;
716 } 694 }
717 return 1; 695 return false;
718} 696}
719 697
720static int 698static bool
721nv04_gr_mthd_bind_surf_color(struct nvkm_object *object, u32 mthd, 699nv04_gr_mthd_bind_surf_color(struct nvkm_device *device, u32 inst, u32 data)
722 void *args, u32 size)
723{ 700{
724 switch (nv04_gr_mthd_bind_class(object, args, size)) { 701 switch (nv04_gr_mthd_bind_class(device, data)) {
725 case 0x30: 702 case 0x30:
726 nv04_gr_set_ctx_val(object, 0x02000000, 0); 703 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
727 return 0; 704 return true;
728 case 0x5a: 705 case 0x5a:
729 nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000); 706 nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
730 return 0; 707 return true;
731 } 708 }
732 return 1; 709 return false;
733} 710}
734 711
735static int 712static bool
736nv04_gr_mthd_bind_surf_zeta(struct nvkm_object *object, u32 mthd, 713nv04_gr_mthd_bind_surf_zeta(struct nvkm_device *device, u32 inst, u32 data)
737 void *args, u32 size)
738{ 714{
739 switch (nv04_gr_mthd_bind_class(object, args, size)) { 715 switch (nv04_gr_mthd_bind_class(device, data)) {
740 case 0x30: 716 case 0x30:
741 nv04_gr_set_ctx_val(object, 0x04000000, 0); 717 nv04_gr_set_ctx_val(device, inst, 0x04000000, 0);
742 return 0; 718 return true;
743 case 0x5b: 719 case 0x5b:
744 nv04_gr_set_ctx_val(object, 0x04000000, 0x04000000); 720 nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000);
745 return 0; 721 return true;
746 } 722 }
747 return 1; 723 return false;
748} 724}
749 725
750static int 726static bool
751nv01_gr_mthd_bind_clip(struct nvkm_object *object, u32 mthd, 727nv01_gr_mthd_bind_clip(struct nvkm_device *device, u32 inst, u32 data)
752 void *args, u32 size)
753{ 728{
754 switch (nv04_gr_mthd_bind_class(object, args, size)) { 729 switch (nv04_gr_mthd_bind_class(device, data)) {
755 case 0x30: 730 case 0x30:
756 nv04_gr_set_ctx1(object, 0x2000, 0); 731 nv04_gr_set_ctx1(device, inst, 0x2000, 0);
757 return 0; 732 return true;
758 case 0x19: 733 case 0x19:
759 nv04_gr_set_ctx1(object, 0x2000, 0x2000); 734 nv04_gr_set_ctx1(device, inst, 0x2000, 0x2000);
760 return 0; 735 return true;
761 } 736 }
762 return 1; 737 return false;
763} 738}
764 739
765static int 740static bool
766nv01_gr_mthd_bind_chroma(struct nvkm_object *object, u32 mthd, 741nv01_gr_mthd_bind_chroma(struct nvkm_device *device, u32 inst, u32 data)
767 void *args, u32 size)
768{ 742{
769 switch (nv04_gr_mthd_bind_class(object, args, size)) { 743 switch (nv04_gr_mthd_bind_class(device, data)) {
770 case 0x30: 744 case 0x30:
771 nv04_gr_set_ctx1(object, 0x1000, 0); 745 nv04_gr_set_ctx1(device, inst, 0x1000, 0);
772 return 0; 746 return true;
773 /* Yes, for some reason even the old versions of objects 747 /* Yes, for some reason even the old versions of objects
774 * accept 0x57 and not 0x17. Consistency be damned. 748 * accept 0x57 and not 0x17. Consistency be damned.
775 */ 749 */
776 case 0x57: 750 case 0x57:
777 nv04_gr_set_ctx1(object, 0x1000, 0x1000); 751 nv04_gr_set_ctx1(device, inst, 0x1000, 0x1000);
778 return 0; 752 return true;
779 } 753 }
780 return 1; 754 return false;
781} 755}
782 756
783static struct nvkm_omthds 757static bool
784nv03_gr_gdi_omthds[] = { 758nv03_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
785 { 0x0184, 0x0184, nv01_gr_mthd_bind_patt }, 759{
786 { 0x0188, 0x0188, nv04_gr_mthd_bind_rop }, 760 bool (*func)(struct nvkm_device *, u32, u32);
787 { 0x018c, 0x018c, nv04_gr_mthd_bind_beta1 }, 761 switch (mthd) {
788 { 0x0190, 0x0190, nv04_gr_mthd_bind_surf_dst }, 762 case 0x0184: func = nv01_gr_mthd_bind_patt; break;
789 { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation }, 763 case 0x0188: func = nv04_gr_mthd_bind_rop; break;
790 {} 764 case 0x018c: func = nv04_gr_mthd_bind_beta1; break;
791}; 765 case 0x0190: func = nv04_gr_mthd_bind_surf_dst; break;
792 766 case 0x02fc: func = nv04_gr_mthd_set_operation; break;
793static struct nvkm_omthds 767 default:
794nv04_gr_gdi_omthds[] = { 768 return false;
795 { 0x0188, 0x0188, nv04_gr_mthd_bind_patt }, 769 }
796 { 0x018c, 0x018c, nv04_gr_mthd_bind_rop }, 770 return func(device, inst, data);
797 { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 }, 771}
798 { 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
799 { 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
800 { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
801 {}
802};
803 772
804static struct nvkm_omthds 773static bool
805nv01_gr_blit_omthds[] = { 774nv04_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
806 { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma }, 775{
807 { 0x0188, 0x0188, nv01_gr_mthd_bind_clip }, 776 bool (*func)(struct nvkm_device *, u32, u32);
808 { 0x018c, 0x018c, nv01_gr_mthd_bind_patt }, 777 switch (mthd) {
809 { 0x0190, 0x0190, nv04_gr_mthd_bind_rop }, 778 case 0x0188: func = nv04_gr_mthd_bind_patt; break;
810 { 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 }, 779 case 0x018c: func = nv04_gr_mthd_bind_rop; break;
811 { 0x0198, 0x0198, nv04_gr_mthd_bind_surf_dst }, 780 case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
812 { 0x019c, 0x019c, nv04_gr_mthd_bind_surf_src }, 781 case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
813 { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation }, 782 case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
814 {} 783 case 0x02fc: func = nv04_gr_mthd_set_operation; break;
815}; 784 default:
785 return false;
786 }
787 return func(device, inst, data);
788}
816 789
817static struct nvkm_omthds 790static bool
818nv04_gr_blit_omthds[] = { 791nv01_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
819 { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma }, 792{
820 { 0x0188, 0x0188, nv01_gr_mthd_bind_clip }, 793 bool (*func)(struct nvkm_device *, u32, u32);
821 { 0x018c, 0x018c, nv04_gr_mthd_bind_patt }, 794 switch (mthd) {
822 { 0x0190, 0x0190, nv04_gr_mthd_bind_rop }, 795 case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
823 { 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 }, 796 case 0x0188: func = nv01_gr_mthd_bind_clip; break;
824 { 0x0198, 0x0198, nv04_gr_mthd_bind_beta4 }, 797 case 0x018c: func = nv01_gr_mthd_bind_patt; break;
825 { 0x019c, 0x019c, nv04_gr_mthd_bind_surf2d }, 798 case 0x0190: func = nv04_gr_mthd_bind_rop; break;
826 { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation }, 799 case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
827 {} 800 case 0x0198: func = nv04_gr_mthd_bind_surf_dst; break;
828}; 801 case 0x019c: func = nv04_gr_mthd_bind_surf_src; break;
802 case 0x02fc: func = nv04_gr_mthd_set_operation; break;
803 default:
804 return false;
805 }
806 return func(device, inst, data);
807}
829 808
830static struct nvkm_omthds 809static bool
831nv04_gr_iifc_omthds[] = { 810nv04_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
832 { 0x0188, 0x0188, nv01_gr_mthd_bind_chroma }, 811{
833 { 0x018c, 0x018c, nv01_gr_mthd_bind_clip }, 812 bool (*func)(struct nvkm_device *, u32, u32);
834 { 0x0190, 0x0190, nv04_gr_mthd_bind_patt }, 813 switch (mthd) {
835 { 0x0194, 0x0194, nv04_gr_mthd_bind_rop }, 814 case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
836 { 0x0198, 0x0198, nv04_gr_mthd_bind_beta1 }, 815 case 0x0188: func = nv01_gr_mthd_bind_clip; break;
837 { 0x019c, 0x019c, nv04_gr_mthd_bind_beta4 }, 816 case 0x018c: func = nv04_gr_mthd_bind_patt; break;
838 { 0x01a0, 0x01a0, nv04_gr_mthd_bind_surf2d_swzsurf }, 817 case 0x0190: func = nv04_gr_mthd_bind_rop; break;
839 { 0x03e4, 0x03e4, nv04_gr_mthd_set_operation }, 818 case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
840 {} 819 case 0x0198: func = nv04_gr_mthd_bind_beta4; break;
841}; 820 case 0x019c: func = nv04_gr_mthd_bind_surf2d; break;
821 case 0x02fc: func = nv04_gr_mthd_set_operation; break;
822 default:
823 return false;
824 }
825 return func(device, inst, data);
826}
842 827
843static struct nvkm_omthds 828static bool
844nv01_gr_ifc_omthds[] = { 829nv04_gr_mthd_iifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
845 { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma }, 830{
846 { 0x0188, 0x0188, nv01_gr_mthd_bind_clip }, 831 bool (*func)(struct nvkm_device *, u32, u32);
847 { 0x018c, 0x018c, nv01_gr_mthd_bind_patt }, 832 switch (mthd) {
848 { 0x0190, 0x0190, nv04_gr_mthd_bind_rop }, 833 case 0x0188: func = nv01_gr_mthd_bind_chroma; break;
849 { 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 }, 834 case 0x018c: func = nv01_gr_mthd_bind_clip; break;
850 { 0x0198, 0x0198, nv04_gr_mthd_bind_surf_dst }, 835 case 0x0190: func = nv04_gr_mthd_bind_patt; break;
851 { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation }, 836 case 0x0194: func = nv04_gr_mthd_bind_rop; break;
852 {} 837 case 0x0198: func = nv04_gr_mthd_bind_beta1; break;
853}; 838 case 0x019c: func = nv04_gr_mthd_bind_beta4; break;
839 case 0x01a0: func = nv04_gr_mthd_bind_surf2d_swzsurf; break;
840 case 0x03e4: func = nv04_gr_mthd_set_operation; break;
841 default:
842 return false;
843 }
844 return func(device, inst, data);
845}
854 846
855static struct nvkm_omthds 847static bool
856nv04_gr_ifc_omthds[] = { 848nv01_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
857 { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma }, 849{
858 { 0x0188, 0x0188, nv01_gr_mthd_bind_clip }, 850 bool (*func)(struct nvkm_device *, u32, u32);
859 { 0x018c, 0x018c, nv04_gr_mthd_bind_patt }, 851 switch (mthd) {
860 { 0x0190, 0x0190, nv04_gr_mthd_bind_rop }, 852 case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
861 { 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 }, 853 case 0x0188: func = nv01_gr_mthd_bind_clip; break;
862 { 0x0198, 0x0198, nv04_gr_mthd_bind_beta4 }, 854 case 0x018c: func = nv01_gr_mthd_bind_patt; break;
863 { 0x019c, 0x019c, nv04_gr_mthd_bind_surf2d }, 855 case 0x0190: func = nv04_gr_mthd_bind_rop; break;
864 { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation }, 856 case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
865 {} 857 case 0x0198: func = nv04_gr_mthd_bind_surf_dst; break;
866}; 858 case 0x02fc: func = nv04_gr_mthd_set_operation; break;
859 default:
860 return false;
861 }
862 return func(device, inst, data);
863}
867 864
868static struct nvkm_omthds 865static bool
869nv03_gr_sifc_omthds[] = { 866nv04_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
870 { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma }, 867{
871 { 0x0188, 0x0188, nv01_gr_mthd_bind_patt }, 868 bool (*func)(struct nvkm_device *, u32, u32);
872 { 0x018c, 0x018c, nv04_gr_mthd_bind_rop }, 869 switch (mthd) {
873 { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 }, 870 case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
874 { 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst }, 871 case 0x0188: func = nv01_gr_mthd_bind_clip; break;
875 { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation }, 872 case 0x018c: func = nv04_gr_mthd_bind_patt; break;
876 {} 873 case 0x0190: func = nv04_gr_mthd_bind_rop; break;
877}; 874 case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
875 case 0x0198: func = nv04_gr_mthd_bind_beta4; break;
876 case 0x019c: func = nv04_gr_mthd_bind_surf2d; break;
877 case 0x02fc: func = nv04_gr_mthd_set_operation; break;
878 default:
879 return false;
880 }
881 return func(device, inst, data);
882}
878 883
879static struct nvkm_omthds 884static bool
880nv04_gr_sifc_omthds[] = { 885nv03_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
881 { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma }, 886{
882 { 0x0188, 0x0188, nv04_gr_mthd_bind_patt }, 887 bool (*func)(struct nvkm_device *, u32, u32);
883 { 0x018c, 0x018c, nv04_gr_mthd_bind_rop }, 888 switch (mthd) {
884 { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 }, 889 case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
885 { 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 }, 890 case 0x0188: func = nv01_gr_mthd_bind_patt; break;
886 { 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d }, 891 case 0x018c: func = nv04_gr_mthd_bind_rop; break;
887 { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation }, 892 case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
888 {} 893 case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
889}; 894 case 0x02fc: func = nv04_gr_mthd_set_operation; break;
895 default:
896 return false;
897 }
898 return func(device, inst, data);
899}
890 900
891static struct nvkm_omthds 901static bool
892nv03_gr_sifm_omthds[] = { 902nv04_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
893 { 0x0188, 0x0188, nv01_gr_mthd_bind_patt }, 903{
894 { 0x018c, 0x018c, nv04_gr_mthd_bind_rop }, 904 bool (*func)(struct nvkm_device *, u32, u32);
895 { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 }, 905 switch (mthd) {
896 { 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst }, 906 case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
897 { 0x0304, 0x0304, nv04_gr_mthd_set_operation }, 907 case 0x0188: func = nv04_gr_mthd_bind_patt; break;
898 {} 908 case 0x018c: func = nv04_gr_mthd_bind_rop; break;
899}; 909 case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
910 case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
911 case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
912 case 0x02fc: func = nv04_gr_mthd_set_operation; break;
913 default:
914 return false;
915 }
916 return func(device, inst, data);
917}
900 918
901static struct nvkm_omthds 919static bool
902nv04_gr_sifm_omthds[] = { 920nv03_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
903 { 0x0188, 0x0188, nv04_gr_mthd_bind_patt }, 921{
904 { 0x018c, 0x018c, nv04_gr_mthd_bind_rop }, 922 bool (*func)(struct nvkm_device *, u32, u32);
905 { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 }, 923 switch (mthd) {
906 { 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 }, 924 case 0x0188: func = nv01_gr_mthd_bind_patt; break;
907 { 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d }, 925 case 0x018c: func = nv04_gr_mthd_bind_rop; break;
908 { 0x0304, 0x0304, nv04_gr_mthd_set_operation }, 926 case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
909 {} 927 case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
910}; 928 case 0x0304: func = nv04_gr_mthd_set_operation; break;
929 default:
930 return false;
931 }
932 return func(device, inst, data);
933}
911 934
912static struct nvkm_omthds 935static bool
913nv04_gr_surf3d_omthds[] = { 936nv04_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
914 { 0x02f8, 0x02f8, nv04_gr_mthd_surf3d_clip_h }, 937{
915 { 0x02fc, 0x02fc, nv04_gr_mthd_surf3d_clip_v }, 938 bool (*func)(struct nvkm_device *, u32, u32);
916 {} 939 switch (mthd) {
917}; 940 case 0x0188: func = nv04_gr_mthd_bind_patt; break;
941 case 0x018c: func = nv04_gr_mthd_bind_rop; break;
942 case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
943 case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
944 case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
945 case 0x0304: func = nv04_gr_mthd_set_operation; break;
946 default:
947 return false;
948 }
949 return func(device, inst, data);
950}
918 951
919static struct nvkm_omthds 952static bool
920nv03_gr_ttri_omthds[] = { 953nv04_gr_mthd_surf3d(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
921 { 0x0188, 0x0188, nv01_gr_mthd_bind_clip }, 954{
922 { 0x018c, 0x018c, nv04_gr_mthd_bind_surf_color }, 955 bool (*func)(struct nvkm_device *, u32, u32);
923 { 0x0190, 0x0190, nv04_gr_mthd_bind_surf_zeta }, 956 switch (mthd) {
924 {} 957 case 0x02f8: func = nv04_gr_mthd_surf3d_clip_h; break;
925}; 958 case 0x02fc: func = nv04_gr_mthd_surf3d_clip_v; break;
959 default:
960 return false;
961 }
962 return func(device, inst, data);
963}
926 964
927static struct nvkm_omthds 965static bool
928nv01_gr_prim_omthds[] = { 966nv03_gr_mthd_ttri(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
929 { 0x0184, 0x0184, nv01_gr_mthd_bind_clip }, 967{
930 { 0x0188, 0x0188, nv01_gr_mthd_bind_patt }, 968 bool (*func)(struct nvkm_device *, u32, u32);
931 { 0x018c, 0x018c, nv04_gr_mthd_bind_rop }, 969 switch (mthd) {
932 { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 }, 970 case 0x0188: func = nv01_gr_mthd_bind_clip; break;
933 { 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst }, 971 case 0x018c: func = nv04_gr_mthd_bind_surf_color; break;
934 { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation }, 972 case 0x0190: func = nv04_gr_mthd_bind_surf_zeta; break;
935 {} 973 default:
936}; 974 return false;
975 }
976 return func(device, inst, data);
977}
937 978
938static struct nvkm_omthds 979static bool
939nv04_gr_prim_omthds[] = { 980nv01_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
940 { 0x0184, 0x0184, nv01_gr_mthd_bind_clip }, 981{
941 { 0x0188, 0x0188, nv04_gr_mthd_bind_patt }, 982 bool (*func)(struct nvkm_device *, u32, u32);
942 { 0x018c, 0x018c, nv04_gr_mthd_bind_rop }, 983 switch (mthd) {
943 { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 }, 984 case 0x0184: func = nv01_gr_mthd_bind_clip; break;
944 { 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 }, 985 case 0x0188: func = nv01_gr_mthd_bind_patt; break;
945 { 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d }, 986 case 0x018c: func = nv04_gr_mthd_bind_rop; break;
946 { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation }, 987 case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
947 {} 988 case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
948}; 989 case 0x02fc: func = nv04_gr_mthd_set_operation; break;
990 default:
991 return false;
992 }
993 return func(device, inst, data);
994}
949 995
950static int 996static bool
951nv04_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 997nv04_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
952 struct nvkm_oclass *oclass, void *data, u32 size,
953 struct nvkm_object **pobject)
954{ 998{
955 struct nvkm_gpuobj *obj; 999 bool (*func)(struct nvkm_device *, u32, u32);
956 int ret; 1000 switch (mthd) {
1001 case 0x0184: func = nv01_gr_mthd_bind_clip; break;
1002 case 0x0188: func = nv04_gr_mthd_bind_patt; break;
1003 case 0x018c: func = nv04_gr_mthd_bind_rop; break;
1004 case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
1005 case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
1006 case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
1007 case 0x02fc: func = nv04_gr_mthd_set_operation; break;
1008 default:
1009 return false;
1010 }
1011 return func(device, inst, data);
1012}
957 1013
958 ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent, 1014static bool
959 16, 16, 0, &obj); 1015nv04_gr_mthd(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
960 *pobject = nv_object(obj); 1016{
961 if (ret) 1017 bool (*func)(struct nvkm_device *, u32, u32, u32);
962 return ret; 1018 switch (nvkm_rd32(device, 0x700000 + inst) & 0x000000ff) {
1019 case 0x1c ... 0x1e:
1020 func = nv01_gr_mthd_prim; break;
1021 case 0x1f: func = nv01_gr_mthd_blit; break;
1022 case 0x21: func = nv01_gr_mthd_ifc; break;
1023 case 0x36: func = nv03_gr_mthd_sifc; break;
1024 case 0x37: func = nv03_gr_mthd_sifm; break;
1025 case 0x48: func = nv03_gr_mthd_ttri; break;
1026 case 0x4a: func = nv04_gr_mthd_gdi; break;
1027 case 0x4b: func = nv03_gr_mthd_gdi; break;
1028 case 0x53: func = nv04_gr_mthd_surf3d; break;
1029 case 0x5c ... 0x5e:
1030 func = nv04_gr_mthd_prim; break;
1031 case 0x5f: func = nv04_gr_mthd_blit; break;
1032 case 0x60: func = nv04_gr_mthd_iifc; break;
1033 case 0x61: func = nv04_gr_mthd_ifc; break;
1034 case 0x76: func = nv04_gr_mthd_sifc; break;
1035 case 0x77: func = nv04_gr_mthd_sifm; break;
1036 default:
1037 return false;
1038 }
1039 return func(device, inst, mthd, data);
1040}
963 1041
964 nv_wo32(obj, 0x00, nv_mclass(obj)); 1042static int
1043nv04_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
1044 int align, struct nvkm_gpuobj **pgpuobj)
1045{
1046 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
1047 false, parent, pgpuobj);
1048 if (ret == 0) {
1049 nvkm_kmap(*pgpuobj);
1050 nvkm_wo32(*pgpuobj, 0x00, object->oclass);
1051 nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
1052 nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
965#ifdef __BIG_ENDIAN 1053#ifdef __BIG_ENDIAN
966 nv_mo32(obj, 0x00, 0x00080000, 0x00080000); 1054 nvkm_mo32(*pgpuobj, 0x08, 0x00080000, 0x00080000);
967#endif 1055#endif
968 nv_wo32(obj, 0x04, 0x00000000); 1056 nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
969 nv_wo32(obj, 0x08, 0x00000000); 1057 nvkm_done(*pgpuobj);
970 nv_wo32(obj, 0x0c, 0x00000000); 1058 }
971 return 0; 1059 return ret;
972} 1060}
973 1061
974struct nvkm_ofuncs 1062const struct nvkm_object_func
975nv04_gr_ofuncs = { 1063nv04_gr_object = {
976 .ctor = nv04_gr_object_ctor, 1064 .bind = nv04_gr_object_bind,
977 .dtor = _nvkm_gpuobj_dtor,
978 .init = _nvkm_gpuobj_init,
979 .fini = _nvkm_gpuobj_fini,
980 .rd32 = _nvkm_gpuobj_rd32,
981 .wr32 = _nvkm_gpuobj_wr32,
982};
983
984static struct nvkm_oclass
985nv04_gr_sclass[] = {
986 { 0x0012, &nv04_gr_ofuncs }, /* beta1 */
987 { 0x0017, &nv04_gr_ofuncs }, /* chroma */
988 { 0x0018, &nv04_gr_ofuncs }, /* pattern (nv01) */
989 { 0x0019, &nv04_gr_ofuncs }, /* clip */
990 { 0x001c, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* line */
991 { 0x001d, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* tri */
992 { 0x001e, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* rect */
993 { 0x001f, &nv04_gr_ofuncs, nv01_gr_blit_omthds },
994 { 0x0021, &nv04_gr_ofuncs, nv01_gr_ifc_omthds },
995 { 0x0030, &nv04_gr_ofuncs }, /* null */
996 { 0x0036, &nv04_gr_ofuncs, nv03_gr_sifc_omthds },
997 { 0x0037, &nv04_gr_ofuncs, nv03_gr_sifm_omthds },
998 { 0x0038, &nv04_gr_ofuncs }, /* dvd subpicture */
999 { 0x0039, &nv04_gr_ofuncs }, /* m2mf */
1000 { 0x0042, &nv04_gr_ofuncs }, /* surf2d */
1001 { 0x0043, &nv04_gr_ofuncs }, /* rop */
1002 { 0x0044, &nv04_gr_ofuncs }, /* pattern */
1003 { 0x0048, &nv04_gr_ofuncs, nv03_gr_ttri_omthds },
1004 { 0x004a, &nv04_gr_ofuncs, nv04_gr_gdi_omthds },
1005 { 0x004b, &nv04_gr_ofuncs, nv03_gr_gdi_omthds },
1006 { 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
1007 { 0x0053, &nv04_gr_ofuncs, nv04_gr_surf3d_omthds },
1008 { 0x0054, &nv04_gr_ofuncs }, /* ttri */
1009 { 0x0055, &nv04_gr_ofuncs }, /* mtri */
1010 { 0x0057, &nv04_gr_ofuncs }, /* chroma */
1011 { 0x0058, &nv04_gr_ofuncs }, /* surf_dst */
1012 { 0x0059, &nv04_gr_ofuncs }, /* surf_src */
1013 { 0x005a, &nv04_gr_ofuncs }, /* surf_color */
1014 { 0x005b, &nv04_gr_ofuncs }, /* surf_zeta */
1015 { 0x005c, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* line */
1016 { 0x005d, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* tri */
1017 { 0x005e, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* rect */
1018 { 0x005f, &nv04_gr_ofuncs, nv04_gr_blit_omthds },
1019 { 0x0060, &nv04_gr_ofuncs, nv04_gr_iifc_omthds },
1020 { 0x0061, &nv04_gr_ofuncs, nv04_gr_ifc_omthds },
1021 { 0x0064, &nv04_gr_ofuncs }, /* iifc (nv05) */
1022 { 0x0065, &nv04_gr_ofuncs }, /* ifc (nv05) */
1023 { 0x0066, &nv04_gr_ofuncs }, /* sifc (nv05) */
1024 { 0x0072, &nv04_gr_ofuncs }, /* beta4 */
1025 { 0x0076, &nv04_gr_ofuncs, nv04_gr_sifc_omthds },
1026 { 0x0077, &nv04_gr_ofuncs, nv04_gr_sifm_omthds },
1027 {},
1028}; 1065};
1029 1066
1030/******************************************************************************* 1067/*******************************************************************************
@@ -1032,13 +1069,14 @@ nv04_gr_sclass[] = {
1032 ******************************************************************************/ 1069 ******************************************************************************/
1033 1070
1034static struct nv04_gr_chan * 1071static struct nv04_gr_chan *
1035nv04_gr_channel(struct nv04_gr_priv *priv) 1072nv04_gr_channel(struct nv04_gr *gr)
1036{ 1073{
1074 struct nvkm_device *device = gr->base.engine.subdev.device;
1037 struct nv04_gr_chan *chan = NULL; 1075 struct nv04_gr_chan *chan = NULL;
1038 if (nv_rd32(priv, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) { 1076 if (nvkm_rd32(device, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
1039 int chid = nv_rd32(priv, NV04_PGRAPH_CTX_USER) >> 24; 1077 int chid = nvkm_rd32(device, NV04_PGRAPH_CTX_USER) >> 24;
1040 if (chid < ARRAY_SIZE(priv->chan)) 1078 if (chid < ARRAY_SIZE(gr->chan))
1041 chan = priv->chan[chid]; 1079 chan = gr->chan[chid];
1042 } 1080 }
1043 return chan; 1081 return chan;
1044} 1082}
@@ -1046,55 +1084,52 @@ nv04_gr_channel(struct nv04_gr_priv *priv)
1046static int 1084static int
1047nv04_gr_load_context(struct nv04_gr_chan *chan, int chid) 1085nv04_gr_load_context(struct nv04_gr_chan *chan, int chid)
1048{ 1086{
1049 struct nv04_gr_priv *priv = nv04_gr_priv(chan); 1087 struct nvkm_device *device = chan->gr->base.engine.subdev.device;
1050 int i; 1088 int i;
1051 1089
1052 for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++) 1090 for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
1053 nv_wr32(priv, nv04_gr_ctx_regs[i], chan->nv04[i]); 1091 nvkm_wr32(device, nv04_gr_ctx_regs[i], chan->nv04[i]);
1054 1092
1055 nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10010100); 1093 nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
1056 nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24); 1094 nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
1057 nv_mask(priv, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000); 1095 nvkm_mask(device, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
1058 return 0; 1096 return 0;
1059} 1097}
1060 1098
1061static int 1099static int
1062nv04_gr_unload_context(struct nv04_gr_chan *chan) 1100nv04_gr_unload_context(struct nv04_gr_chan *chan)
1063{ 1101{
1064 struct nv04_gr_priv *priv = nv04_gr_priv(chan); 1102 struct nvkm_device *device = chan->gr->base.engine.subdev.device;
1065 int i; 1103 int i;
1066 1104
1067 for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++) 1105 for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
1068 chan->nv04[i] = nv_rd32(priv, nv04_gr_ctx_regs[i]); 1106 chan->nv04[i] = nvkm_rd32(device, nv04_gr_ctx_regs[i]);
1069 1107
1070 nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10000000); 1108 nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
1071 nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000); 1109 nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
1072 return 0; 1110 return 0;
1073} 1111}
1074 1112
1075static void 1113static void
1076nv04_gr_context_switch(struct nv04_gr_priv *priv) 1114nv04_gr_context_switch(struct nv04_gr *gr)
1077{ 1115{
1116 struct nvkm_device *device = gr->base.engine.subdev.device;
1078 struct nv04_gr_chan *prev = NULL; 1117 struct nv04_gr_chan *prev = NULL;
1079 struct nv04_gr_chan *next = NULL; 1118 struct nv04_gr_chan *next = NULL;
1080 unsigned long flags;
1081 int chid; 1119 int chid;
1082 1120
1083 spin_lock_irqsave(&priv->lock, flags); 1121 nv04_gr_idle(&gr->base);
1084 nv04_gr_idle(priv);
1085 1122
1086 /* If previous context is valid, we need to save it */ 1123 /* If previous context is valid, we need to save it */
1087 prev = nv04_gr_channel(priv); 1124 prev = nv04_gr_channel(gr);
1088 if (prev) 1125 if (prev)
1089 nv04_gr_unload_context(prev); 1126 nv04_gr_unload_context(prev);
1090 1127
1091 /* load context for next channel */ 1128 /* load context for next channel */
1092 chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f; 1129 chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
1093 next = priv->chan[chid]; 1130 next = gr->chan[chid];
1094 if (next) 1131 if (next)
1095 nv04_gr_load_context(next, chid); 1132 nv04_gr_load_context(next, chid);
1096
1097 spin_unlock_irqrestore(&priv->lock, flags);
1098} 1133}
1099 1134
1100static u32 *ctx_reg(struct nv04_gr_chan *chan, u32 reg) 1135static u32 *ctx_reg(struct nv04_gr_chan *chan, u32 reg)
@@ -1109,98 +1144,85 @@ static u32 *ctx_reg(struct nv04_gr_chan *chan, u32 reg)
1109 return NULL; 1144 return NULL;
1110} 1145}
1111 1146
1112static int 1147static void *
1113nv04_gr_context_ctor(struct nvkm_object *parent, 1148nv04_gr_chan_dtor(struct nvkm_object *object)
1114 struct nvkm_object *engine,
1115 struct nvkm_oclass *oclass, void *data, u32 size,
1116 struct nvkm_object **pobject)
1117{ 1149{
1118 struct nvkm_fifo_chan *fifo = (void *)parent; 1150 struct nv04_gr_chan *chan = nv04_gr_chan(object);
1119 struct nv04_gr_priv *priv = (void *)engine; 1151 struct nv04_gr *gr = chan->gr;
1120 struct nv04_gr_chan *chan;
1121 unsigned long flags; 1152 unsigned long flags;
1122 int ret;
1123
1124 ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
1125 *pobject = nv_object(chan);
1126 if (ret)
1127 return ret;
1128
1129 spin_lock_irqsave(&priv->lock, flags);
1130 if (priv->chan[fifo->chid]) {
1131 *pobject = nv_object(priv->chan[fifo->chid]);
1132 atomic_inc(&(*pobject)->refcount);
1133 spin_unlock_irqrestore(&priv->lock, flags);
1134 nvkm_object_destroy(&chan->base);
1135 return 1;
1136 }
1137 1153
1138 *ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31; 1154 spin_lock_irqsave(&gr->lock, flags);
1139 1155 gr->chan[chan->chid] = NULL;
1140 priv->chan[fifo->chid] = chan; 1156 spin_unlock_irqrestore(&gr->lock, flags);
1141 chan->chid = fifo->chid; 1157 return chan;
1142 spin_unlock_irqrestore(&priv->lock, flags);
1143 return 0;
1144} 1158}
1145 1159
1146static void 1160static int
1147nv04_gr_context_dtor(struct nvkm_object *object) 1161nv04_gr_chan_fini(struct nvkm_object *object, bool suspend)
1148{ 1162{
1149 struct nv04_gr_priv *priv = (void *)object->engine; 1163 struct nv04_gr_chan *chan = nv04_gr_chan(object);
1150 struct nv04_gr_chan *chan = (void *)object; 1164 struct nv04_gr *gr = chan->gr;
1165 struct nvkm_device *device = gr->base.engine.subdev.device;
1151 unsigned long flags; 1166 unsigned long flags;
1152 1167
1153 spin_lock_irqsave(&priv->lock, flags); 1168 spin_lock_irqsave(&gr->lock, flags);
1154 priv->chan[chan->chid] = NULL; 1169 nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
1155 spin_unlock_irqrestore(&priv->lock, flags); 1170 if (nv04_gr_channel(gr) == chan)
1156 1171 nv04_gr_unload_context(chan);
1157 nvkm_object_destroy(&chan->base); 1172 nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
1173 spin_unlock_irqrestore(&gr->lock, flags);
1174 return 0;
1158} 1175}
1159 1176
1177static const struct nvkm_object_func
1178nv04_gr_chan = {
1179 .dtor = nv04_gr_chan_dtor,
1180 .fini = nv04_gr_chan_fini,
1181};
1182
1160static int 1183static int
1161nv04_gr_context_fini(struct nvkm_object *object, bool suspend) 1184nv04_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
1185 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
1162{ 1186{
1163 struct nv04_gr_priv *priv = (void *)object->engine; 1187 struct nv04_gr *gr = nv04_gr(base);
1164 struct nv04_gr_chan *chan = (void *)object; 1188 struct nv04_gr_chan *chan;
1165 unsigned long flags; 1189 unsigned long flags;
1166 1190
1167 spin_lock_irqsave(&priv->lock, flags); 1191 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
1168 nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); 1192 return -ENOMEM;
1169 if (nv04_gr_channel(priv) == chan) 1193 nvkm_object_ctor(&nv04_gr_chan, oclass, &chan->object);
1170 nv04_gr_unload_context(chan); 1194 chan->gr = gr;
1171 nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); 1195 chan->chid = fifoch->chid;
1172 spin_unlock_irqrestore(&priv->lock, flags); 1196 *pobject = &chan->object;
1173 1197
1174 return nvkm_object_fini(&chan->base, suspend); 1198 *ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
1175}
1176 1199
1177static struct nvkm_oclass 1200 spin_lock_irqsave(&gr->lock, flags);
1178nv04_gr_cclass = { 1201 gr->chan[chan->chid] = chan;
1179 .handle = NV_ENGCTX(GR, 0x04), 1202 spin_unlock_irqrestore(&gr->lock, flags);
1180 .ofuncs = &(struct nvkm_ofuncs) { 1203 return 0;
1181 .ctor = nv04_gr_context_ctor, 1204}
1182 .dtor = nv04_gr_context_dtor,
1183 .init = nvkm_object_init,
1184 .fini = nv04_gr_context_fini,
1185 },
1186};
1187 1205
1188/******************************************************************************* 1206/*******************************************************************************
1189 * PGRAPH engine/subdev functions 1207 * PGRAPH engine/subdev functions
1190 ******************************************************************************/ 1208 ******************************************************************************/
1191 1209
1192bool 1210bool
1193nv04_gr_idle(void *obj) 1211nv04_gr_idle(struct nvkm_gr *gr)
1194{ 1212{
1195 struct nvkm_gr *gr = nvkm_gr(obj); 1213 struct nvkm_subdev *subdev = &gr->engine.subdev;
1214 struct nvkm_device *device = subdev->device;
1196 u32 mask = 0xffffffff; 1215 u32 mask = 0xffffffff;
1197 1216
1198 if (nv_device(obj)->card_type == NV_40) 1217 if (device->card_type == NV_40)
1199 mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL; 1218 mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
1200 1219
1201 if (!nv_wait(gr, NV04_PGRAPH_STATUS, mask, 0)) { 1220 if (nvkm_msec(device, 2000,
1202 nv_error(gr, "idle timed out with status 0x%08x\n", 1221 if (!(nvkm_rd32(device, NV04_PGRAPH_STATUS) & mask))
1203 nv_rd32(gr, NV04_PGRAPH_STATUS)); 1222 break;
1223 ) < 0) {
1224 nvkm_error(subdev, "idle timed out with status %08x\n",
1225 nvkm_rd32(device, NV04_PGRAPH_STATUS));
1204 return false; 1226 return false;
1205 } 1227 }
1206 1228
@@ -1247,136 +1269,159 @@ nv04_gr_nsource[] = {
1247}; 1269};
1248 1270
1249static void 1271static void
1250nv04_gr_intr(struct nvkm_subdev *subdev) 1272nv04_gr_intr(struct nvkm_gr *base)
1251{ 1273{
1252 struct nv04_gr_priv *priv = (void *)subdev; 1274 struct nv04_gr *gr = nv04_gr(base);
1253 struct nv04_gr_chan *chan = NULL; 1275 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
1254 struct nvkm_namedb *namedb = NULL; 1276 struct nvkm_device *device = subdev->device;
1255 struct nvkm_handle *handle = NULL; 1277 u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
1256 u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR); 1278 u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
1257 u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE); 1279 u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
1258 u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS); 1280 u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
1259 u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
1260 u32 chid = (addr & 0x0f000000) >> 24; 1281 u32 chid = (addr & 0x0f000000) >> 24;
1261 u32 subc = (addr & 0x0000e000) >> 13; 1282 u32 subc = (addr & 0x0000e000) >> 13;
1262 u32 mthd = (addr & 0x00001ffc); 1283 u32 mthd = (addr & 0x00001ffc);
1263 u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA); 1284 u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
1264 u32 class = nv_rd32(priv, 0x400180 + subc * 4) & 0xff; 1285 u32 class = nvkm_rd32(device, 0x400180 + subc * 4) & 0xff;
1265 u32 inst = (nv_rd32(priv, 0x40016c) & 0xffff) << 4; 1286 u32 inst = (nvkm_rd32(device, 0x40016c) & 0xffff) << 4;
1266 u32 show = stat; 1287 u32 show = stat;
1288 char msg[128], src[128], sta[128];
1289 struct nv04_gr_chan *chan;
1267 unsigned long flags; 1290 unsigned long flags;
1268 1291
1269 spin_lock_irqsave(&priv->lock, flags); 1292 spin_lock_irqsave(&gr->lock, flags);
1270 chan = priv->chan[chid]; 1293 chan = gr->chan[chid];
1271 if (chan)
1272 namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
1273 spin_unlock_irqrestore(&priv->lock, flags);
1274 1294
1275 if (stat & NV_PGRAPH_INTR_NOTIFY) { 1295 if (stat & NV_PGRAPH_INTR_NOTIFY) {
1276 if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) { 1296 if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
1277 handle = nvkm_namedb_get_vinst(namedb, inst); 1297 if (!nv04_gr_mthd(device, inst, mthd, data))
1278 if (handle && !nv_call(handle->object, mthd, data))
1279 show &= ~NV_PGRAPH_INTR_NOTIFY; 1298 show &= ~NV_PGRAPH_INTR_NOTIFY;
1280 } 1299 }
1281 } 1300 }
1282 1301
1283 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) { 1302 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
1284 nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); 1303 nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
1285 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 1304 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1286 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 1305 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1287 nv04_gr_context_switch(priv); 1306 nv04_gr_context_switch(gr);
1288 } 1307 }
1289 1308
1290 nv_wr32(priv, NV03_PGRAPH_INTR, stat); 1309 nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
1291 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); 1310 nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
1292 1311
1293 if (show) { 1312 if (show) {
1294 nv_error(priv, "%s", ""); 1313 nvkm_snprintbf(msg, sizeof(msg), nv04_gr_intr_name, show);
1295 nvkm_bitfield_print(nv04_gr_intr_name, show); 1314 nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
1296 pr_cont(" nsource:"); 1315 nvkm_snprintbf(sta, sizeof(sta), nv04_gr_nstatus, nstatus);
1297 nvkm_bitfield_print(nv04_gr_nsource, nsource); 1316 nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
1298 pr_cont(" nstatus:"); 1317 "nstatus %08x [%s] ch %d [%s] subc %d "
1299 nvkm_bitfield_print(nv04_gr_nstatus, nstatus); 1318 "class %04x mthd %04x data %08x\n",
1300 pr_cont("\n"); 1319 show, msg, nsource, src, nstatus, sta, chid,
1301 nv_error(priv, 1320 chan ? chan->object.client->name : "unknown",
1302 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", 1321 subc, class, mthd, data);
1303 chid, nvkm_client_name(chan), subc, class, mthd,
1304 data);
1305 } 1322 }
1306 1323
1307 nvkm_namedb_put(handle); 1324 spin_unlock_irqrestore(&gr->lock, flags);
1308} 1325}
1309 1326
1310static int 1327static int
1311nv04_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 1328nv04_gr_init(struct nvkm_gr *base)
1312 struct nvkm_oclass *oclass, void *data, u32 size,
1313 struct nvkm_object **pobject)
1314{ 1329{
1315 struct nv04_gr_priv *priv; 1330 struct nv04_gr *gr = nv04_gr(base);
1316 int ret; 1331 struct nvkm_device *device = gr->base.engine.subdev.device;
1317
1318 ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
1319 *pobject = nv_object(priv);
1320 if (ret)
1321 return ret;
1322
1323 nv_subdev(priv)->unit = 0x00001000;
1324 nv_subdev(priv)->intr = nv04_gr_intr;
1325 nv_engine(priv)->cclass = &nv04_gr_cclass;
1326 nv_engine(priv)->sclass = nv04_gr_sclass;
1327 spin_lock_init(&priv->lock);
1328 return 0;
1329}
1330
1331static int
1332nv04_gr_init(struct nvkm_object *object)
1333{
1334 struct nvkm_engine *engine = nv_engine(object);
1335 struct nv04_gr_priv *priv = (void *)engine;
1336 int ret;
1337
1338 ret = nvkm_gr_init(&priv->base);
1339 if (ret)
1340 return ret;
1341 1332
1342 /* Enable PGRAPH interrupts */ 1333 /* Enable PGRAPH interrupts */
1343 nv_wr32(priv, NV03_PGRAPH_INTR, 0xFFFFFFFF); 1334 nvkm_wr32(device, NV03_PGRAPH_INTR, 0xFFFFFFFF);
1344 nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 1335 nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
1345 1336
1346 nv_wr32(priv, NV04_PGRAPH_VALID1, 0); 1337 nvkm_wr32(device, NV04_PGRAPH_VALID1, 0);
1347 nv_wr32(priv, NV04_PGRAPH_VALID2, 0); 1338 nvkm_wr32(device, NV04_PGRAPH_VALID2, 0);
1348 /*nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x000001FF); 1339 /*nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x000001FF);
1349 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/ 1340 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
1350 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x1231c000); 1341 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x1231c000);
1351 /*1231C000 blob, 001 haiku*/ 1342 /*1231C000 blob, 001 haiku*/
1352 /*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/ 1343 /*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
1353 nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x72111100); 1344 nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x72111100);
1354 /*0x72111100 blob , 01 haiku*/ 1345 /*0x72111100 blob , 01 haiku*/
1355 /*nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/ 1346 /*nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
1356 nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f071); 1347 nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
1357 /*haiku same*/ 1348 /*haiku same*/
1358 1349
1359 /*nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/ 1350 /*nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
1360 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31); 1351 nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
1361 /*haiku and blob 10d4*/ 1352 /*haiku and blob 10d4*/
1362 1353
1363 nv_wr32(priv, NV04_PGRAPH_STATE , 0xFFFFFFFF); 1354 nvkm_wr32(device, NV04_PGRAPH_STATE , 0xFFFFFFFF);
1364 nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL , 0x10000100); 1355 nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
1365 nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000); 1356 nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
1366 1357
1367 /* These don't belong here, they're part of a per-channel context */ 1358 /* These don't belong here, they're part of a per-channel context */
1368 nv_wr32(priv, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000); 1359 nvkm_wr32(device, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
1369 nv_wr32(priv, NV04_PGRAPH_BETA_AND , 0xFFFFFFFF); 1360 nvkm_wr32(device, NV04_PGRAPH_BETA_AND , 0xFFFFFFFF);
1370 return 0; 1361 return 0;
1371} 1362}
1372 1363
1373struct nvkm_oclass 1364static const struct nvkm_gr_func
1374nv04_gr_oclass = { 1365nv04_gr = {
1375 .handle = NV_ENGINE(GR, 0x04), 1366 .init = nv04_gr_init,
1376 .ofuncs = &(struct nvkm_ofuncs) { 1367 .intr = nv04_gr_intr,
1377 .ctor = nv04_gr_ctor, 1368 .chan_new = nv04_gr_chan_new,
1378 .dtor = _nvkm_gr_dtor, 1369 .sclass = {
1379 .init = nv04_gr_init, 1370 { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
1380 .fini = _nvkm_gr_fini, 1371 { -1, -1, 0x0017, &nv04_gr_object }, /* chroma */
1381 }, 1372 { -1, -1, 0x0018, &nv04_gr_object }, /* pattern (nv01) */
1373 { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
1374 { -1, -1, 0x001c, &nv04_gr_object }, /* line */
1375 { -1, -1, 0x001d, &nv04_gr_object }, /* tri */
1376 { -1, -1, 0x001e, &nv04_gr_object }, /* rect */
1377 { -1, -1, 0x001f, &nv04_gr_object },
1378 { -1, -1, 0x0021, &nv04_gr_object },
1379 { -1, -1, 0x0030, &nv04_gr_object }, /* null */
1380 { -1, -1, 0x0036, &nv04_gr_object },
1381 { -1, -1, 0x0037, &nv04_gr_object },
1382 { -1, -1, 0x0038, &nv04_gr_object }, /* dvd subpicture */
1383 { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
1384 { -1, -1, 0x0042, &nv04_gr_object }, /* surf2d */
1385 { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
1386 { -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
1387 { -1, -1, 0x0048, &nv04_gr_object },
1388 { -1, -1, 0x004a, &nv04_gr_object },
1389 { -1, -1, 0x004b, &nv04_gr_object },
1390 { -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
1391 { -1, -1, 0x0053, &nv04_gr_object },
1392 { -1, -1, 0x0054, &nv04_gr_object }, /* ttri */
1393 { -1, -1, 0x0055, &nv04_gr_object }, /* mtri */
1394 { -1, -1, 0x0057, &nv04_gr_object }, /* chroma */
1395 { -1, -1, 0x0058, &nv04_gr_object }, /* surf_dst */
1396 { -1, -1, 0x0059, &nv04_gr_object }, /* surf_src */
1397 { -1, -1, 0x005a, &nv04_gr_object }, /* surf_color */
1398 { -1, -1, 0x005b, &nv04_gr_object }, /* surf_zeta */
1399 { -1, -1, 0x005c, &nv04_gr_object }, /* line */
1400 { -1, -1, 0x005d, &nv04_gr_object }, /* tri */
1401 { -1, -1, 0x005e, &nv04_gr_object }, /* rect */
1402 { -1, -1, 0x005f, &nv04_gr_object },
1403 { -1, -1, 0x0060, &nv04_gr_object },
1404 { -1, -1, 0x0061, &nv04_gr_object },
1405 { -1, -1, 0x0064, &nv04_gr_object }, /* iifc (nv05) */
1406 { -1, -1, 0x0065, &nv04_gr_object }, /* ifc (nv05) */
1407 { -1, -1, 0x0066, &nv04_gr_object }, /* sifc (nv05) */
1408 { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
1409 { -1, -1, 0x0076, &nv04_gr_object },
1410 { -1, -1, 0x0077, &nv04_gr_object },
1411 {}
1412 }
1382}; 1413};
1414
1415int
1416nv04_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
1417{
1418 struct nv04_gr *gr;
1419
1420 if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
1421 return -ENOMEM;
1422 spin_lock_init(&gr->lock);
1423 *pgr = &gr->base;
1424
1425 return nvkm_gr_ctor(&nv04_gr, device, index, 0x00001000,
1426 true, &gr->base);
1427}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
index 389904eb603f..9436ada62cba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
@@ -21,13 +21,13 @@
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE. 22 * DEALINGS IN THE SOFTWARE.
23 */ 23 */
24#include <engine/gr.h> 24#include "nv10.h"
25#include "regs.h" 25#include "regs.h"
26 26
27#include <core/client.h> 27#include <core/client.h>
28#include <core/device.h> 28#include <core/gpuobj.h>
29#include <core/handle.h>
30#include <engine/fifo.h> 29#include <engine/fifo.h>
30#include <engine/fifo/chan.h>
31#include <subdev/fb.h> 31#include <subdev/fb.h>
32 32
33struct pipe_state { 33struct pipe_state {
@@ -386,14 +386,19 @@ static int nv17_gr_ctx_regs[] = {
386 0x00400a04, 386 0x00400a04,
387}; 387};
388 388
389struct nv10_gr_priv { 389#define nv10_gr(p) container_of((p), struct nv10_gr, base)
390
391struct nv10_gr {
390 struct nvkm_gr base; 392 struct nvkm_gr base;
391 struct nv10_gr_chan *chan[32]; 393 struct nv10_gr_chan *chan[32];
392 spinlock_t lock; 394 spinlock_t lock;
393}; 395};
394 396
397#define nv10_gr_chan(p) container_of((p), struct nv10_gr_chan, object)
398
395struct nv10_gr_chan { 399struct nv10_gr_chan {
396 struct nvkm_object base; 400 struct nvkm_object object;
401 struct nv10_gr *gr;
397 int chid; 402 int chid;
398 int nv10[ARRAY_SIZE(nv10_gr_ctx_regs)]; 403 int nv10[ARRAY_SIZE(nv10_gr_ctx_regs)];
399 int nv17[ARRAY_SIZE(nv17_gr_ctx_regs)]; 404 int nv17[ARRAY_SIZE(nv17_gr_ctx_regs)];
@@ -402,214 +407,151 @@ struct nv10_gr_chan {
402}; 407};
403 408
404 409
405static inline struct nv10_gr_priv *
406nv10_gr_priv(struct nv10_gr_chan *chan)
407{
408 return (void *)nv_object(chan)->engine;
409}
410
411/******************************************************************************* 410/*******************************************************************************
412 * Graphics object classes 411 * Graphics object classes
413 ******************************************************************************/ 412 ******************************************************************************/
414 413
415#define PIPE_SAVE(priv, state, addr) \ 414#define PIPE_SAVE(gr, state, addr) \
416 do { \ 415 do { \
417 int __i; \ 416 int __i; \
418 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr); \ 417 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr); \
419 for (__i = 0; __i < ARRAY_SIZE(state); __i++) \ 418 for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
420 state[__i] = nv_rd32(priv, NV10_PGRAPH_PIPE_DATA); \ 419 state[__i] = nvkm_rd32(device, NV10_PGRAPH_PIPE_DATA); \
421 } while (0) 420 } while (0)
422 421
423#define PIPE_RESTORE(priv, state, addr) \ 422#define PIPE_RESTORE(gr, state, addr) \
424 do { \ 423 do { \
425 int __i; \ 424 int __i; \
426 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr); \ 425 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr); \
427 for (__i = 0; __i < ARRAY_SIZE(state); __i++) \ 426 for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
428 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, state[__i]); \ 427 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, state[__i]); \
429 } while (0) 428 } while (0)
430 429
431static struct nvkm_oclass 430static void
432nv10_gr_sclass[] = { 431nv17_gr_mthd_lma_window(struct nv10_gr_chan *chan, u32 mthd, u32 data)
433 { 0x0012, &nv04_gr_ofuncs }, /* beta1 */
434 { 0x0019, &nv04_gr_ofuncs }, /* clip */
435 { 0x0030, &nv04_gr_ofuncs }, /* null */
436 { 0x0039, &nv04_gr_ofuncs }, /* m2mf */
437 { 0x0043, &nv04_gr_ofuncs }, /* rop */
438 { 0x0044, &nv04_gr_ofuncs }, /* pattern */
439 { 0x004a, &nv04_gr_ofuncs }, /* gdi */
440 { 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
441 { 0x005f, &nv04_gr_ofuncs }, /* blit */
442 { 0x0062, &nv04_gr_ofuncs }, /* surf2d */
443 { 0x0072, &nv04_gr_ofuncs }, /* beta4 */
444 { 0x0089, &nv04_gr_ofuncs }, /* sifm */
445 { 0x008a, &nv04_gr_ofuncs }, /* ifc */
446 { 0x009f, &nv04_gr_ofuncs }, /* blit */
447 { 0x0093, &nv04_gr_ofuncs }, /* surf3d */
448 { 0x0094, &nv04_gr_ofuncs }, /* ttri */
449 { 0x0095, &nv04_gr_ofuncs }, /* mtri */
450 { 0x0056, &nv04_gr_ofuncs }, /* celcius */
451 {},
452};
453
454static struct nvkm_oclass
455nv15_gr_sclass[] = {
456 { 0x0012, &nv04_gr_ofuncs }, /* beta1 */
457 { 0x0019, &nv04_gr_ofuncs }, /* clip */
458 { 0x0030, &nv04_gr_ofuncs }, /* null */
459 { 0x0039, &nv04_gr_ofuncs }, /* m2mf */
460 { 0x0043, &nv04_gr_ofuncs }, /* rop */
461 { 0x0044, &nv04_gr_ofuncs }, /* pattern */
462 { 0x004a, &nv04_gr_ofuncs }, /* gdi */
463 { 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
464 { 0x005f, &nv04_gr_ofuncs }, /* blit */
465 { 0x0062, &nv04_gr_ofuncs }, /* surf2d */
466 { 0x0072, &nv04_gr_ofuncs }, /* beta4 */
467 { 0x0089, &nv04_gr_ofuncs }, /* sifm */
468 { 0x008a, &nv04_gr_ofuncs }, /* ifc */
469 { 0x009f, &nv04_gr_ofuncs }, /* blit */
470 { 0x0093, &nv04_gr_ofuncs }, /* surf3d */
471 { 0x0094, &nv04_gr_ofuncs }, /* ttri */
472 { 0x0095, &nv04_gr_ofuncs }, /* mtri */
473 { 0x0096, &nv04_gr_ofuncs }, /* celcius */
474 {},
475};
476
477static int
478nv17_gr_mthd_lma_window(struct nvkm_object *object, u32 mthd,
479 void *args, u32 size)
480{ 432{
481 struct nv10_gr_chan *chan = (void *)object->parent; 433 struct nvkm_device *device = chan->object.engine->subdev.device;
482 struct nv10_gr_priv *priv = nv10_gr_priv(chan); 434 struct nvkm_gr *gr = &chan->gr->base;
483 struct pipe_state *pipe = &chan->pipe_state; 435 struct pipe_state *pipe = &chan->pipe_state;
484 u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3]; 436 u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
485 u32 xfmode0, xfmode1; 437 u32 xfmode0, xfmode1;
486 u32 data = *(u32 *)args;
487 int i; 438 int i;
488 439
489 chan->lma_window[(mthd - 0x1638) / 4] = data; 440 chan->lma_window[(mthd - 0x1638) / 4] = data;
490 441
491 if (mthd != 0x1644) 442 if (mthd != 0x1644)
492 return 0; 443 return;
493 444
494 nv04_gr_idle(priv); 445 nv04_gr_idle(gr);
495 446
496 PIPE_SAVE(priv, pipe_0x0040, 0x0040); 447 PIPE_SAVE(device, pipe_0x0040, 0x0040);
497 PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200); 448 PIPE_SAVE(device, pipe->pipe_0x0200, 0x0200);
498 449
499 PIPE_RESTORE(priv, chan->lma_window, 0x6790); 450 PIPE_RESTORE(device, chan->lma_window, 0x6790);
500 451
501 nv04_gr_idle(priv); 452 nv04_gr_idle(gr);
502 453
503 xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0); 454 xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
504 xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1); 455 xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);
505 456
506 PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400); 457 PIPE_SAVE(device, pipe->pipe_0x4400, 0x4400);
507 PIPE_SAVE(priv, pipe_0x64c0, 0x64c0); 458 PIPE_SAVE(device, pipe_0x64c0, 0x64c0);
508 PIPE_SAVE(priv, pipe_0x6ab0, 0x6ab0); 459 PIPE_SAVE(device, pipe_0x6ab0, 0x6ab0);
509 PIPE_SAVE(priv, pipe_0x6a80, 0x6a80); 460 PIPE_SAVE(device, pipe_0x6a80, 0x6a80);
510 461
511 nv04_gr_idle(priv); 462 nv04_gr_idle(gr);
512 463
513 nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000); 464 nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000);
514 nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000); 465 nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000);
515 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0); 466 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
516 for (i = 0; i < 4; i++) 467 for (i = 0; i < 4; i++)
517 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000); 468 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
518 for (i = 0; i < 4; i++) 469 for (i = 0; i < 4; i++)
519 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000); 470 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
520 471
521 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0); 472 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
522 for (i = 0; i < 3; i++) 473 for (i = 0; i < 3; i++)
523 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000); 474 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
524 475
525 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80); 476 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
526 for (i = 0; i < 3; i++) 477 for (i = 0; i < 3; i++)
527 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000); 478 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
528
529 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
530 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
531 479
532 PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200); 480 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
481 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008);
533 482
534 nv04_gr_idle(priv); 483 PIPE_RESTORE(device, pipe->pipe_0x0200, 0x0200);
535 484
536 PIPE_RESTORE(priv, pipe_0x0040, 0x0040); 485 nv04_gr_idle(gr);
537 486
538 nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0); 487 PIPE_RESTORE(device, pipe_0x0040, 0x0040);
539 nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
540 488
541 PIPE_RESTORE(priv, pipe_0x64c0, 0x64c0); 489 nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
542 PIPE_RESTORE(priv, pipe_0x6ab0, 0x6ab0); 490 nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1);
543 PIPE_RESTORE(priv, pipe_0x6a80, 0x6a80);
544 PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
545 491
546 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0); 492 PIPE_RESTORE(device, pipe_0x64c0, 0x64c0);
547 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000); 493 PIPE_RESTORE(device, pipe_0x6ab0, 0x6ab0);
494 PIPE_RESTORE(device, pipe_0x6a80, 0x6a80);
495 PIPE_RESTORE(device, pipe->pipe_0x4400, 0x4400);
548 496
549 nv04_gr_idle(priv); 497 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
498 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
550 499
551 return 0; 500 nv04_gr_idle(gr);
552} 501}
553 502
554static int 503static void
555nv17_gr_mthd_lma_enable(struct nvkm_object *object, u32 mthd, 504nv17_gr_mthd_lma_enable(struct nv10_gr_chan *chan, u32 mthd, u32 data)
556 void *args, u32 size)
557{ 505{
558 struct nv10_gr_chan *chan = (void *)object->parent; 506 struct nvkm_device *device = chan->object.engine->subdev.device;
559 struct nv10_gr_priv *priv = nv10_gr_priv(chan); 507 struct nvkm_gr *gr = &chan->gr->base;
560 508
561 nv04_gr_idle(priv); 509 nv04_gr_idle(gr);
562 510
563 nv_mask(priv, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100); 511 nvkm_mask(device, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
564 nv_mask(priv, 0x4006b0, 0x08000000, 0x08000000); 512 nvkm_mask(device, 0x4006b0, 0x08000000, 0x08000000);
565 return 0;
566} 513}
567 514
568static struct nvkm_omthds 515static bool
569nv17_celcius_omthds[] = { 516nv17_gr_mthd_celcius(struct nv10_gr_chan *chan, u32 mthd, u32 data)
570 { 0x1638, 0x1638, nv17_gr_mthd_lma_window }, 517{
571 { 0x163c, 0x163c, nv17_gr_mthd_lma_window }, 518 void (*func)(struct nv10_gr_chan *, u32, u32);
572 { 0x1640, 0x1640, nv17_gr_mthd_lma_window }, 519 switch (mthd) {
573 { 0x1644, 0x1644, nv17_gr_mthd_lma_window }, 520 case 0x1638 ... 0x1644:
574 { 0x1658, 0x1658, nv17_gr_mthd_lma_enable }, 521 func = nv17_gr_mthd_lma_window; break;
575 {} 522 case 0x1658: func = nv17_gr_mthd_lma_enable; break;
576}; 523 default:
524 return false;
525 }
526 func(chan, mthd, data);
527 return true;
528}
577 529
578static struct nvkm_oclass 530static bool
579nv17_gr_sclass[] = { 531nv10_gr_mthd(struct nv10_gr_chan *chan, u8 class, u32 mthd, u32 data)
580 { 0x0012, &nv04_gr_ofuncs }, /* beta1 */ 532{
581 { 0x0019, &nv04_gr_ofuncs }, /* clip */ 533 bool (*func)(struct nv10_gr_chan *, u32, u32);
582 { 0x0030, &nv04_gr_ofuncs }, /* null */ 534 switch (class) {
583 { 0x0039, &nv04_gr_ofuncs }, /* m2mf */ 535 case 0x99: func = nv17_gr_mthd_celcius; break;
584 { 0x0043, &nv04_gr_ofuncs }, /* rop */ 536 default:
585 { 0x0044, &nv04_gr_ofuncs }, /* pattern */ 537 return false;
586 { 0x004a, &nv04_gr_ofuncs }, /* gdi */ 538 }
587 { 0x0052, &nv04_gr_ofuncs }, /* swzsurf */ 539 return func(chan, mthd, data);
588 { 0x005f, &nv04_gr_ofuncs }, /* blit */ 540}
589 { 0x0062, &nv04_gr_ofuncs }, /* surf2d */
590 { 0x0072, &nv04_gr_ofuncs }, /* beta4 */
591 { 0x0089, &nv04_gr_ofuncs }, /* sifm */
592 { 0x008a, &nv04_gr_ofuncs }, /* ifc */
593 { 0x009f, &nv04_gr_ofuncs }, /* blit */
594 { 0x0093, &nv04_gr_ofuncs }, /* surf3d */
595 { 0x0094, &nv04_gr_ofuncs }, /* ttri */
596 { 0x0095, &nv04_gr_ofuncs }, /* mtri */
597 { 0x0099, &nv04_gr_ofuncs, nv17_celcius_omthds },
598 {},
599};
600 541
601/******************************************************************************* 542/*******************************************************************************
602 * PGRAPH context 543 * PGRAPH context
603 ******************************************************************************/ 544 ******************************************************************************/
604 545
605static struct nv10_gr_chan * 546static struct nv10_gr_chan *
606nv10_gr_channel(struct nv10_gr_priv *priv) 547nv10_gr_channel(struct nv10_gr *gr)
607{ 548{
549 struct nvkm_device *device = gr->base.engine.subdev.device;
608 struct nv10_gr_chan *chan = NULL; 550 struct nv10_gr_chan *chan = NULL;
609 if (nv_rd32(priv, 0x400144) & 0x00010000) { 551 if (nvkm_rd32(device, 0x400144) & 0x00010000) {
610 int chid = nv_rd32(priv, 0x400148) >> 24; 552 int chid = nvkm_rd32(device, 0x400148) >> 24;
611 if (chid < ARRAY_SIZE(priv->chan)) 553 if (chid < ARRAY_SIZE(gr->chan))
612 chan = priv->chan[chid]; 554 chan = gr->chan[chid];
613 } 555 }
614 return chan; 556 return chan;
615} 557}
@@ -617,75 +559,78 @@ nv10_gr_channel(struct nv10_gr_priv *priv)
617static void 559static void
618nv10_gr_save_pipe(struct nv10_gr_chan *chan) 560nv10_gr_save_pipe(struct nv10_gr_chan *chan)
619{ 561{
620 struct nv10_gr_priv *priv = nv10_gr_priv(chan); 562 struct nv10_gr *gr = chan->gr;
621 struct pipe_state *pipe = &chan->pipe_state; 563 struct pipe_state *pipe = &chan->pipe_state;
622 564 struct nvkm_device *device = gr->base.engine.subdev.device;
623 PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400); 565
624 PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200); 566 PIPE_SAVE(gr, pipe->pipe_0x4400, 0x4400);
625 PIPE_SAVE(priv, pipe->pipe_0x6400, 0x6400); 567 PIPE_SAVE(gr, pipe->pipe_0x0200, 0x0200);
626 PIPE_SAVE(priv, pipe->pipe_0x6800, 0x6800); 568 PIPE_SAVE(gr, pipe->pipe_0x6400, 0x6400);
627 PIPE_SAVE(priv, pipe->pipe_0x6c00, 0x6c00); 569 PIPE_SAVE(gr, pipe->pipe_0x6800, 0x6800);
628 PIPE_SAVE(priv, pipe->pipe_0x7000, 0x7000); 570 PIPE_SAVE(gr, pipe->pipe_0x6c00, 0x6c00);
629 PIPE_SAVE(priv, pipe->pipe_0x7400, 0x7400); 571 PIPE_SAVE(gr, pipe->pipe_0x7000, 0x7000);
630 PIPE_SAVE(priv, pipe->pipe_0x7800, 0x7800); 572 PIPE_SAVE(gr, pipe->pipe_0x7400, 0x7400);
631 PIPE_SAVE(priv, pipe->pipe_0x0040, 0x0040); 573 PIPE_SAVE(gr, pipe->pipe_0x7800, 0x7800);
632 PIPE_SAVE(priv, pipe->pipe_0x0000, 0x0000); 574 PIPE_SAVE(gr, pipe->pipe_0x0040, 0x0040);
575 PIPE_SAVE(gr, pipe->pipe_0x0000, 0x0000);
633} 576}
634 577
635static void 578static void
636nv10_gr_load_pipe(struct nv10_gr_chan *chan) 579nv10_gr_load_pipe(struct nv10_gr_chan *chan)
637{ 580{
638 struct nv10_gr_priv *priv = nv10_gr_priv(chan); 581 struct nv10_gr *gr = chan->gr;
639 struct pipe_state *pipe = &chan->pipe_state; 582 struct pipe_state *pipe = &chan->pipe_state;
583 struct nvkm_device *device = gr->base.engine.subdev.device;
640 u32 xfmode0, xfmode1; 584 u32 xfmode0, xfmode1;
641 int i; 585 int i;
642 586
643 nv04_gr_idle(priv); 587 nv04_gr_idle(&gr->base);
644 /* XXX check haiku comments */ 588 /* XXX check haiku comments */
645 xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0); 589 xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
646 xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1); 590 xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);
647 nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000); 591 nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000);
648 nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000); 592 nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000);
649 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0); 593 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
650 for (i = 0; i < 4; i++) 594 for (i = 0; i < 4; i++)
651 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000); 595 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
652 for (i = 0; i < 4; i++) 596 for (i = 0; i < 4; i++)
653 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000); 597 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
654 598
655 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0); 599 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
656 for (i = 0; i < 3; i++) 600 for (i = 0; i < 3; i++)
657 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000); 601 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
658 602
659 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80); 603 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
660 for (i = 0; i < 3; i++) 604 for (i = 0; i < 3; i++)
661 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000); 605 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
662 606
663 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040); 607 nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
664 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008); 608 nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008);
665 609
666 610
667 PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200); 611 PIPE_RESTORE(gr, pipe->pipe_0x0200, 0x0200);
668 nv04_gr_idle(priv); 612 nv04_gr_idle(&gr->base);
669 613
670 /* restore XFMODE */ 614 /* restore XFMODE */
671 nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0); 615 nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
672 nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1); 616 nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1);
673 PIPE_RESTORE(priv, pipe->pipe_0x6400, 0x6400); 617 PIPE_RESTORE(gr, pipe->pipe_0x6400, 0x6400);
674 PIPE_RESTORE(priv, pipe->pipe_0x6800, 0x6800); 618 PIPE_RESTORE(gr, pipe->pipe_0x6800, 0x6800);
675 PIPE_RESTORE(priv, pipe->pipe_0x6c00, 0x6c00); 619 PIPE_RESTORE(gr, pipe->pipe_0x6c00, 0x6c00);
676 PIPE_RESTORE(priv, pipe->pipe_0x7000, 0x7000); 620 PIPE_RESTORE(gr, pipe->pipe_0x7000, 0x7000);
677 PIPE_RESTORE(priv, pipe->pipe_0x7400, 0x7400); 621 PIPE_RESTORE(gr, pipe->pipe_0x7400, 0x7400);
678 PIPE_RESTORE(priv, pipe->pipe_0x7800, 0x7800); 622 PIPE_RESTORE(gr, pipe->pipe_0x7800, 0x7800);
679 PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400); 623 PIPE_RESTORE(gr, pipe->pipe_0x4400, 0x4400);
680 PIPE_RESTORE(priv, pipe->pipe_0x0000, 0x0000); 624 PIPE_RESTORE(gr, pipe->pipe_0x0000, 0x0000);
681 PIPE_RESTORE(priv, pipe->pipe_0x0040, 0x0040); 625 PIPE_RESTORE(gr, pipe->pipe_0x0040, 0x0040);
682 nv04_gr_idle(priv); 626 nv04_gr_idle(&gr->base);
683} 627}
684 628
685static void 629static void
686nv10_gr_create_pipe(struct nv10_gr_chan *chan) 630nv10_gr_create_pipe(struct nv10_gr_chan *chan)
687{ 631{
688 struct nv10_gr_priv *priv = nv10_gr_priv(chan); 632 struct nv10_gr *gr = chan->gr;
633 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
689 struct pipe_state *pipe_state = &chan->pipe_state; 634 struct pipe_state *pipe_state = &chan->pipe_state;
690 u32 *pipe_state_addr; 635 u32 *pipe_state_addr;
691 int i; 636 int i;
@@ -698,7 +643,7 @@ nv10_gr_create_pipe(struct nv10_gr_chan *chan)
698 u32 *__end_addr = pipe_state->pipe_##addr + \ 643 u32 *__end_addr = pipe_state->pipe_##addr + \
699 ARRAY_SIZE(pipe_state->pipe_##addr); \ 644 ARRAY_SIZE(pipe_state->pipe_##addr); \
700 if (pipe_state_addr != __end_addr) \ 645 if (pipe_state_addr != __end_addr) \
701 nv_error(priv, "incomplete pipe init for 0x%x : %p/%p\n", \ 646 nvkm_error(subdev, "incomplete pipe init for 0x%x : %p/%p\n", \
702 addr, pipe_state_addr, __end_addr); \ 647 addr, pipe_state_addr, __end_addr); \
703 } while (0) 648 } while (0)
704#define NV_WRITE_PIPE_INIT(value) *(pipe_state_addr++) = value 649#define NV_WRITE_PIPE_INIT(value) *(pipe_state_addr++) = value
@@ -838,33 +783,36 @@ nv10_gr_create_pipe(struct nv10_gr_chan *chan)
838} 783}
839 784
840static int 785static int
841nv10_gr_ctx_regs_find_offset(struct nv10_gr_priv *priv, int reg) 786nv10_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
842{ 787{
788 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
843 int i; 789 int i;
844 for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++) { 790 for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++) {
845 if (nv10_gr_ctx_regs[i] == reg) 791 if (nv10_gr_ctx_regs[i] == reg)
846 return i; 792 return i;
847 } 793 }
848 nv_error(priv, "unknow offset nv10_ctx_regs %d\n", reg); 794 nvkm_error(subdev, "unknow offset nv10_ctx_regs %d\n", reg);
849 return -1; 795 return -1;
850} 796}
851 797
852static int 798static int
853nv17_gr_ctx_regs_find_offset(struct nv10_gr_priv *priv, int reg) 799nv17_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
854{ 800{
801 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
855 int i; 802 int i;
856 for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++) { 803 for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++) {
857 if (nv17_gr_ctx_regs[i] == reg) 804 if (nv17_gr_ctx_regs[i] == reg)
858 return i; 805 return i;
859 } 806 }
860 nv_error(priv, "unknow offset nv17_ctx_regs %d\n", reg); 807 nvkm_error(subdev, "unknow offset nv17_ctx_regs %d\n", reg);
861 return -1; 808 return -1;
862} 809}
863 810
864static void 811static void
865nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst) 812nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
866{ 813{
867 struct nv10_gr_priv *priv = nv10_gr_priv(chan); 814 struct nv10_gr *gr = chan->gr;
815 struct nvkm_device *device = gr->base.engine.subdev.device;
868 u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4]; 816 u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
869 u32 ctx_user, ctx_switch[5]; 817 u32 ctx_user, ctx_switch[5];
870 int i, subchan = -1; 818 int i, subchan = -1;
@@ -876,7 +824,7 @@ nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
876 824
877 /* Look for a celsius object */ 825 /* Look for a celsius object */
878 for (i = 0; i < 8; i++) { 826 for (i = 0; i < 8; i++) {
879 int class = nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff; 827 int class = nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
880 828
881 if (class == 0x56 || class == 0x96 || class == 0x99) { 829 if (class == 0x56 || class == 0x96 || class == 0x99) {
882 subchan = i; 830 subchan = i;
@@ -888,159 +836,183 @@ nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
888 return; 836 return;
889 837
890 /* Save the current ctx object */ 838 /* Save the current ctx object */
891 ctx_user = nv_rd32(priv, NV10_PGRAPH_CTX_USER); 839 ctx_user = nvkm_rd32(device, NV10_PGRAPH_CTX_USER);
892 for (i = 0; i < 5; i++) 840 for (i = 0; i < 5; i++)
893 ctx_switch[i] = nv_rd32(priv, NV10_PGRAPH_CTX_SWITCH(i)); 841 ctx_switch[i] = nvkm_rd32(device, NV10_PGRAPH_CTX_SWITCH(i));
894 842
895 /* Save the FIFO state */ 843 /* Save the FIFO state */
896 st2 = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2); 844 st2 = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2);
897 st2_dl = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DL); 845 st2_dl = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DL);
898 st2_dh = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DH); 846 st2_dh = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DH);
899 fifo_ptr = nv_rd32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR); 847 fifo_ptr = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR);
900 848
901 for (i = 0; i < ARRAY_SIZE(fifo); i++) 849 for (i = 0; i < ARRAY_SIZE(fifo); i++)
902 fifo[i] = nv_rd32(priv, 0x4007a0 + 4 * i); 850 fifo[i] = nvkm_rd32(device, 0x4007a0 + 4 * i);
903 851
904 /* Switch to the celsius subchannel */ 852 /* Switch to the celsius subchannel */
905 for (i = 0; i < 5; i++) 853 for (i = 0; i < 5; i++)
906 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i), 854 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i),
907 nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(subchan, i))); 855 nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(subchan, i)));
908 nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13); 856 nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
909 857
910 /* Inject NV10TCL_DMA_VTXBUF */ 858 /* Inject NV10TCL_DMA_VTXBUF */
911 nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0); 859 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
912 nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, 860 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2,
913 0x2c000000 | chid << 20 | subchan << 16 | 0x18c); 861 0x2c000000 | chid << 20 | subchan << 16 | 0x18c);
914 nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, inst); 862 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
915 nv_mask(priv, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000); 863 nvkm_mask(device, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
916 nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); 864 nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
917 nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); 865 nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
918 866
919 /* Restore the FIFO state */ 867 /* Restore the FIFO state */
920 for (i = 0; i < ARRAY_SIZE(fifo); i++) 868 for (i = 0; i < ARRAY_SIZE(fifo); i++)
921 nv_wr32(priv, 0x4007a0 + 4 * i, fifo[i]); 869 nvkm_wr32(device, 0x4007a0 + 4 * i, fifo[i]);
922 870
923 nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr); 871 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
924 nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, st2); 872 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, st2);
925 nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl); 873 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
926 nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh); 874 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
927 875
928 /* Restore the current ctx object */ 876 /* Restore the current ctx object */
929 for (i = 0; i < 5; i++) 877 for (i = 0; i < 5; i++)
930 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]); 878 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
931 nv_wr32(priv, NV10_PGRAPH_CTX_USER, ctx_user); 879 nvkm_wr32(device, NV10_PGRAPH_CTX_USER, ctx_user);
932} 880}
933 881
934static int 882static int
935nv10_gr_load_context(struct nv10_gr_chan *chan, int chid) 883nv10_gr_load_context(struct nv10_gr_chan *chan, int chid)
936{ 884{
937 struct nv10_gr_priv *priv = nv10_gr_priv(chan); 885 struct nv10_gr *gr = chan->gr;
886 struct nvkm_device *device = gr->base.engine.subdev.device;
938 u32 inst; 887 u32 inst;
939 int i; 888 int i;
940 889
941 for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++) 890 for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
942 nv_wr32(priv, nv10_gr_ctx_regs[i], chan->nv10[i]); 891 nvkm_wr32(device, nv10_gr_ctx_regs[i], chan->nv10[i]);
943 892
944 if (nv_device(priv)->card_type >= NV_11 && 893 if (device->card_type >= NV_11 && device->chipset >= 0x17) {
945 nv_device(priv)->chipset >= 0x17) {
946 for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++) 894 for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
947 nv_wr32(priv, nv17_gr_ctx_regs[i], chan->nv17[i]); 895 nvkm_wr32(device, nv17_gr_ctx_regs[i], chan->nv17[i]);
948 } 896 }
949 897
950 nv10_gr_load_pipe(chan); 898 nv10_gr_load_pipe(chan);
951 899
952 inst = nv_rd32(priv, NV10_PGRAPH_GLOBALSTATE1) & 0xffff; 900 inst = nvkm_rd32(device, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
953 nv10_gr_load_dma_vtxbuf(chan, chid, inst); 901 nv10_gr_load_dma_vtxbuf(chan, chid, inst);
954 902
955 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100); 903 nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
956 nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24); 904 nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
957 nv_mask(priv, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000); 905 nvkm_mask(device, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
958 return 0; 906 return 0;
959} 907}
960 908
961static int 909static int
962nv10_gr_unload_context(struct nv10_gr_chan *chan) 910nv10_gr_unload_context(struct nv10_gr_chan *chan)
963{ 911{
964 struct nv10_gr_priv *priv = nv10_gr_priv(chan); 912 struct nv10_gr *gr = chan->gr;
913 struct nvkm_device *device = gr->base.engine.subdev.device;
965 int i; 914 int i;
966 915
967 for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++) 916 for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
968 chan->nv10[i] = nv_rd32(priv, nv10_gr_ctx_regs[i]); 917 chan->nv10[i] = nvkm_rd32(device, nv10_gr_ctx_regs[i]);
969 918
970 if (nv_device(priv)->card_type >= NV_11 && 919 if (device->card_type >= NV_11 && device->chipset >= 0x17) {
971 nv_device(priv)->chipset >= 0x17) {
972 for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++) 920 for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
973 chan->nv17[i] = nv_rd32(priv, nv17_gr_ctx_regs[i]); 921 chan->nv17[i] = nvkm_rd32(device, nv17_gr_ctx_regs[i]);
974 } 922 }
975 923
976 nv10_gr_save_pipe(chan); 924 nv10_gr_save_pipe(chan);
977 925
978 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000000); 926 nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
979 nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000); 927 nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
980 return 0; 928 return 0;
981} 929}
982 930
983static void 931static void
984nv10_gr_context_switch(struct nv10_gr_priv *priv) 932nv10_gr_context_switch(struct nv10_gr *gr)
985{ 933{
934 struct nvkm_device *device = gr->base.engine.subdev.device;
986 struct nv10_gr_chan *prev = NULL; 935 struct nv10_gr_chan *prev = NULL;
987 struct nv10_gr_chan *next = NULL; 936 struct nv10_gr_chan *next = NULL;
988 unsigned long flags;
989 int chid; 937 int chid;
990 938
991 spin_lock_irqsave(&priv->lock, flags); 939 nv04_gr_idle(&gr->base);
992 nv04_gr_idle(priv);
993 940
994 /* If previous context is valid, we need to save it */ 941 /* If previous context is valid, we need to save it */
995 prev = nv10_gr_channel(priv); 942 prev = nv10_gr_channel(gr);
996 if (prev) 943 if (prev)
997 nv10_gr_unload_context(prev); 944 nv10_gr_unload_context(prev);
998 945
999 /* load context for next channel */ 946 /* load context for next channel */
1000 chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; 947 chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
1001 next = priv->chan[chid]; 948 next = gr->chan[chid];
1002 if (next) 949 if (next)
1003 nv10_gr_load_context(next, chid); 950 nv10_gr_load_context(next, chid);
951}
952
953static int
954nv10_gr_chan_fini(struct nvkm_object *object, bool suspend)
955{
956 struct nv10_gr_chan *chan = nv10_gr_chan(object);
957 struct nv10_gr *gr = chan->gr;
958 struct nvkm_device *device = gr->base.engine.subdev.device;
959 unsigned long flags;
1004 960
1005 spin_unlock_irqrestore(&priv->lock, flags); 961 spin_lock_irqsave(&gr->lock, flags);
962 nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
963 if (nv10_gr_channel(gr) == chan)
964 nv10_gr_unload_context(chan);
965 nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
966 spin_unlock_irqrestore(&gr->lock, flags);
967 return 0;
968}
969
970static void *
971nv10_gr_chan_dtor(struct nvkm_object *object)
972{
973 struct nv10_gr_chan *chan = nv10_gr_chan(object);
974 struct nv10_gr *gr = chan->gr;
975 unsigned long flags;
976
977 spin_lock_irqsave(&gr->lock, flags);
978 gr->chan[chan->chid] = NULL;
979 spin_unlock_irqrestore(&gr->lock, flags);
980 return chan;
1006} 981}
1007 982
983static const struct nvkm_object_func
984nv10_gr_chan = {
985 .dtor = nv10_gr_chan_dtor,
986 .fini = nv10_gr_chan_fini,
987};
988
1008#define NV_WRITE_CTX(reg, val) do { \ 989#define NV_WRITE_CTX(reg, val) do { \
1009 int offset = nv10_gr_ctx_regs_find_offset(priv, reg); \ 990 int offset = nv10_gr_ctx_regs_find_offset(gr, reg); \
1010 if (offset > 0) \ 991 if (offset > 0) \
1011 chan->nv10[offset] = val; \ 992 chan->nv10[offset] = val; \
1012 } while (0) 993 } while (0)
1013 994
1014#define NV17_WRITE_CTX(reg, val) do { \ 995#define NV17_WRITE_CTX(reg, val) do { \
1015 int offset = nv17_gr_ctx_regs_find_offset(priv, reg); \ 996 int offset = nv17_gr_ctx_regs_find_offset(gr, reg); \
1016 if (offset > 0) \ 997 if (offset > 0) \
1017 chan->nv17[offset] = val; \ 998 chan->nv17[offset] = val; \
1018 } while (0) 999 } while (0)
1019 1000
1020static int 1001int
1021nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 1002nv10_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
1022 struct nvkm_oclass *oclass, void *data, u32 size, 1003 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
1023 struct nvkm_object **pobject)
1024{ 1004{
1025 struct nvkm_fifo_chan *fifo = (void *)parent; 1005 struct nv10_gr *gr = nv10_gr(base);
1026 struct nv10_gr_priv *priv = (void *)engine;
1027 struct nv10_gr_chan *chan; 1006 struct nv10_gr_chan *chan;
1007 struct nvkm_device *device = gr->base.engine.subdev.device;
1028 unsigned long flags; 1008 unsigned long flags;
1029 int ret; 1009
1030 1010 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
1031 ret = nvkm_object_create(parent, engine, oclass, 0, &chan); 1011 return -ENOMEM;
1032 *pobject = nv_object(chan); 1012 nvkm_object_ctor(&nv10_gr_chan, oclass, &chan->object);
1033 if (ret) 1013 chan->gr = gr;
1034 return ret; 1014 chan->chid = fifoch->chid;
1035 1015 *pobject = &chan->object;
1036 spin_lock_irqsave(&priv->lock, flags);
1037 if (priv->chan[fifo->chid]) {
1038 *pobject = nv_object(priv->chan[fifo->chid]);
1039 atomic_inc(&(*pobject)->refcount);
1040 spin_unlock_irqrestore(&priv->lock, flags);
1041 nvkm_object_destroy(&chan->base);
1042 return 1;
1043 }
1044 1016
1045 NV_WRITE_CTX(0x00400e88, 0x08000000); 1017 NV_WRITE_CTX(0x00400e88, 0x08000000);
1046 NV_WRITE_CTX(0x00400e9c, 0x4b7fffff); 1018 NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
@@ -1049,12 +1021,11 @@ nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
1049 NV_WRITE_CTX(0x00400e14, 0x00001000); 1021 NV_WRITE_CTX(0x00400e14, 0x00001000);
1050 NV_WRITE_CTX(0x00400e30, 0x00080008); 1022 NV_WRITE_CTX(0x00400e30, 0x00080008);
1051 NV_WRITE_CTX(0x00400e34, 0x00080008); 1023 NV_WRITE_CTX(0x00400e34, 0x00080008);
1052 if (nv_device(priv)->card_type >= NV_11 && 1024 if (device->card_type >= NV_11 && device->chipset >= 0x17) {
1053 nv_device(priv)->chipset >= 0x17) {
1054 /* is it really needed ??? */ 1025 /* is it really needed ??? */
1055 NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4, 1026 NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
1056 nv_rd32(priv, NV10_PGRAPH_DEBUG_4)); 1027 nvkm_rd32(device, NV10_PGRAPH_DEBUG_4));
1057 NV17_WRITE_CTX(0x004006b0, nv_rd32(priv, 0x004006b0)); 1028 NV17_WRITE_CTX(0x004006b0, nvkm_rd32(device, 0x004006b0));
1058 NV17_WRITE_CTX(0x00400eac, 0x0fff0000); 1029 NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
1059 NV17_WRITE_CTX(0x00400eb0, 0x0fff0000); 1030 NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
1060 NV17_WRITE_CTX(0x00400ec0, 0x00000080); 1031 NV17_WRITE_CTX(0x00400ec0, 0x00000080);
@@ -1064,74 +1035,32 @@ nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
1064 1035
1065 nv10_gr_create_pipe(chan); 1036 nv10_gr_create_pipe(chan);
1066 1037
1067 priv->chan[fifo->chid] = chan; 1038 spin_lock_irqsave(&gr->lock, flags);
1068 chan->chid = fifo->chid; 1039 gr->chan[chan->chid] = chan;
1069 spin_unlock_irqrestore(&priv->lock, flags); 1040 spin_unlock_irqrestore(&gr->lock, flags);
1070 return 0; 1041 return 0;
1071} 1042}
1072 1043
1073static void
1074nv10_gr_context_dtor(struct nvkm_object *object)
1075{
1076 struct nv10_gr_priv *priv = (void *)object->engine;
1077 struct nv10_gr_chan *chan = (void *)object;
1078 unsigned long flags;
1079
1080 spin_lock_irqsave(&priv->lock, flags);
1081 priv->chan[chan->chid] = NULL;
1082 spin_unlock_irqrestore(&priv->lock, flags);
1083
1084 nvkm_object_destroy(&chan->base);
1085}
1086
1087static int
1088nv10_gr_context_fini(struct nvkm_object *object, bool suspend)
1089{
1090 struct nv10_gr_priv *priv = (void *)object->engine;
1091 struct nv10_gr_chan *chan = (void *)object;
1092 unsigned long flags;
1093
1094 spin_lock_irqsave(&priv->lock, flags);
1095 nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
1096 if (nv10_gr_channel(priv) == chan)
1097 nv10_gr_unload_context(chan);
1098 nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
1099 spin_unlock_irqrestore(&priv->lock, flags);
1100
1101 return nvkm_object_fini(&chan->base, suspend);
1102}
1103
1104static struct nvkm_oclass
1105nv10_gr_cclass = {
1106 .handle = NV_ENGCTX(GR, 0x10),
1107 .ofuncs = &(struct nvkm_ofuncs) {
1108 .ctor = nv10_gr_context_ctor,
1109 .dtor = nv10_gr_context_dtor,
1110 .init = nvkm_object_init,
1111 .fini = nv10_gr_context_fini,
1112 },
1113};
1114
1115/******************************************************************************* 1044/*******************************************************************************
1116 * PGRAPH engine/subdev functions 1045 * PGRAPH engine/subdev functions
1117 ******************************************************************************/ 1046 ******************************************************************************/
1118 1047
1119static void 1048void
1120nv10_gr_tile_prog(struct nvkm_engine *engine, int i) 1049nv10_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
1121{ 1050{
1122 struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i]; 1051 struct nv10_gr *gr = nv10_gr(base);
1123 struct nvkm_fifo *pfifo = nvkm_fifo(engine); 1052 struct nvkm_device *device = gr->base.engine.subdev.device;
1124 struct nv10_gr_priv *priv = (void *)engine; 1053 struct nvkm_fifo *fifo = device->fifo;
1125 unsigned long flags; 1054 unsigned long flags;
1126 1055
1127 pfifo->pause(pfifo, &flags); 1056 nvkm_fifo_pause(fifo, &flags);
1128 nv04_gr_idle(priv); 1057 nv04_gr_idle(&gr->base);
1129 1058
1130 nv_wr32(priv, NV10_PGRAPH_TLIMIT(i), tile->limit); 1059 nvkm_wr32(device, NV10_PGRAPH_TLIMIT(i), tile->limit);
1131 nv_wr32(priv, NV10_PGRAPH_TSIZE(i), tile->pitch); 1060 nvkm_wr32(device, NV10_PGRAPH_TSIZE(i), tile->pitch);
1132 nv_wr32(priv, NV10_PGRAPH_TILE(i), tile->addr); 1061 nvkm_wr32(device, NV10_PGRAPH_TILE(i), tile->addr);
1133 1062
1134 pfifo->start(pfifo, &flags); 1063 nvkm_fifo_start(fifo, &flags);
1135} 1064}
1136 1065
1137const struct nvkm_bitfield nv10_gr_intr_name[] = { 1066const struct nvkm_bitfield nv10_gr_intr_name[] = {
@@ -1148,168 +1077,145 @@ const struct nvkm_bitfield nv10_gr_nstatus[] = {
1148 {} 1077 {}
1149}; 1078};
1150 1079
1151static void 1080void
1152nv10_gr_intr(struct nvkm_subdev *subdev) 1081nv10_gr_intr(struct nvkm_gr *base)
1153{ 1082{
1154 struct nv10_gr_priv *priv = (void *)subdev; 1083 struct nv10_gr *gr = nv10_gr(base);
1155 struct nv10_gr_chan *chan = NULL; 1084 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
1156 struct nvkm_namedb *namedb = NULL; 1085 struct nvkm_device *device = subdev->device;
1157 struct nvkm_handle *handle = NULL; 1086 u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
1158 u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR); 1087 u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
1159 u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE); 1088 u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
1160 u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS); 1089 u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
1161 u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
1162 u32 chid = (addr & 0x01f00000) >> 20; 1090 u32 chid = (addr & 0x01f00000) >> 20;
1163 u32 subc = (addr & 0x00070000) >> 16; 1091 u32 subc = (addr & 0x00070000) >> 16;
1164 u32 mthd = (addr & 0x00001ffc); 1092 u32 mthd = (addr & 0x00001ffc);
1165 u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA); 1093 u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
1166 u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff; 1094 u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xfff;
1167 u32 show = stat; 1095 u32 show = stat;
1096 char msg[128], src[128], sta[128];
1097 struct nv10_gr_chan *chan;
1168 unsigned long flags; 1098 unsigned long flags;
1169 1099
1170 spin_lock_irqsave(&priv->lock, flags); 1100 spin_lock_irqsave(&gr->lock, flags);
1171 chan = priv->chan[chid]; 1101 chan = gr->chan[chid];
1172 if (chan)
1173 namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
1174 spin_unlock_irqrestore(&priv->lock, flags);
1175 1102
1176 if (stat & NV_PGRAPH_INTR_ERROR) { 1103 if (stat & NV_PGRAPH_INTR_ERROR) {
1177 if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) { 1104 if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
1178 handle = nvkm_namedb_get_class(namedb, class); 1105 if (!nv10_gr_mthd(chan, class, mthd, data))
1179 if (handle && !nv_call(handle->object, mthd, data))
1180 show &= ~NV_PGRAPH_INTR_ERROR; 1106 show &= ~NV_PGRAPH_INTR_ERROR;
1181 } 1107 }
1182 } 1108 }
1183 1109
1184 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) { 1110 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
1185 nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); 1111 nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
1186 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 1112 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1187 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 1113 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1188 nv10_gr_context_switch(priv); 1114 nv10_gr_context_switch(gr);
1189 } 1115 }
1190 1116
1191 nv_wr32(priv, NV03_PGRAPH_INTR, stat); 1117 nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
1192 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); 1118 nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
1193 1119
1194 if (show) { 1120 if (show) {
1195 nv_error(priv, "%s", ""); 1121 nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
1196 nvkm_bitfield_print(nv10_gr_intr_name, show); 1122 nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
1197 pr_cont(" nsource:"); 1123 nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
1198 nvkm_bitfield_print(nv04_gr_nsource, nsource); 1124 nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
1199 pr_cont(" nstatus:"); 1125 "nstatus %08x [%s] ch %d [%s] subc %d "
1200 nvkm_bitfield_print(nv10_gr_nstatus, nstatus); 1126 "class %04x mthd %04x data %08x\n",
1201 pr_cont("\n"); 1127 show, msg, nsource, src, nstatus, sta, chid,
1202 nv_error(priv, 1128 chan ? chan->object.client->name : "unknown",
1203 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", 1129 subc, class, mthd, data);
1204 chid, nvkm_client_name(chan), subc, class, mthd,
1205 data);
1206 } 1130 }
1207 1131
1208 nvkm_namedb_put(handle); 1132 spin_unlock_irqrestore(&gr->lock, flags);
1209} 1133}
1210 1134
1211static int 1135int
1212nv10_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 1136nv10_gr_init(struct nvkm_gr *base)
1213 struct nvkm_oclass *oclass, void *data, u32 size,
1214 struct nvkm_object **pobject)
1215{ 1137{
1216 struct nv10_gr_priv *priv; 1138 struct nv10_gr *gr = nv10_gr(base);
1217 int ret; 1139 struct nvkm_device *device = gr->base.engine.subdev.device;
1218 1140
1219 ret = nvkm_gr_create(parent, engine, oclass, true, &priv); 1141 nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF);
1220 *pobject = nv_object(priv); 1142 nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
1221 if (ret) 1143
1222 return ret; 1144 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
1223 1145 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
1224 nv_subdev(priv)->unit = 0x00001000; 1146 nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x00118700);
1225 nv_subdev(priv)->intr = nv10_gr_intr; 1147 /* nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
1226 nv_engine(priv)->cclass = &nv10_gr_cclass; 1148 nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
1227 1149 nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
1228 if (nv_device(priv)->chipset <= 0x10) 1150
1229 nv_engine(priv)->sclass = nv10_gr_sclass; 1151 if (device->card_type >= NV_11 && device->chipset >= 0x17) {
1230 else 1152 nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x1f000000);
1231 if (nv_device(priv)->chipset < 0x17 || 1153 nvkm_wr32(device, 0x400a10, 0x03ff3fb6);
1232 nv_device(priv)->card_type < NV_11) 1154 nvkm_wr32(device, 0x400838, 0x002f8684);
1233 nv_engine(priv)->sclass = nv15_gr_sclass; 1155 nvkm_wr32(device, 0x40083c, 0x00115f3f);
1234 else 1156 nvkm_wr32(device, 0x4006b0, 0x40000020);
1235 nv_engine(priv)->sclass = nv17_gr_sclass;
1236
1237 nv_engine(priv)->tile_prog = nv10_gr_tile_prog;
1238 spin_lock_init(&priv->lock);
1239 return 0;
1240}
1241
1242static void
1243nv10_gr_dtor(struct nvkm_object *object)
1244{
1245 struct nv10_gr_priv *priv = (void *)object;
1246 nvkm_gr_destroy(&priv->base);
1247}
1248
1249static int
1250nv10_gr_init(struct nvkm_object *object)
1251{
1252 struct nvkm_engine *engine = nv_engine(object);
1253 struct nvkm_fb *pfb = nvkm_fb(object);
1254 struct nv10_gr_priv *priv = (void *)engine;
1255 int ret, i;
1256
1257 ret = nvkm_gr_init(&priv->base);
1258 if (ret)
1259 return ret;
1260
1261 nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
1262 nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
1263
1264 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
1265 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
1266 nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
1267 /* nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
1268 nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
1269 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
1270
1271 if (nv_device(priv)->card_type >= NV_11 &&
1272 nv_device(priv)->chipset >= 0x17) {
1273 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000);
1274 nv_wr32(priv, 0x400a10, 0x03ff3fb6);
1275 nv_wr32(priv, 0x400838, 0x002f8684);
1276 nv_wr32(priv, 0x40083c, 0x00115f3f);
1277 nv_wr32(priv, 0x4006b0, 0x40000020);
1278 } else { 1157 } else {
1279 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000); 1158 nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00000000);
1280 } 1159 }
1281 1160
1282 /* Turn all the tiling regions off. */ 1161 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
1283 for (i = 0; i < pfb->tile.regions; i++) 1162 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
1284 engine->tile_prog(engine, i); 1163 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
1285 1164 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
1286 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000); 1165 nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
1287 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000); 1166 nvkm_wr32(device, NV10_PGRAPH_STATE, 0xFFFFFFFF);
1288 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
1289 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
1290 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
1291 nv_wr32(priv, NV10_PGRAPH_STATE, 0xFFFFFFFF);
1292 1167
1293 nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000); 1168 nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
1294 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100); 1169 nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
1295 nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, 0x08000000); 1170 nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
1296 return 0; 1171 return 0;
1297} 1172}
1298 1173
1299static int 1174int
1300nv10_gr_fini(struct nvkm_object *object, bool suspend) 1175nv10_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
1176 int index, struct nvkm_gr **pgr)
1301{ 1177{
1302 struct nv10_gr_priv *priv = (void *)object; 1178 struct nv10_gr *gr;
1303 return nvkm_gr_fini(&priv->base, suspend); 1179
1180 if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
1181 return -ENOMEM;
1182 spin_lock_init(&gr->lock);
1183 *pgr = &gr->base;
1184
1185 return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
1304} 1186}
1305 1187
1306struct nvkm_oclass 1188static const struct nvkm_gr_func
1307nv10_gr_oclass = { 1189nv10_gr = {
1308 .handle = NV_ENGINE(GR, 0x10), 1190 .init = nv10_gr_init,
1309 .ofuncs = &(struct nvkm_ofuncs) { 1191 .intr = nv10_gr_intr,
1310 .ctor = nv10_gr_ctor, 1192 .tile = nv10_gr_tile,
1311 .dtor = nv10_gr_dtor, 1193 .chan_new = nv10_gr_chan_new,
1312 .init = nv10_gr_init, 1194 .sclass = {
1313 .fini = nv10_gr_fini, 1195 { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
1314 }, 1196 { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
1197 { -1, -1, 0x0030, &nv04_gr_object }, /* null */
1198 { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
1199 { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
1200 { -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
1201 { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
1202 { -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
1203 { -1, -1, 0x005f, &nv04_gr_object }, /* blit */
1204 { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
1205 { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
1206 { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
1207 { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
1208 { -1, -1, 0x009f, &nv04_gr_object }, /* blit */
1209 { -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
1210 { -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
1211 { -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
1212 { -1, -1, 0x0056, &nv04_gr_object }, /* celcius */
1213 {}
1214 }
1315}; 1215};
1216
1217int
1218nv10_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
1219{
1220 return nv10_gr_new_(&nv10_gr, device, index, pgr);
1221}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
new file mode 100644
index 000000000000..d7c3d86cc99d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
@@ -0,0 +1,13 @@
1#ifndef __NV10_GR_H__
2#define __NV10_GR_H__
3#include "priv.h"
4
5int nv10_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
6 struct nvkm_gr **);
7int nv10_gr_init(struct nvkm_gr *);
8void nv10_gr_intr(struct nvkm_gr *);
9void nv10_gr_tile(struct nvkm_gr *, int, struct nvkm_fb_tile *);
10
11int nv10_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
12 const struct nvkm_oclass *, struct nvkm_object **);
13#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
new file mode 100644
index 000000000000..3e2c6856b4c4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
@@ -0,0 +1,59 @@
1/*
2 * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragr) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24#include "nv10.h"
25
26static const struct nvkm_gr_func
27nv15_gr = {
28 .init = nv10_gr_init,
29 .intr = nv10_gr_intr,
30 .tile = nv10_gr_tile,
31 .chan_new = nv10_gr_chan_new,
32 .sclass = {
33 { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
34 { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
35 { -1, -1, 0x0030, &nv04_gr_object }, /* null */
36 { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
37 { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
38 { -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
39 { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
40 { -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
41 { -1, -1, 0x005f, &nv04_gr_object }, /* blit */
42 { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
43 { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
44 { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
45 { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
46 { -1, -1, 0x009f, &nv04_gr_object }, /* blit */
47 { -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
48 { -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
49 { -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
50 { -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
51 {}
52 }
53};
54
55int
56nv15_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
57{
58 return nv10_gr_new_(&nv15_gr, device, index, pgr);
59}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
new file mode 100644
index 000000000000..12437d085a73
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
@@ -0,0 +1,59 @@
1/*
2 * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragr) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24#include "nv10.h"
25
26static const struct nvkm_gr_func
27nv17_gr = {
28 .init = nv10_gr_init,
29 .intr = nv10_gr_intr,
30 .tile = nv10_gr_tile,
31 .chan_new = nv10_gr_chan_new,
32 .sclass = {
33 { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
34 { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
35 { -1, -1, 0x0030, &nv04_gr_object }, /* null */
36 { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
37 { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
38 { -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
39 { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
40 { -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
41 { -1, -1, 0x005f, &nv04_gr_object }, /* blit */
42 { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
43 { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
44 { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
45 { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
46 { -1, -1, 0x009f, &nv04_gr_object }, /* blit */
47 { -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
48 { -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
49 { -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
50 { -1, -1, 0x0099, &nv04_gr_object },
51 {}
52 }
53};
54
55int
56nv17_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
57{
58 return nv10_gr_new_(&nv17_gr, device, index, pgr);
59}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index 1713ffb669e8..5caef65d3c6e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -2,375 +2,374 @@
2#include "regs.h" 2#include "regs.h"
3 3
4#include <core/client.h> 4#include <core/client.h>
5#include <core/device.h> 5#include <core/gpuobj.h>
6#include <core/handle.h>
7#include <engine/fifo.h> 6#include <engine/fifo.h>
7#include <engine/fifo/chan.h>
8#include <subdev/fb.h> 8#include <subdev/fb.h>
9#include <subdev/timer.h> 9#include <subdev/timer.h>
10 10
11/******************************************************************************* 11/*******************************************************************************
12 * Graphics object classes 12 * PGRAPH context
13 ******************************************************************************/ 13 ******************************************************************************/
14 14
15static struct nvkm_oclass 15int
16nv20_gr_sclass[] = { 16nv20_gr_chan_init(struct nvkm_object *object)
17 { 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */ 17{
18 { 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */ 18 struct nv20_gr_chan *chan = nv20_gr_chan(object);
19 { 0x0030, &nv04_gr_ofuncs, NULL }, /* null */ 19 struct nv20_gr *gr = chan->gr;
20 { 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */ 20 u32 inst = nvkm_memory_addr(chan->inst);
21 { 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
22 { 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
23 { 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
24 { 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
25 { 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
26 { 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
27 { 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
28 { 0x0096, &nv04_gr_ofuncs, NULL }, /* celcius */
29 { 0x0097, &nv04_gr_ofuncs, NULL }, /* kelvin */
30 { 0x009e, &nv04_gr_ofuncs, NULL }, /* swzsurf */
31 { 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
32 {},
33};
34 21
35/******************************************************************************* 22 nvkm_kmap(gr->ctxtab);
36 * PGRAPH context 23 nvkm_wo32(gr->ctxtab, chan->chid * 4, inst >> 4);
37 ******************************************************************************/ 24 nvkm_done(gr->ctxtab);
25 return 0;
26}
27
28int
29nv20_gr_chan_fini(struct nvkm_object *object, bool suspend)
30{
31 struct nv20_gr_chan *chan = nv20_gr_chan(object);
32 struct nv20_gr *gr = chan->gr;
33 struct nvkm_device *device = gr->base.engine.subdev.device;
34 u32 inst = nvkm_memory_addr(chan->inst);
35 int chid = -1;
36
37 nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);
38 if (nvkm_rd32(device, 0x400144) & 0x00010000)
39 chid = (nvkm_rd32(device, 0x400148) & 0x1f000000) >> 24;
40 if (chan->chid == chid) {
41 nvkm_wr32(device, 0x400784, inst >> 4);
42 nvkm_wr32(device, 0x400788, 0x00000002);
43 nvkm_msec(device, 2000,
44 if (!nvkm_rd32(device, 0x400700))
45 break;
46 );
47 nvkm_wr32(device, 0x400144, 0x10000000);
48 nvkm_mask(device, 0x400148, 0xff000000, 0x1f000000);
49 }
50 nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
51
52 nvkm_kmap(gr->ctxtab);
53 nvkm_wo32(gr->ctxtab, chan->chid * 4, 0x00000000);
54 nvkm_done(gr->ctxtab);
55 return 0;
56}
57
58void *
59nv20_gr_chan_dtor(struct nvkm_object *object)
60{
61 struct nv20_gr_chan *chan = nv20_gr_chan(object);
62 nvkm_memory_del(&chan->inst);
63 return chan;
64}
65
66static const struct nvkm_object_func
67nv20_gr_chan = {
68 .dtor = nv20_gr_chan_dtor,
69 .init = nv20_gr_chan_init,
70 .fini = nv20_gr_chan_fini,
71};
38 72
39static int 73static int
40nv20_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 74nv20_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
41 struct nvkm_oclass *oclass, void *data, u32 size, 75 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
42 struct nvkm_object **pobject)
43{ 76{
77 struct nv20_gr *gr = nv20_gr(base);
44 struct nv20_gr_chan *chan; 78 struct nv20_gr_chan *chan;
45 int ret, i; 79 int ret, i;
46 80
47 ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x37f0, 81 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
48 16, NVOBJ_FLAG_ZERO_ALLOC, &chan); 82 return -ENOMEM;
49 *pobject = nv_object(chan); 83 nvkm_object_ctor(&nv20_gr_chan, oclass, &chan->object);
84 chan->gr = gr;
85 chan->chid = fifoch->chid;
86 *pobject = &chan->object;
87
88 ret = nvkm_memory_new(gr->base.engine.subdev.device,
89 NVKM_MEM_TARGET_INST, 0x37f0, 16, true,
90 &chan->inst);
50 if (ret) 91 if (ret)
51 return ret; 92 return ret;
52 93
53 chan->chid = nvkm_fifo_chan(parent)->chid; 94 nvkm_kmap(chan->inst);
54 95 nvkm_wo32(chan->inst, 0x0000, 0x00000001 | (chan->chid << 24));
55 nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24)); 96 nvkm_wo32(chan->inst, 0x033c, 0xffff0000);
56 nv_wo32(chan, 0x033c, 0xffff0000); 97 nvkm_wo32(chan->inst, 0x03a0, 0x0fff0000);
57 nv_wo32(chan, 0x03a0, 0x0fff0000); 98 nvkm_wo32(chan->inst, 0x03a4, 0x0fff0000);
58 nv_wo32(chan, 0x03a4, 0x0fff0000); 99 nvkm_wo32(chan->inst, 0x047c, 0x00000101);
59 nv_wo32(chan, 0x047c, 0x00000101); 100 nvkm_wo32(chan->inst, 0x0490, 0x00000111);
60 nv_wo32(chan, 0x0490, 0x00000111); 101 nvkm_wo32(chan->inst, 0x04a8, 0x44400000);
61 nv_wo32(chan, 0x04a8, 0x44400000);
62 for (i = 0x04d4; i <= 0x04e0; i += 4) 102 for (i = 0x04d4; i <= 0x04e0; i += 4)
63 nv_wo32(chan, i, 0x00030303); 103 nvkm_wo32(chan->inst, i, 0x00030303);
64 for (i = 0x04f4; i <= 0x0500; i += 4) 104 for (i = 0x04f4; i <= 0x0500; i += 4)
65 nv_wo32(chan, i, 0x00080000); 105 nvkm_wo32(chan->inst, i, 0x00080000);
66 for (i = 0x050c; i <= 0x0518; i += 4) 106 for (i = 0x050c; i <= 0x0518; i += 4)
67 nv_wo32(chan, i, 0x01012000); 107 nvkm_wo32(chan->inst, i, 0x01012000);
68 for (i = 0x051c; i <= 0x0528; i += 4) 108 for (i = 0x051c; i <= 0x0528; i += 4)
69 nv_wo32(chan, i, 0x000105b8); 109 nvkm_wo32(chan->inst, i, 0x000105b8);
70 for (i = 0x052c; i <= 0x0538; i += 4) 110 for (i = 0x052c; i <= 0x0538; i += 4)
71 nv_wo32(chan, i, 0x00080008); 111 nvkm_wo32(chan->inst, i, 0x00080008);
72 for (i = 0x055c; i <= 0x0598; i += 4) 112 for (i = 0x055c; i <= 0x0598; i += 4)
73 nv_wo32(chan, i, 0x07ff0000); 113 nvkm_wo32(chan->inst, i, 0x07ff0000);
74 nv_wo32(chan, 0x05a4, 0x4b7fffff); 114 nvkm_wo32(chan->inst, 0x05a4, 0x4b7fffff);
75 nv_wo32(chan, 0x05fc, 0x00000001); 115 nvkm_wo32(chan->inst, 0x05fc, 0x00000001);
76 nv_wo32(chan, 0x0604, 0x00004000); 116 nvkm_wo32(chan->inst, 0x0604, 0x00004000);
77 nv_wo32(chan, 0x0610, 0x00000001); 117 nvkm_wo32(chan->inst, 0x0610, 0x00000001);
78 nv_wo32(chan, 0x0618, 0x00040000); 118 nvkm_wo32(chan->inst, 0x0618, 0x00040000);
79 nv_wo32(chan, 0x061c, 0x00010000); 119 nvkm_wo32(chan->inst, 0x061c, 0x00010000);
80 for (i = 0x1c1c; i <= 0x248c; i += 16) { 120 for (i = 0x1c1c; i <= 0x248c; i += 16) {
81 nv_wo32(chan, (i + 0), 0x10700ff9); 121 nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
82 nv_wo32(chan, (i + 4), 0x0436086c); 122 nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
83 nv_wo32(chan, (i + 8), 0x000c001b); 123 nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
84 } 124 }
85 nv_wo32(chan, 0x281c, 0x3f800000); 125 nvkm_wo32(chan->inst, 0x281c, 0x3f800000);
86 nv_wo32(chan, 0x2830, 0x3f800000); 126 nvkm_wo32(chan->inst, 0x2830, 0x3f800000);
87 nv_wo32(chan, 0x285c, 0x40000000); 127 nvkm_wo32(chan->inst, 0x285c, 0x40000000);
88 nv_wo32(chan, 0x2860, 0x3f800000); 128 nvkm_wo32(chan->inst, 0x2860, 0x3f800000);
89 nv_wo32(chan, 0x2864, 0x3f000000); 129 nvkm_wo32(chan->inst, 0x2864, 0x3f000000);
90 nv_wo32(chan, 0x286c, 0x40000000); 130 nvkm_wo32(chan->inst, 0x286c, 0x40000000);
91 nv_wo32(chan, 0x2870, 0x3f800000); 131 nvkm_wo32(chan->inst, 0x2870, 0x3f800000);
92 nv_wo32(chan, 0x2878, 0xbf800000); 132 nvkm_wo32(chan->inst, 0x2878, 0xbf800000);
93 nv_wo32(chan, 0x2880, 0xbf800000); 133 nvkm_wo32(chan->inst, 0x2880, 0xbf800000);
94 nv_wo32(chan, 0x34a4, 0x000fe000); 134 nvkm_wo32(chan->inst, 0x34a4, 0x000fe000);
95 nv_wo32(chan, 0x3530, 0x000003f8); 135 nvkm_wo32(chan->inst, 0x3530, 0x000003f8);
96 nv_wo32(chan, 0x3540, 0x002fe000); 136 nvkm_wo32(chan->inst, 0x3540, 0x002fe000);
97 for (i = 0x355c; i <= 0x3578; i += 4) 137 for (i = 0x355c; i <= 0x3578; i += 4)
98 nv_wo32(chan, i, 0x001c527c); 138 nvkm_wo32(chan->inst, i, 0x001c527c);
99 return 0; 139 nvkm_done(chan->inst);
100}
101
102int
103nv20_gr_context_init(struct nvkm_object *object)
104{
105 struct nv20_gr_priv *priv = (void *)object->engine;
106 struct nv20_gr_chan *chan = (void *)object;
107 int ret;
108
109 ret = nvkm_gr_context_init(&chan->base);
110 if (ret)
111 return ret;
112
113 nv_wo32(priv->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
114 return 0; 140 return 0;
115} 141}
116 142
117int
118nv20_gr_context_fini(struct nvkm_object *object, bool suspend)
119{
120 struct nv20_gr_priv *priv = (void *)object->engine;
121 struct nv20_gr_chan *chan = (void *)object;
122 int chid = -1;
123
124 nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
125 if (nv_rd32(priv, 0x400144) & 0x00010000)
126 chid = (nv_rd32(priv, 0x400148) & 0x1f000000) >> 24;
127 if (chan->chid == chid) {
128 nv_wr32(priv, 0x400784, nv_gpuobj(chan)->addr >> 4);
129 nv_wr32(priv, 0x400788, 0x00000002);
130 nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
131 nv_wr32(priv, 0x400144, 0x10000000);
132 nv_mask(priv, 0x400148, 0xff000000, 0x1f000000);
133 }
134 nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
135
136 nv_wo32(priv->ctxtab, chan->chid * 4, 0x00000000);
137 return nvkm_gr_context_fini(&chan->base, suspend);
138}
139
140static struct nvkm_oclass
141nv20_gr_cclass = {
142 .handle = NV_ENGCTX(GR, 0x20),
143 .ofuncs = &(struct nvkm_ofuncs) {
144 .ctor = nv20_gr_context_ctor,
145 .dtor = _nvkm_gr_context_dtor,
146 .init = nv20_gr_context_init,
147 .fini = nv20_gr_context_fini,
148 .rd32 = _nvkm_gr_context_rd32,
149 .wr32 = _nvkm_gr_context_wr32,
150 },
151};
152
153/******************************************************************************* 143/*******************************************************************************
154 * PGRAPH engine/subdev functions 144 * PGRAPH engine/subdev functions
155 ******************************************************************************/ 145 ******************************************************************************/
156 146
157void 147void
158nv20_gr_tile_prog(struct nvkm_engine *engine, int i) 148nv20_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
159{ 149{
160 struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i]; 150 struct nv20_gr *gr = nv20_gr(base);
161 struct nvkm_fifo *pfifo = nvkm_fifo(engine); 151 struct nvkm_device *device = gr->base.engine.subdev.device;
162 struct nv20_gr_priv *priv = (void *)engine; 152 struct nvkm_fifo *fifo = device->fifo;
163 unsigned long flags; 153 unsigned long flags;
164 154
165 pfifo->pause(pfifo, &flags); 155 nvkm_fifo_pause(fifo, &flags);
166 nv04_gr_idle(priv); 156 nv04_gr_idle(&gr->base);
167 157
168 nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit); 158 nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
169 nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch); 159 nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
170 nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr); 160 nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
171 161
172 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i); 162 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
173 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->limit); 163 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->limit);
174 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i); 164 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
175 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->pitch); 165 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->pitch);
176 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); 166 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
177 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr); 167 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->addr);
178 168
179 if (nv_device(engine)->chipset != 0x34) { 169 if (device->chipset != 0x34) {
180 nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp); 170 nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
181 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i); 171 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
182 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp); 172 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->zcomp);
183 } 173 }
184 174
185 pfifo->start(pfifo, &flags); 175 nvkm_fifo_start(fifo, &flags);
186} 176}
187 177
188void 178void
189nv20_gr_intr(struct nvkm_subdev *subdev) 179nv20_gr_intr(struct nvkm_gr *base)
190{ 180{
191 struct nvkm_engine *engine = nv_engine(subdev); 181 struct nv20_gr *gr = nv20_gr(base);
192 struct nvkm_object *engctx; 182 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
193 struct nvkm_handle *handle; 183 struct nvkm_device *device = subdev->device;
194 struct nv20_gr_priv *priv = (void *)subdev; 184 struct nvkm_fifo_chan *chan;
195 u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR); 185 u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
196 u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE); 186 u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
197 u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS); 187 u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
198 u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR); 188 u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
199 u32 chid = (addr & 0x01f00000) >> 20; 189 u32 chid = (addr & 0x01f00000) >> 20;
200 u32 subc = (addr & 0x00070000) >> 16; 190 u32 subc = (addr & 0x00070000) >> 16;
201 u32 mthd = (addr & 0x00001ffc); 191 u32 mthd = (addr & 0x00001ffc);
202 u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA); 192 u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
203 u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff; 193 u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xfff;
204 u32 show = stat; 194 u32 show = stat;
195 char msg[128], src[128], sta[128];
196 unsigned long flags;
205 197
206 engctx = nvkm_engctx_get(engine, chid); 198 chan = nvkm_fifo_chan_chid(device->fifo, chid, &flags);
207 if (stat & NV_PGRAPH_INTR_ERROR) {
208 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
209 handle = nvkm_handle_get_class(engctx, class);
210 if (handle && !nv_call(handle->object, mthd, data))
211 show &= ~NV_PGRAPH_INTR_ERROR;
212 nvkm_handle_put(handle);
213 }
214 }
215 199
216 nv_wr32(priv, NV03_PGRAPH_INTR, stat); 200 nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
217 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); 201 nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
218 202
219 if (show) { 203 if (show) {
220 nv_error(priv, "%s", ""); 204 nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
221 nvkm_bitfield_print(nv10_gr_intr_name, show); 205 nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
222 pr_cont(" nsource:"); 206 nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
223 nvkm_bitfield_print(nv04_gr_nsource, nsource); 207 nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
224 pr_cont(" nstatus:"); 208 "nstatus %08x [%s] ch %d [%s] subc %d "
225 nvkm_bitfield_print(nv10_gr_nstatus, nstatus); 209 "class %04x mthd %04x data %08x\n",
226 pr_cont("\n"); 210 show, msg, nsource, src, nstatus, sta, chid,
227 nv_error(priv, 211 chan ? chan->object.client->name : "unknown",
228 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", 212 subc, class, mthd, data);
229 chid, nvkm_client_name(engctx), subc, class, mthd,
230 data);
231 } 213 }
232 214
233 nvkm_engctx_put(engctx); 215 nvkm_fifo_chan_put(device->fifo, flags, &chan);
234}
235
236static int
237nv20_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
238 struct nvkm_oclass *oclass, void *data, u32 size,
239 struct nvkm_object **pobject)
240{
241 struct nv20_gr_priv *priv;
242 int ret;
243
244 ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
245 *pobject = nv_object(priv);
246 if (ret)
247 return ret;
248
249 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
250 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
251 if (ret)
252 return ret;
253
254 nv_subdev(priv)->unit = 0x00001000;
255 nv_subdev(priv)->intr = nv20_gr_intr;
256 nv_engine(priv)->cclass = &nv20_gr_cclass;
257 nv_engine(priv)->sclass = nv20_gr_sclass;
258 nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
259 return 0;
260} 216}
261 217
262void 218int
263nv20_gr_dtor(struct nvkm_object *object) 219nv20_gr_oneinit(struct nvkm_gr *base)
264{ 220{
265 struct nv20_gr_priv *priv = (void *)object; 221 struct nv20_gr *gr = nv20_gr(base);
266 nvkm_gpuobj_ref(NULL, &priv->ctxtab); 222 return nvkm_memory_new(gr->base.engine.subdev.device,
267 nvkm_gr_destroy(&priv->base); 223 NVKM_MEM_TARGET_INST, 32 * 4, 16,
224 true, &gr->ctxtab);
268} 225}
269 226
270int 227int
271nv20_gr_init(struct nvkm_object *object) 228nv20_gr_init(struct nvkm_gr *base)
272{ 229{
273 struct nvkm_engine *engine = nv_engine(object); 230 struct nv20_gr *gr = nv20_gr(base);
274 struct nv20_gr_priv *priv = (void *)engine; 231 struct nvkm_device *device = gr->base.engine.subdev.device;
275 struct nvkm_fb *pfb = nvkm_fb(object);
276 u32 tmp, vramsz; 232 u32 tmp, vramsz;
277 int ret, i; 233 int i;
278
279 ret = nvkm_gr_init(&priv->base);
280 if (ret)
281 return ret;
282 234
283 nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4); 235 nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE,
236 nvkm_memory_addr(gr->ctxtab) >> 4);
284 237
285 if (nv_device(priv)->chipset == 0x20) { 238 if (device->chipset == 0x20) {
286 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x003d0000); 239 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
287 for (i = 0; i < 15; i++) 240 for (i = 0; i < 15; i++)
288 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000); 241 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000);
289 nv_wait(priv, 0x400700, 0xffffffff, 0x00000000); 242 nvkm_msec(device, 2000,
243 if (!nvkm_rd32(device, 0x400700))
244 break;
245 );
290 } else { 246 } else {
291 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x02c80000); 247 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
292 for (i = 0; i < 32; i++) 248 for (i = 0; i < 32; i++)
293 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000); 249 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000);
294 nv_wait(priv, 0x400700, 0xffffffff, 0x00000000); 250 nvkm_msec(device, 2000,
251 if (!nvkm_rd32(device, 0x400700))
252 break;
253 );
295 } 254 }
296 255
297 nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF); 256 nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF);
298 nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 257 nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
299 258
300 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); 259 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
301 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000); 260 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
302 nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700); 261 nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x00118700);
303 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */ 262 nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
304 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000); 263 nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00000000);
305 nv_wr32(priv, 0x40009C , 0x00000040); 264 nvkm_wr32(device, 0x40009C , 0x00000040);
306 265
307 if (nv_device(priv)->chipset >= 0x25) { 266 if (device->chipset >= 0x25) {
308 nv_wr32(priv, 0x400890, 0x00a8cfff); 267 nvkm_wr32(device, 0x400890, 0x00a8cfff);
309 nv_wr32(priv, 0x400610, 0x304B1FB6); 268 nvkm_wr32(device, 0x400610, 0x304B1FB6);
310 nv_wr32(priv, 0x400B80, 0x1cbd3883); 269 nvkm_wr32(device, 0x400B80, 0x1cbd3883);
311 nv_wr32(priv, 0x400B84, 0x44000000); 270 nvkm_wr32(device, 0x400B84, 0x44000000);
312 nv_wr32(priv, 0x400098, 0x40000080); 271 nvkm_wr32(device, 0x400098, 0x40000080);
313 nv_wr32(priv, 0x400B88, 0x000000ff); 272 nvkm_wr32(device, 0x400B88, 0x000000ff);
314 273
315 } else { 274 } else {
316 nv_wr32(priv, 0x400880, 0x0008c7df); 275 nvkm_wr32(device, 0x400880, 0x0008c7df);
317 nv_wr32(priv, 0x400094, 0x00000005); 276 nvkm_wr32(device, 0x400094, 0x00000005);
318 nv_wr32(priv, 0x400B80, 0x45eae20e); 277 nvkm_wr32(device, 0x400B80, 0x45eae20e);
319 nv_wr32(priv, 0x400B84, 0x24000000); 278 nvkm_wr32(device, 0x400B84, 0x24000000);
320 nv_wr32(priv, 0x400098, 0x00000040); 279 nvkm_wr32(device, 0x400098, 0x00000040);
321 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00038); 280 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
322 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030); 281 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000030);
323 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E10038); 282 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
324 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030); 283 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000030);
325 } 284 }
326 285
327 /* Turn all the tiling regions off. */ 286 nvkm_wr32(device, 0x4009a0, nvkm_rd32(device, 0x100324));
328 for (i = 0; i < pfb->tile.regions; i++) 287 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
329 engine->tile_prog(engine, i); 288 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, nvkm_rd32(device, 0x100324));
330 289
331 nv_wr32(priv, 0x4009a0, nv_rd32(priv, 0x100324)); 290 nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
332 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA000C); 291 nvkm_wr32(device, NV10_PGRAPH_STATE , 0xFFFFFFFF);
333 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, nv_rd32(priv, 0x100324));
334 292
335 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100); 293 tmp = nvkm_rd32(device, NV10_PGRAPH_SURFACE) & 0x0007ff00;
336 nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF); 294 nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
337 295 tmp = nvkm_rd32(device, NV10_PGRAPH_SURFACE) | 0x00020100;
338 tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) & 0x0007ff00; 296 nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
339 nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
340 tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) | 0x00020100;
341 nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
342 297
343 /* begin RAM config */ 298 /* begin RAM config */
344 vramsz = nv_device_resource_len(nv_device(priv), 0) - 1; 299 vramsz = device->func->resource_size(device, 1) - 1;
345 nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200)); 300 nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
346 nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204)); 301 nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
347 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000); 302 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
348 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100200)); 303 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , nvkm_rd32(device, 0x100200));
349 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004); 304 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
350 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100204)); 305 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , nvkm_rd32(device, 0x100204));
351 nv_wr32(priv, 0x400820, 0); 306 nvkm_wr32(device, 0x400820, 0);
352 nv_wr32(priv, 0x400824, 0); 307 nvkm_wr32(device, 0x400824, 0);
353 nv_wr32(priv, 0x400864, vramsz - 1); 308 nvkm_wr32(device, 0x400864, vramsz - 1);
354 nv_wr32(priv, 0x400868, vramsz - 1); 309 nvkm_wr32(device, 0x400868, vramsz - 1);
355 310
356 /* interesting.. the below overwrites some of the tile setup above.. */ 311 /* interesting.. the below overwrites some of the tile setup above.. */
357 nv_wr32(priv, 0x400B20, 0x00000000); 312 nvkm_wr32(device, 0x400B20, 0x00000000);
358 nv_wr32(priv, 0x400B04, 0xFFFFFFFF); 313 nvkm_wr32(device, 0x400B04, 0xFFFFFFFF);
359 314
360 nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMIN, 0); 315 nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
361 nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMIN, 0); 316 nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
362 nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); 317 nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
363 nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); 318 nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
364 return 0; 319 return 0;
365} 320}
366 321
367struct nvkm_oclass 322void *
368nv20_gr_oclass = { 323nv20_gr_dtor(struct nvkm_gr *base)
369 .handle = NV_ENGINE(GR, 0x20), 324{
370 .ofuncs = &(struct nvkm_ofuncs) { 325 struct nv20_gr *gr = nv20_gr(base);
371 .ctor = nv20_gr_ctor, 326 nvkm_memory_del(&gr->ctxtab);
372 .dtor = nv20_gr_dtor, 327 return gr;
373 .init = nv20_gr_init, 328}
374 .fini = _nvkm_gr_fini, 329
375 }, 330int
331nv20_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
332 int index, struct nvkm_gr **pgr)
333{
334 struct nv20_gr *gr;
335
336 if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
337 return -ENOMEM;
338 *pgr = &gr->base;
339
340 return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
341}
342
343static const struct nvkm_gr_func
344nv20_gr = {
345 .dtor = nv20_gr_dtor,
346 .oneinit = nv20_gr_oneinit,
347 .init = nv20_gr_init,
348 .intr = nv20_gr_intr,
349 .tile = nv20_gr_tile,
350 .chan_new = nv20_gr_chan_new,
351 .sclass = {
352 { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
353 { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
354 { -1, -1, 0x0030, &nv04_gr_object }, /* null */
355 { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
356 { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
357 { -1, -1, 0x0044, &nv04_gr_object }, /* patt */
358 { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
359 { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
360 { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
361 { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
362 { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
363 { -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
364 { -1, -1, 0x0097, &nv04_gr_object }, /* kelvin */
365 { -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
366 { -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
367 {}
368 }
376}; 369};
370
371int
372nv20_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
373{
374 return nv20_gr_new_(&nv20_gr, device, index, pgr);
375}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
index ac4dc048fed1..cdf4501e3798 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
@@ -1,26 +1,33 @@
1#ifndef __NV20_GR_H__ 1#ifndef __NV20_GR_H__
2#define __NV20_GR_H__ 2#define __NV20_GR_H__
3#include <engine/gr.h> 3#define nv20_gr(p) container_of((p), struct nv20_gr, base)
4#include "priv.h"
4 5
5struct nv20_gr_priv { 6struct nv20_gr {
6 struct nvkm_gr base; 7 struct nvkm_gr base;
7 struct nvkm_gpuobj *ctxtab; 8 struct nvkm_memory *ctxtab;
8}; 9};
9 10
10struct nv20_gr_chan { 11int nv20_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *,
11 struct nvkm_gr_chan base; 12 int, struct nvkm_gr **);
12 int chid; 13void *nv20_gr_dtor(struct nvkm_gr *);
13}; 14int nv20_gr_oneinit(struct nvkm_gr *);
15int nv20_gr_init(struct nvkm_gr *);
16void nv20_gr_intr(struct nvkm_gr *);
17void nv20_gr_tile(struct nvkm_gr *, int, struct nvkm_fb_tile *);
14 18
15extern struct nvkm_oclass nv25_gr_sclass[]; 19int nv30_gr_init(struct nvkm_gr *);
16int nv20_gr_context_init(struct nvkm_object *);
17int nv20_gr_context_fini(struct nvkm_object *, bool);
18 20
19void nv20_gr_tile_prog(struct nvkm_engine *, int); 21#define nv20_gr_chan(p) container_of((p), struct nv20_gr_chan, object)
20void nv20_gr_intr(struct nvkm_subdev *);
21 22
22void nv20_gr_dtor(struct nvkm_object *); 23struct nv20_gr_chan {
23int nv20_gr_init(struct nvkm_object *); 24 struct nvkm_object object;
25 struct nv20_gr *gr;
26 int chid;
27 struct nvkm_memory *inst;
28};
24 29
25int nv30_gr_init(struct nvkm_object *); 30void *nv20_gr_chan_dtor(struct nvkm_object *);
31int nv20_gr_chan_init(struct nvkm_object *);
32int nv20_gr_chan_fini(struct nvkm_object *, bool);
26#endif 33#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
index bc362519cebb..6c4a00819b4b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
@@ -1,158 +1,134 @@
1#include "nv20.h" 1#include "nv20.h"
2#include "regs.h" 2#include "regs.h"
3 3
4#include <core/gpuobj.h>
4#include <engine/fifo.h> 5#include <engine/fifo.h>
6#include <engine/fifo/chan.h>
5 7
6/******************************************************************************* 8/*******************************************************************************
7 * Graphics object classes 9 * PGRAPH context
8 ******************************************************************************/ 10 ******************************************************************************/
9 11
10struct nvkm_oclass 12static const struct nvkm_object_func
11nv25_gr_sclass[] = { 13nv25_gr_chan = {
12 { 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */ 14 .dtor = nv20_gr_chan_dtor,
13 { 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */ 15 .init = nv20_gr_chan_init,
14 { 0x0030, &nv04_gr_ofuncs, NULL }, /* null */ 16 .fini = nv20_gr_chan_fini,
15 { 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
16 { 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
17 { 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
18 { 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
19 { 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
20 { 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
21 { 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
22 { 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
23 { 0x0096, &nv04_gr_ofuncs, NULL }, /* celcius */
24 { 0x009e, &nv04_gr_ofuncs, NULL }, /* swzsurf */
25 { 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
26 { 0x0597, &nv04_gr_ofuncs, NULL }, /* kelvin */
27 {},
28}; 17};
29 18
30/*******************************************************************************
31 * PGRAPH context
32 ******************************************************************************/
33
34static int 19static int
35nv25_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 20nv25_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
36 struct nvkm_oclass *oclass, void *data, u32 size, 21 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
37 struct nvkm_object **pobject)
38{ 22{
23 struct nv20_gr *gr = nv20_gr(base);
39 struct nv20_gr_chan *chan; 24 struct nv20_gr_chan *chan;
40 int ret, i; 25 int ret, i;
41 26
42 ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x3724, 27 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
43 16, NVOBJ_FLAG_ZERO_ALLOC, &chan); 28 return -ENOMEM;
44 *pobject = nv_object(chan); 29 nvkm_object_ctor(&nv25_gr_chan, oclass, &chan->object);
30 chan->gr = gr;
31 chan->chid = fifoch->chid;
32 *pobject = &chan->object;
33
34 ret = nvkm_memory_new(gr->base.engine.subdev.device,
35 NVKM_MEM_TARGET_INST, 0x3724, 16, true,
36 &chan->inst);
45 if (ret) 37 if (ret)
46 return ret; 38 return ret;
47 39
48 chan->chid = nvkm_fifo_chan(parent)->chid; 40 nvkm_kmap(chan->inst);
49 41 nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
50 nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24)); 42 nvkm_wo32(chan->inst, 0x035c, 0xffff0000);
51 nv_wo32(chan, 0x035c, 0xffff0000); 43 nvkm_wo32(chan->inst, 0x03c0, 0x0fff0000);
52 nv_wo32(chan, 0x03c0, 0x0fff0000); 44 nvkm_wo32(chan->inst, 0x03c4, 0x0fff0000);
53 nv_wo32(chan, 0x03c4, 0x0fff0000); 45 nvkm_wo32(chan->inst, 0x049c, 0x00000101);
54 nv_wo32(chan, 0x049c, 0x00000101); 46 nvkm_wo32(chan->inst, 0x04b0, 0x00000111);
55 nv_wo32(chan, 0x04b0, 0x00000111); 47 nvkm_wo32(chan->inst, 0x04c8, 0x00000080);
56 nv_wo32(chan, 0x04c8, 0x00000080); 48 nvkm_wo32(chan->inst, 0x04cc, 0xffff0000);
57 nv_wo32(chan, 0x04cc, 0xffff0000); 49 nvkm_wo32(chan->inst, 0x04d0, 0x00000001);
58 nv_wo32(chan, 0x04d0, 0x00000001); 50 nvkm_wo32(chan->inst, 0x04e4, 0x44400000);
59 nv_wo32(chan, 0x04e4, 0x44400000); 51 nvkm_wo32(chan->inst, 0x04fc, 0x4b800000);
60 nv_wo32(chan, 0x04fc, 0x4b800000);
61 for (i = 0x0510; i <= 0x051c; i += 4) 52 for (i = 0x0510; i <= 0x051c; i += 4)
62 nv_wo32(chan, i, 0x00030303); 53 nvkm_wo32(chan->inst, i, 0x00030303);
63 for (i = 0x0530; i <= 0x053c; i += 4) 54 for (i = 0x0530; i <= 0x053c; i += 4)
64 nv_wo32(chan, i, 0x00080000); 55 nvkm_wo32(chan->inst, i, 0x00080000);
65 for (i = 0x0548; i <= 0x0554; i += 4) 56 for (i = 0x0548; i <= 0x0554; i += 4)
66 nv_wo32(chan, i, 0x01012000); 57 nvkm_wo32(chan->inst, i, 0x01012000);
67 for (i = 0x0558; i <= 0x0564; i += 4) 58 for (i = 0x0558; i <= 0x0564; i += 4)
68 nv_wo32(chan, i, 0x000105b8); 59 nvkm_wo32(chan->inst, i, 0x000105b8);
69 for (i = 0x0568; i <= 0x0574; i += 4) 60 for (i = 0x0568; i <= 0x0574; i += 4)
70 nv_wo32(chan, i, 0x00080008); 61 nvkm_wo32(chan->inst, i, 0x00080008);
71 for (i = 0x0598; i <= 0x05d4; i += 4) 62 for (i = 0x0598; i <= 0x05d4; i += 4)
72 nv_wo32(chan, i, 0x07ff0000); 63 nvkm_wo32(chan->inst, i, 0x07ff0000);
73 nv_wo32(chan, 0x05e0, 0x4b7fffff); 64 nvkm_wo32(chan->inst, 0x05e0, 0x4b7fffff);
74 nv_wo32(chan, 0x0620, 0x00000080); 65 nvkm_wo32(chan->inst, 0x0620, 0x00000080);
75 nv_wo32(chan, 0x0624, 0x30201000); 66 nvkm_wo32(chan->inst, 0x0624, 0x30201000);
76 nv_wo32(chan, 0x0628, 0x70605040); 67 nvkm_wo32(chan->inst, 0x0628, 0x70605040);
77 nv_wo32(chan, 0x062c, 0xb0a09080); 68 nvkm_wo32(chan->inst, 0x062c, 0xb0a09080);
78 nv_wo32(chan, 0x0630, 0xf0e0d0c0); 69 nvkm_wo32(chan->inst, 0x0630, 0xf0e0d0c0);
79 nv_wo32(chan, 0x0664, 0x00000001); 70 nvkm_wo32(chan->inst, 0x0664, 0x00000001);
80 nv_wo32(chan, 0x066c, 0x00004000); 71 nvkm_wo32(chan->inst, 0x066c, 0x00004000);
81 nv_wo32(chan, 0x0678, 0x00000001); 72 nvkm_wo32(chan->inst, 0x0678, 0x00000001);
82 nv_wo32(chan, 0x0680, 0x00040000); 73 nvkm_wo32(chan->inst, 0x0680, 0x00040000);
83 nv_wo32(chan, 0x0684, 0x00010000); 74 nvkm_wo32(chan->inst, 0x0684, 0x00010000);
84 for (i = 0x1b04; i <= 0x2374; i += 16) { 75 for (i = 0x1b04; i <= 0x2374; i += 16) {
85 nv_wo32(chan, (i + 0), 0x10700ff9); 76 nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
86 nv_wo32(chan, (i + 4), 0x0436086c); 77 nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
87 nv_wo32(chan, (i + 8), 0x000c001b); 78 nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
88 } 79 }
89 nv_wo32(chan, 0x2704, 0x3f800000); 80 nvkm_wo32(chan->inst, 0x2704, 0x3f800000);
90 nv_wo32(chan, 0x2718, 0x3f800000); 81 nvkm_wo32(chan->inst, 0x2718, 0x3f800000);
91 nv_wo32(chan, 0x2744, 0x40000000); 82 nvkm_wo32(chan->inst, 0x2744, 0x40000000);
92 nv_wo32(chan, 0x2748, 0x3f800000); 83 nvkm_wo32(chan->inst, 0x2748, 0x3f800000);
93 nv_wo32(chan, 0x274c, 0x3f000000); 84 nvkm_wo32(chan->inst, 0x274c, 0x3f000000);
94 nv_wo32(chan, 0x2754, 0x40000000); 85 nvkm_wo32(chan->inst, 0x2754, 0x40000000);
95 nv_wo32(chan, 0x2758, 0x3f800000); 86 nvkm_wo32(chan->inst, 0x2758, 0x3f800000);
96 nv_wo32(chan, 0x2760, 0xbf800000); 87 nvkm_wo32(chan->inst, 0x2760, 0xbf800000);
97 nv_wo32(chan, 0x2768, 0xbf800000); 88 nvkm_wo32(chan->inst, 0x2768, 0xbf800000);
98 nv_wo32(chan, 0x308c, 0x000fe000); 89 nvkm_wo32(chan->inst, 0x308c, 0x000fe000);
99 nv_wo32(chan, 0x3108, 0x000003f8); 90 nvkm_wo32(chan->inst, 0x3108, 0x000003f8);
100 nv_wo32(chan, 0x3468, 0x002fe000); 91 nvkm_wo32(chan->inst, 0x3468, 0x002fe000);
101 for (i = 0x3484; i <= 0x34a0; i += 4) 92 for (i = 0x3484; i <= 0x34a0; i += 4)
102 nv_wo32(chan, i, 0x001c527c); 93 nvkm_wo32(chan->inst, i, 0x001c527c);
94 nvkm_done(chan->inst);
103 return 0; 95 return 0;
104} 96}
105 97
106static struct nvkm_oclass
107nv25_gr_cclass = {
108 .handle = NV_ENGCTX(GR, 0x25),
109 .ofuncs = &(struct nvkm_ofuncs) {
110 .ctor = nv25_gr_context_ctor,
111 .dtor = _nvkm_gr_context_dtor,
112 .init = nv20_gr_context_init,
113 .fini = nv20_gr_context_fini,
114 .rd32 = _nvkm_gr_context_rd32,
115 .wr32 = _nvkm_gr_context_wr32,
116 },
117};
118
119/******************************************************************************* 98/*******************************************************************************
120 * PGRAPH engine/subdev functions 99 * PGRAPH engine/subdev functions
121 ******************************************************************************/ 100 ******************************************************************************/
122 101
123static int 102static const struct nvkm_gr_func
124nv25_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 103nv25_gr = {
125 struct nvkm_oclass *oclass, void *data, u32 size, 104 .dtor = nv20_gr_dtor,
126 struct nvkm_object **pobject) 105 .oneinit = nv20_gr_oneinit,
127{ 106 .init = nv20_gr_init,
128 struct nv20_gr_priv *priv; 107 .intr = nv20_gr_intr,
129 int ret; 108 .tile = nv20_gr_tile,
130 109 .chan_new = nv25_gr_chan_new,
131 ret = nvkm_gr_create(parent, engine, oclass, true, &priv); 110 .sclass = {
132 *pobject = nv_object(priv); 111 { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
133 if (ret) 112 { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
134 return ret; 113 { -1, -1, 0x0030, &nv04_gr_object }, /* null */
135 114 { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
136 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16, 115 { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
137 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); 116 { -1, -1, 0x0044, &nv04_gr_object }, /* patt */
138 if (ret) 117 { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
139 return ret; 118 { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
119 { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
120 { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
121 { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
122 { -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
123 { -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
124 { -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
125 { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
126 {}
127 }
128};
140 129
141 nv_subdev(priv)->unit = 0x00001000; 130int
142 nv_subdev(priv)->intr = nv20_gr_intr; 131nv25_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
143 nv_engine(priv)->cclass = &nv25_gr_cclass; 132{
144 nv_engine(priv)->sclass = nv25_gr_sclass; 133 return nv20_gr_new_(&nv25_gr, device, index, pgr);
145 nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
146 return 0;
147} 134}
148
149struct nvkm_oclass
150nv25_gr_oclass = {
151 .handle = NV_ENGINE(GR, 0x25),
152 .ofuncs = &(struct nvkm_ofuncs) {
153 .ctor = nv25_gr_ctor,
154 .dtor = nv20_gr_dtor,
155 .init = nv20_gr_init,
156 .fini = _nvkm_gr_fini,
157 },
158};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
index 22a5096e283d..3cad26dbc2b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
@@ -1,125 +1,125 @@
1#include "nv20.h" 1#include "nv20.h"
2#include "regs.h" 2#include "regs.h"
3 3
4#include <core/gpuobj.h>
4#include <engine/fifo.h> 5#include <engine/fifo.h>
6#include <engine/fifo/chan.h>
5 7
6/******************************************************************************* 8/*******************************************************************************
7 * PGRAPH context 9 * PGRAPH context
8 ******************************************************************************/ 10 ******************************************************************************/
9 11
12static const struct nvkm_object_func
13nv2a_gr_chan = {
14 .dtor = nv20_gr_chan_dtor,
15 .init = nv20_gr_chan_init,
16 .fini = nv20_gr_chan_fini,
17};
18
10static int 19static int
11nv2a_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 20nv2a_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
12 struct nvkm_oclass *oclass, void *data, u32 size, 21 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
13 struct nvkm_object **pobject)
14{ 22{
23 struct nv20_gr *gr = nv20_gr(base);
15 struct nv20_gr_chan *chan; 24 struct nv20_gr_chan *chan;
16 int ret, i; 25 int ret, i;
17 26
18 ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x36b0, 27 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
19 16, NVOBJ_FLAG_ZERO_ALLOC, &chan); 28 return -ENOMEM;
20 *pobject = nv_object(chan); 29 nvkm_object_ctor(&nv2a_gr_chan, oclass, &chan->object);
30 chan->gr = gr;
31 chan->chid = fifoch->chid;
32 *pobject = &chan->object;
33
34 ret = nvkm_memory_new(gr->base.engine.subdev.device,
35 NVKM_MEM_TARGET_INST, 0x36b0, 16, true,
36 &chan->inst);
21 if (ret) 37 if (ret)
22 return ret; 38 return ret;
23 39
24 chan->chid = nvkm_fifo_chan(parent)->chid; 40 nvkm_kmap(chan->inst);
25 41 nvkm_wo32(chan->inst, 0x0000, 0x00000001 | (chan->chid << 24));
26 nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24)); 42 nvkm_wo32(chan->inst, 0x033c, 0xffff0000);
27 nv_wo32(chan, 0x033c, 0xffff0000); 43 nvkm_wo32(chan->inst, 0x03a0, 0x0fff0000);
28 nv_wo32(chan, 0x03a0, 0x0fff0000); 44 nvkm_wo32(chan->inst, 0x03a4, 0x0fff0000);
29 nv_wo32(chan, 0x03a4, 0x0fff0000); 45 nvkm_wo32(chan->inst, 0x047c, 0x00000101);
30 nv_wo32(chan, 0x047c, 0x00000101); 46 nvkm_wo32(chan->inst, 0x0490, 0x00000111);
31 nv_wo32(chan, 0x0490, 0x00000111); 47 nvkm_wo32(chan->inst, 0x04a8, 0x44400000);
32 nv_wo32(chan, 0x04a8, 0x44400000);
33 for (i = 0x04d4; i <= 0x04e0; i += 4) 48 for (i = 0x04d4; i <= 0x04e0; i += 4)
34 nv_wo32(chan, i, 0x00030303); 49 nvkm_wo32(chan->inst, i, 0x00030303);
35 for (i = 0x04f4; i <= 0x0500; i += 4) 50 for (i = 0x04f4; i <= 0x0500; i += 4)
36 nv_wo32(chan, i, 0x00080000); 51 nvkm_wo32(chan->inst, i, 0x00080000);
37 for (i = 0x050c; i <= 0x0518; i += 4) 52 for (i = 0x050c; i <= 0x0518; i += 4)
38 nv_wo32(chan, i, 0x01012000); 53 nvkm_wo32(chan->inst, i, 0x01012000);
39 for (i = 0x051c; i <= 0x0528; i += 4) 54 for (i = 0x051c; i <= 0x0528; i += 4)
40 nv_wo32(chan, i, 0x000105b8); 55 nvkm_wo32(chan->inst, i, 0x000105b8);
41 for (i = 0x052c; i <= 0x0538; i += 4) 56 for (i = 0x052c; i <= 0x0538; i += 4)
42 nv_wo32(chan, i, 0x00080008); 57 nvkm_wo32(chan->inst, i, 0x00080008);
43 for (i = 0x055c; i <= 0x0598; i += 4) 58 for (i = 0x055c; i <= 0x0598; i += 4)
44 nv_wo32(chan, i, 0x07ff0000); 59 nvkm_wo32(chan->inst, i, 0x07ff0000);
45 nv_wo32(chan, 0x05a4, 0x4b7fffff); 60 nvkm_wo32(chan->inst, 0x05a4, 0x4b7fffff);
46 nv_wo32(chan, 0x05fc, 0x00000001); 61 nvkm_wo32(chan->inst, 0x05fc, 0x00000001);
47 nv_wo32(chan, 0x0604, 0x00004000); 62 nvkm_wo32(chan->inst, 0x0604, 0x00004000);
48 nv_wo32(chan, 0x0610, 0x00000001); 63 nvkm_wo32(chan->inst, 0x0610, 0x00000001);
49 nv_wo32(chan, 0x0618, 0x00040000); 64 nvkm_wo32(chan->inst, 0x0618, 0x00040000);
50 nv_wo32(chan, 0x061c, 0x00010000); 65 nvkm_wo32(chan->inst, 0x061c, 0x00010000);
51 for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */ 66 for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
52 nv_wo32(chan, (i + 0), 0x10700ff9); 67 nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
53 nv_wo32(chan, (i + 4), 0x0436086c); 68 nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
54 nv_wo32(chan, (i + 8), 0x000c001b); 69 nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
55 } 70 }
56 nv_wo32(chan, 0x269c, 0x3f800000); 71 nvkm_wo32(chan->inst, 0x269c, 0x3f800000);
57 nv_wo32(chan, 0x26b0, 0x3f800000); 72 nvkm_wo32(chan->inst, 0x26b0, 0x3f800000);
58 nv_wo32(chan, 0x26dc, 0x40000000); 73 nvkm_wo32(chan->inst, 0x26dc, 0x40000000);
59 nv_wo32(chan, 0x26e0, 0x3f800000); 74 nvkm_wo32(chan->inst, 0x26e0, 0x3f800000);
60 nv_wo32(chan, 0x26e4, 0x3f000000); 75 nvkm_wo32(chan->inst, 0x26e4, 0x3f000000);
61 nv_wo32(chan, 0x26ec, 0x40000000); 76 nvkm_wo32(chan->inst, 0x26ec, 0x40000000);
62 nv_wo32(chan, 0x26f0, 0x3f800000); 77 nvkm_wo32(chan->inst, 0x26f0, 0x3f800000);
63 nv_wo32(chan, 0x26f8, 0xbf800000); 78 nvkm_wo32(chan->inst, 0x26f8, 0xbf800000);
64 nv_wo32(chan, 0x2700, 0xbf800000); 79 nvkm_wo32(chan->inst, 0x2700, 0xbf800000);
65 nv_wo32(chan, 0x3024, 0x000fe000); 80 nvkm_wo32(chan->inst, 0x3024, 0x000fe000);
66 nv_wo32(chan, 0x30a0, 0x000003f8); 81 nvkm_wo32(chan->inst, 0x30a0, 0x000003f8);
67 nv_wo32(chan, 0x33fc, 0x002fe000); 82 nvkm_wo32(chan->inst, 0x33fc, 0x002fe000);
68 for (i = 0x341c; i <= 0x3438; i += 4) 83 for (i = 0x341c; i <= 0x3438; i += 4)
69 nv_wo32(chan, i, 0x001c527c); 84 nvkm_wo32(chan->inst, i, 0x001c527c);
85 nvkm_done(chan->inst);
70 return 0; 86 return 0;
71} 87}
72 88
73static struct nvkm_oclass
74nv2a_gr_cclass = {
75 .handle = NV_ENGCTX(GR, 0x2a),
76 .ofuncs = &(struct nvkm_ofuncs) {
77 .ctor = nv2a_gr_context_ctor,
78 .dtor = _nvkm_gr_context_dtor,
79 .init = nv20_gr_context_init,
80 .fini = nv20_gr_context_fini,
81 .rd32 = _nvkm_gr_context_rd32,
82 .wr32 = _nvkm_gr_context_wr32,
83 },
84};
85
86/******************************************************************************* 89/*******************************************************************************
87 * PGRAPH engine/subdev functions 90 * PGRAPH engine/subdev functions
88 ******************************************************************************/ 91 ******************************************************************************/
89 92
90static int 93static const struct nvkm_gr_func
91nv2a_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 94nv2a_gr = {
92 struct nvkm_oclass *oclass, void *data, u32 size, 95 .dtor = nv20_gr_dtor,
93 struct nvkm_object **pobject) 96 .oneinit = nv20_gr_oneinit,
94{ 97 .init = nv20_gr_init,
95 struct nv20_gr_priv *priv; 98 .intr = nv20_gr_intr,
96 int ret; 99 .tile = nv20_gr_tile,
97 100 .chan_new = nv2a_gr_chan_new,
98 ret = nvkm_gr_create(parent, engine, oclass, true, &priv); 101 .sclass = {
99 *pobject = nv_object(priv); 102 { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
100 if (ret) 103 { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
101 return ret; 104 { -1, -1, 0x0030, &nv04_gr_object }, /* null */
102 105 { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
103 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16, 106 { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
104 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); 107 { -1, -1, 0x0044, &nv04_gr_object }, /* patt */
105 if (ret) 108 { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
106 return ret; 109 { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
110 { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
111 { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
112 { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
113 { -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
114 { -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
115 { -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
116 { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
117 {}
118 }
119};
107 120
108 nv_subdev(priv)->unit = 0x00001000; 121int
109 nv_subdev(priv)->intr = nv20_gr_intr; 122nv2a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
110 nv_engine(priv)->cclass = &nv2a_gr_cclass; 123{
111 nv_engine(priv)->sclass = nv25_gr_sclass; 124 return nv20_gr_new_(&nv2a_gr, device, index, pgr);
112 nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
113 return 0;
114} 125}
115
116struct nvkm_oclass
117nv2a_gr_oclass = {
118 .handle = NV_ENGINE(GR, 0x2a),
119 .ofuncs = &(struct nvkm_ofuncs) {
120 .ctor = nv2a_gr_ctor,
121 .dtor = nv20_gr_dtor,
122 .init = nv20_gr_init,
123 .fini = _nvkm_gr_fini,
124 },
125};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index dcc84eb54fb6..69de8c6259fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -1,231 +1,198 @@
1#include "nv20.h" 1#include "nv20.h"
2#include "regs.h" 2#include "regs.h"
3 3
4#include <core/device.h> 4#include <core/gpuobj.h>
5#include <engine/fifo.h> 5#include <engine/fifo.h>
6#include <engine/fifo/chan.h>
6#include <subdev/fb.h> 7#include <subdev/fb.h>
7 8
8/******************************************************************************* 9/*******************************************************************************
9 * Graphics object classes 10 * PGRAPH context
10 ******************************************************************************/ 11 ******************************************************************************/
11 12
12static struct nvkm_oclass 13static const struct nvkm_object_func
13nv30_gr_sclass[] = { 14nv30_gr_chan = {
14 { 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */ 15 .dtor = nv20_gr_chan_dtor,
15 { 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */ 16 .init = nv20_gr_chan_init,
16 { 0x0030, &nv04_gr_ofuncs, NULL }, /* null */ 17 .fini = nv20_gr_chan_fini,
17 { 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
18 { 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
19 { 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
20 { 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
21 { 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
22 { 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
23 { 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
24 { 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
25 { 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
26 { 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
27 { 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
28 { 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
29 { 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
30 { 0x0397, &nv04_gr_ofuncs, NULL }, /* rankine */
31 {},
32}; 18};
33 19
34/*******************************************************************************
35 * PGRAPH context
36 ******************************************************************************/
37
38static int 20static int
39nv30_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 21nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
40 struct nvkm_oclass *oclass, void *data, u32 size, 22 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
41 struct nvkm_object **pobject)
42{ 23{
24 struct nv20_gr *gr = nv20_gr(base);
43 struct nv20_gr_chan *chan; 25 struct nv20_gr_chan *chan;
44 int ret, i; 26 int ret, i;
45 27
46 ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x5f48, 28 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
47 16, NVOBJ_FLAG_ZERO_ALLOC, &chan); 29 return -ENOMEM;
48 *pobject = nv_object(chan); 30 nvkm_object_ctor(&nv30_gr_chan, oclass, &chan->object);
31 chan->gr = gr;
32 chan->chid = fifoch->chid;
33 *pobject = &chan->object;
34
35 ret = nvkm_memory_new(gr->base.engine.subdev.device,
36 NVKM_MEM_TARGET_INST, 0x5f48, 16, true,
37 &chan->inst);
49 if (ret) 38 if (ret)
50 return ret; 39 return ret;
51 40
52 chan->chid = nvkm_fifo_chan(parent)->chid; 41 nvkm_kmap(chan->inst);
53 42 nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
54 nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24)); 43 nvkm_wo32(chan->inst, 0x0410, 0x00000101);
55 nv_wo32(chan, 0x0410, 0x00000101); 44 nvkm_wo32(chan->inst, 0x0424, 0x00000111);
56 nv_wo32(chan, 0x0424, 0x00000111); 45 nvkm_wo32(chan->inst, 0x0428, 0x00000060);
57 nv_wo32(chan, 0x0428, 0x00000060); 46 nvkm_wo32(chan->inst, 0x0444, 0x00000080);
58 nv_wo32(chan, 0x0444, 0x00000080); 47 nvkm_wo32(chan->inst, 0x0448, 0xffff0000);
59 nv_wo32(chan, 0x0448, 0xffff0000); 48 nvkm_wo32(chan->inst, 0x044c, 0x00000001);
60 nv_wo32(chan, 0x044c, 0x00000001); 49 nvkm_wo32(chan->inst, 0x0460, 0x44400000);
61 nv_wo32(chan, 0x0460, 0x44400000); 50 nvkm_wo32(chan->inst, 0x048c, 0xffff0000);
62 nv_wo32(chan, 0x048c, 0xffff0000);
63 for (i = 0x04e0; i < 0x04e8; i += 4) 51 for (i = 0x04e0; i < 0x04e8; i += 4)
64 nv_wo32(chan, i, 0x0fff0000); 52 nvkm_wo32(chan->inst, i, 0x0fff0000);
65 nv_wo32(chan, 0x04ec, 0x00011100); 53 nvkm_wo32(chan->inst, 0x04ec, 0x00011100);
66 for (i = 0x0508; i < 0x0548; i += 4) 54 for (i = 0x0508; i < 0x0548; i += 4)
67 nv_wo32(chan, i, 0x07ff0000); 55 nvkm_wo32(chan->inst, i, 0x07ff0000);
68 nv_wo32(chan, 0x0550, 0x4b7fffff); 56 nvkm_wo32(chan->inst, 0x0550, 0x4b7fffff);
69 nv_wo32(chan, 0x058c, 0x00000080); 57 nvkm_wo32(chan->inst, 0x058c, 0x00000080);
70 nv_wo32(chan, 0x0590, 0x30201000); 58 nvkm_wo32(chan->inst, 0x0590, 0x30201000);
71 nv_wo32(chan, 0x0594, 0x70605040); 59 nvkm_wo32(chan->inst, 0x0594, 0x70605040);
72 nv_wo32(chan, 0x0598, 0xb8a89888); 60 nvkm_wo32(chan->inst, 0x0598, 0xb8a89888);
73 nv_wo32(chan, 0x059c, 0xf8e8d8c8); 61 nvkm_wo32(chan->inst, 0x059c, 0xf8e8d8c8);
74 nv_wo32(chan, 0x05b0, 0xb0000000); 62 nvkm_wo32(chan->inst, 0x05b0, 0xb0000000);
75 for (i = 0x0600; i < 0x0640; i += 4) 63 for (i = 0x0600; i < 0x0640; i += 4)
76 nv_wo32(chan, i, 0x00010588); 64 nvkm_wo32(chan->inst, i, 0x00010588);
77 for (i = 0x0640; i < 0x0680; i += 4) 65 for (i = 0x0640; i < 0x0680; i += 4)
78 nv_wo32(chan, i, 0x00030303); 66 nvkm_wo32(chan->inst, i, 0x00030303);
79 for (i = 0x06c0; i < 0x0700; i += 4) 67 for (i = 0x06c0; i < 0x0700; i += 4)
80 nv_wo32(chan, i, 0x0008aae4); 68 nvkm_wo32(chan->inst, i, 0x0008aae4);
81 for (i = 0x0700; i < 0x0740; i += 4) 69 for (i = 0x0700; i < 0x0740; i += 4)
82 nv_wo32(chan, i, 0x01012000); 70 nvkm_wo32(chan->inst, i, 0x01012000);
83 for (i = 0x0740; i < 0x0780; i += 4) 71 for (i = 0x0740; i < 0x0780; i += 4)
84 nv_wo32(chan, i, 0x00080008); 72 nvkm_wo32(chan->inst, i, 0x00080008);
85 nv_wo32(chan, 0x085c, 0x00040000); 73 nvkm_wo32(chan->inst, 0x085c, 0x00040000);
86 nv_wo32(chan, 0x0860, 0x00010000); 74 nvkm_wo32(chan->inst, 0x0860, 0x00010000);
87 for (i = 0x0864; i < 0x0874; i += 4) 75 for (i = 0x0864; i < 0x0874; i += 4)
88 nv_wo32(chan, i, 0x00040004); 76 nvkm_wo32(chan->inst, i, 0x00040004);
89 for (i = 0x1f18; i <= 0x3088 ; i += 16) { 77 for (i = 0x1f18; i <= 0x3088 ; i += 16) {
90 nv_wo32(chan, i + 0, 0x10700ff9); 78 nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
91 nv_wo32(chan, i + 1, 0x0436086c); 79 nvkm_wo32(chan->inst, i + 1, 0x0436086c);
92 nv_wo32(chan, i + 2, 0x000c001b); 80 nvkm_wo32(chan->inst, i + 2, 0x000c001b);
93 } 81 }
94 for (i = 0x30b8; i < 0x30c8; i += 4) 82 for (i = 0x30b8; i < 0x30c8; i += 4)
95 nv_wo32(chan, i, 0x0000ffff); 83 nvkm_wo32(chan->inst, i, 0x0000ffff);
96 nv_wo32(chan, 0x344c, 0x3f800000); 84 nvkm_wo32(chan->inst, 0x344c, 0x3f800000);
97 nv_wo32(chan, 0x3808, 0x3f800000); 85 nvkm_wo32(chan->inst, 0x3808, 0x3f800000);
98 nv_wo32(chan, 0x381c, 0x3f800000); 86 nvkm_wo32(chan->inst, 0x381c, 0x3f800000);
99 nv_wo32(chan, 0x3848, 0x40000000); 87 nvkm_wo32(chan->inst, 0x3848, 0x40000000);
100 nv_wo32(chan, 0x384c, 0x3f800000); 88 nvkm_wo32(chan->inst, 0x384c, 0x3f800000);
101 nv_wo32(chan, 0x3850, 0x3f000000); 89 nvkm_wo32(chan->inst, 0x3850, 0x3f000000);
102 nv_wo32(chan, 0x3858, 0x40000000); 90 nvkm_wo32(chan->inst, 0x3858, 0x40000000);
103 nv_wo32(chan, 0x385c, 0x3f800000); 91 nvkm_wo32(chan->inst, 0x385c, 0x3f800000);
104 nv_wo32(chan, 0x3864, 0xbf800000); 92 nvkm_wo32(chan->inst, 0x3864, 0xbf800000);
105 nv_wo32(chan, 0x386c, 0xbf800000); 93 nvkm_wo32(chan->inst, 0x386c, 0xbf800000);
94 nvkm_done(chan->inst);
106 return 0; 95 return 0;
107} 96}
108 97
109static struct nvkm_oclass
110nv30_gr_cclass = {
111 .handle = NV_ENGCTX(GR, 0x30),
112 .ofuncs = &(struct nvkm_ofuncs) {
113 .ctor = nv30_gr_context_ctor,
114 .dtor = _nvkm_gr_context_dtor,
115 .init = nv20_gr_context_init,
116 .fini = nv20_gr_context_fini,
117 .rd32 = _nvkm_gr_context_rd32,
118 .wr32 = _nvkm_gr_context_wr32,
119 },
120};
121
122/******************************************************************************* 98/*******************************************************************************
123 * PGRAPH engine/subdev functions 99 * PGRAPH engine/subdev functions
124 ******************************************************************************/ 100 ******************************************************************************/
125 101
126static int
127nv30_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
128 struct nvkm_oclass *oclass, void *data, u32 size,
129 struct nvkm_object **pobject)
130{
131 struct nv20_gr_priv *priv;
132 int ret;
133
134 ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
135 *pobject = nv_object(priv);
136 if (ret)
137 return ret;
138
139 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
140 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
141 if (ret)
142 return ret;
143
144 nv_subdev(priv)->unit = 0x00001000;
145 nv_subdev(priv)->intr = nv20_gr_intr;
146 nv_engine(priv)->cclass = &nv30_gr_cclass;
147 nv_engine(priv)->sclass = nv30_gr_sclass;
148 nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
149 return 0;
150}
151
152int 102int
153nv30_gr_init(struct nvkm_object *object) 103nv30_gr_init(struct nvkm_gr *base)
154{ 104{
155 struct nvkm_engine *engine = nv_engine(object); 105 struct nv20_gr *gr = nv20_gr(base);
156 struct nv20_gr_priv *priv = (void *)engine; 106 struct nvkm_device *device = gr->base.engine.subdev.device;
157 struct nvkm_fb *pfb = nvkm_fb(object); 107
158 int ret, i; 108 nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE,
159 109 nvkm_memory_addr(gr->ctxtab) >> 4);
160 ret = nvkm_gr_init(&priv->base); 110
161 if (ret) 111 nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF);
162 return ret; 112 nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
163 113
164 nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4); 114 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
165 115 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
166 nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF); 116 nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
167 nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 117 nvkm_wr32(device, 0x400890, 0x01b463ff);
168 118 nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
169 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); 119 nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
170 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000); 120 nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
171 nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0); 121 nvkm_wr32(device, 0x400B80, 0x1003d888);
172 nv_wr32(priv, 0x400890, 0x01b463ff); 122 nvkm_wr32(device, 0x400B84, 0x0c000000);
173 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf2de0475); 123 nvkm_wr32(device, 0x400098, 0x00000000);
174 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000); 124 nvkm_wr32(device, 0x40009C, 0x0005ad00);
175 nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6); 125 nvkm_wr32(device, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
176 nv_wr32(priv, 0x400B80, 0x1003d888); 126 nvkm_wr32(device, 0x4000a0, 0x00000000);
177 nv_wr32(priv, 0x400B84, 0x0c000000); 127 nvkm_wr32(device, 0x4000a4, 0x00000008);
178 nv_wr32(priv, 0x400098, 0x00000000); 128 nvkm_wr32(device, 0x4008a8, 0xb784a400);
179 nv_wr32(priv, 0x40009C, 0x0005ad00); 129 nvkm_wr32(device, 0x400ba0, 0x002f8685);
180 nv_wr32(priv, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */ 130 nvkm_wr32(device, 0x400ba4, 0x00231f3f);
181 nv_wr32(priv, 0x4000a0, 0x00000000); 131 nvkm_wr32(device, 0x4008a4, 0x40000020);
182 nv_wr32(priv, 0x4000a4, 0x00000008); 132
183 nv_wr32(priv, 0x4008a8, 0xb784a400); 133 if (device->chipset == 0x34) {
184 nv_wr32(priv, 0x400ba0, 0x002f8685); 134 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
185 nv_wr32(priv, 0x400ba4, 0x00231f3f); 135 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00200201);
186 nv_wr32(priv, 0x4008a4, 0x40000020); 136 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
187 137 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000008);
188 if (nv_device(priv)->chipset == 0x34) { 138 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
189 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004); 139 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000032);
190 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00200201); 140 nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
191 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0008); 141 nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000002);
192 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000008);
193 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
194 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000032);
195 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
196 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000002);
197 } 142 }
198 143
199 nv_wr32(priv, 0x4000c0, 0x00000016); 144 nvkm_wr32(device, 0x4000c0, 0x00000016);
200
201 /* Turn all the tiling regions off. */
202 for (i = 0; i < pfb->tile.regions; i++)
203 engine->tile_prog(engine, i);
204 145
205 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100); 146 nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
206 nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF); 147 nvkm_wr32(device, NV10_PGRAPH_STATE , 0xFFFFFFFF);
207 nv_wr32(priv, 0x0040075c , 0x00000001); 148 nvkm_wr32(device, 0x0040075c , 0x00000001);
208 149
209 /* begin RAM config */ 150 /* begin RAM config */
210 /* vramsz = pci_resource_len(priv->dev->pdev, 0) - 1; */ 151 /* vramsz = pci_resource_len(gr->dev->pdev, 1) - 1; */
211 nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200)); 152 nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
212 nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204)); 153 nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
213 if (nv_device(priv)->chipset != 0x34) { 154 if (device->chipset != 0x34) {
214 nv_wr32(priv, 0x400750, 0x00EA0000); 155 nvkm_wr32(device, 0x400750, 0x00EA0000);
215 nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100200)); 156 nvkm_wr32(device, 0x400754, nvkm_rd32(device, 0x100200));
216 nv_wr32(priv, 0x400750, 0x00EA0004); 157 nvkm_wr32(device, 0x400750, 0x00EA0004);
217 nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100204)); 158 nvkm_wr32(device, 0x400754, nvkm_rd32(device, 0x100204));
218 } 159 }
160
219 return 0; 161 return 0;
220} 162}
221 163
222struct nvkm_oclass 164static const struct nvkm_gr_func
223nv30_gr_oclass = { 165nv30_gr = {
224 .handle = NV_ENGINE(GR, 0x30), 166 .dtor = nv20_gr_dtor,
225 .ofuncs = &(struct nvkm_ofuncs) { 167 .oneinit = nv20_gr_oneinit,
226 .ctor = nv30_gr_ctor, 168 .init = nv30_gr_init,
227 .dtor = nv20_gr_dtor, 169 .intr = nv20_gr_intr,
228 .init = nv30_gr_init, 170 .tile = nv20_gr_tile,
229 .fini = _nvkm_gr_fini, 171 .chan_new = nv30_gr_chan_new,
230 }, 172 .sclass = {
173 { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
174 { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
175 { -1, -1, 0x0030, &nv04_gr_object }, /* null */
176 { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
177 { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
178 { -1, -1, 0x0044, &nv04_gr_object }, /* patt */
179 { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
180 { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
181 { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
182 { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
183 { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
184 { -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
185 { -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
186 { -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
187 { -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
188 { -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
189 { -1, -1, 0x0397, &nv04_gr_object }, /* rankine */
190 {}
191 }
231}; 192};
193
194int
195nv30_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
196{
197 return nv20_gr_new_(&nv30_gr, device, index, pgr);
198}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 985b7f3306ae..2207dac23981 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -1,159 +1,135 @@
1#include "nv20.h" 1#include "nv20.h"
2#include "regs.h" 2#include "regs.h"
3 3
4#include <core/gpuobj.h>
4#include <engine/fifo.h> 5#include <engine/fifo.h>
6#include <engine/fifo/chan.h>
5 7
6/******************************************************************************* 8/*******************************************************************************
7 * Graphics object classes 9 * PGRAPH context
8 ******************************************************************************/ 10 ******************************************************************************/
9 11
10static struct nvkm_oclass 12static const struct nvkm_object_func
11nv34_gr_sclass[] = { 13nv34_gr_chan = {
12 { 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */ 14 .dtor = nv20_gr_chan_dtor,
13 { 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */ 15 .init = nv20_gr_chan_init,
14 { 0x0030, &nv04_gr_ofuncs, NULL }, /* null */ 16 .fini = nv20_gr_chan_fini,
15 { 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
16 { 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
17 { 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
18 { 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
19 { 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
20 { 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
21 { 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
22 { 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
23 { 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
24 { 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
25 { 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
26 { 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
27 { 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
28 { 0x0697, &nv04_gr_ofuncs, NULL }, /* rankine */
29 {},
30}; 17};
31 18
32/*******************************************************************************
33 * PGRAPH context
34 ******************************************************************************/
35
36static int 19static int
37nv34_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 20nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
38 struct nvkm_oclass *oclass, void *data, u32 size, 21 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
39 struct nvkm_object **pobject)
40{ 22{
23 struct nv20_gr *gr = nv20_gr(base);
41 struct nv20_gr_chan *chan; 24 struct nv20_gr_chan *chan;
42 int ret, i; 25 int ret, i;
43 26
44 ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x46dc, 27 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
45 16, NVOBJ_FLAG_ZERO_ALLOC, &chan); 28 return -ENOMEM;
46 *pobject = nv_object(chan); 29 nvkm_object_ctor(&nv34_gr_chan, oclass, &chan->object);
30 chan->gr = gr;
31 chan->chid = fifoch->chid;
32 *pobject = &chan->object;
33
34 ret = nvkm_memory_new(gr->base.engine.subdev.device,
35 NVKM_MEM_TARGET_INST, 0x46dc, 16, true,
36 &chan->inst);
47 if (ret) 37 if (ret)
48 return ret; 38 return ret;
49 39
50 chan->chid = nvkm_fifo_chan(parent)->chid; 40 nvkm_kmap(chan->inst);
51 41 nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
52 nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24)); 42 nvkm_wo32(chan->inst, 0x040c, 0x01000101);
53 nv_wo32(chan, 0x040c, 0x01000101); 43 nvkm_wo32(chan->inst, 0x0420, 0x00000111);
54 nv_wo32(chan, 0x0420, 0x00000111); 44 nvkm_wo32(chan->inst, 0x0424, 0x00000060);
55 nv_wo32(chan, 0x0424, 0x00000060); 45 nvkm_wo32(chan->inst, 0x0440, 0x00000080);
56 nv_wo32(chan, 0x0440, 0x00000080); 46 nvkm_wo32(chan->inst, 0x0444, 0xffff0000);
57 nv_wo32(chan, 0x0444, 0xffff0000); 47 nvkm_wo32(chan->inst, 0x0448, 0x00000001);
58 nv_wo32(chan, 0x0448, 0x00000001); 48 nvkm_wo32(chan->inst, 0x045c, 0x44400000);
59 nv_wo32(chan, 0x045c, 0x44400000); 49 nvkm_wo32(chan->inst, 0x0480, 0xffff0000);
60 nv_wo32(chan, 0x0480, 0xffff0000);
61 for (i = 0x04d4; i < 0x04dc; i += 4) 50 for (i = 0x04d4; i < 0x04dc; i += 4)
62 nv_wo32(chan, i, 0x0fff0000); 51 nvkm_wo32(chan->inst, i, 0x0fff0000);
63 nv_wo32(chan, 0x04e0, 0x00011100); 52 nvkm_wo32(chan->inst, 0x04e0, 0x00011100);
64 for (i = 0x04fc; i < 0x053c; i += 4) 53 for (i = 0x04fc; i < 0x053c; i += 4)
65 nv_wo32(chan, i, 0x07ff0000); 54 nvkm_wo32(chan->inst, i, 0x07ff0000);
66 nv_wo32(chan, 0x0544, 0x4b7fffff); 55 nvkm_wo32(chan->inst, 0x0544, 0x4b7fffff);
67 nv_wo32(chan, 0x057c, 0x00000080); 56 nvkm_wo32(chan->inst, 0x057c, 0x00000080);
68 nv_wo32(chan, 0x0580, 0x30201000); 57 nvkm_wo32(chan->inst, 0x0580, 0x30201000);
69 nv_wo32(chan, 0x0584, 0x70605040); 58 nvkm_wo32(chan->inst, 0x0584, 0x70605040);
70 nv_wo32(chan, 0x0588, 0xb8a89888); 59 nvkm_wo32(chan->inst, 0x0588, 0xb8a89888);
71 nv_wo32(chan, 0x058c, 0xf8e8d8c8); 60 nvkm_wo32(chan->inst, 0x058c, 0xf8e8d8c8);
72 nv_wo32(chan, 0x05a0, 0xb0000000); 61 nvkm_wo32(chan->inst, 0x05a0, 0xb0000000);
73 for (i = 0x05f0; i < 0x0630; i += 4) 62 for (i = 0x05f0; i < 0x0630; i += 4)
74 nv_wo32(chan, i, 0x00010588); 63 nvkm_wo32(chan->inst, i, 0x00010588);
75 for (i = 0x0630; i < 0x0670; i += 4) 64 for (i = 0x0630; i < 0x0670; i += 4)
76 nv_wo32(chan, i, 0x00030303); 65 nvkm_wo32(chan->inst, i, 0x00030303);
77 for (i = 0x06b0; i < 0x06f0; i += 4) 66 for (i = 0x06b0; i < 0x06f0; i += 4)
78 nv_wo32(chan, i, 0x0008aae4); 67 nvkm_wo32(chan->inst, i, 0x0008aae4);
79 for (i = 0x06f0; i < 0x0730; i += 4) 68 for (i = 0x06f0; i < 0x0730; i += 4)
80 nv_wo32(chan, i, 0x01012000); 69 nvkm_wo32(chan->inst, i, 0x01012000);
81 for (i = 0x0730; i < 0x0770; i += 4) 70 for (i = 0x0730; i < 0x0770; i += 4)
82 nv_wo32(chan, i, 0x00080008); 71 nvkm_wo32(chan->inst, i, 0x00080008);
83 nv_wo32(chan, 0x0850, 0x00040000); 72 nvkm_wo32(chan->inst, 0x0850, 0x00040000);
84 nv_wo32(chan, 0x0854, 0x00010000); 73 nvkm_wo32(chan->inst, 0x0854, 0x00010000);
85 for (i = 0x0858; i < 0x0868; i += 4) 74 for (i = 0x0858; i < 0x0868; i += 4)
86 nv_wo32(chan, i, 0x00040004); 75 nvkm_wo32(chan->inst, i, 0x00040004);
87 for (i = 0x15ac; i <= 0x271c ; i += 16) { 76 for (i = 0x15ac; i <= 0x271c ; i += 16) {
88 nv_wo32(chan, i + 0, 0x10700ff9); 77 nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
89 nv_wo32(chan, i + 1, 0x0436086c); 78 nvkm_wo32(chan->inst, i + 1, 0x0436086c);
90 nv_wo32(chan, i + 2, 0x000c001b); 79 nvkm_wo32(chan->inst, i + 2, 0x000c001b);
91 } 80 }
92 for (i = 0x274c; i < 0x275c; i += 4) 81 for (i = 0x274c; i < 0x275c; i += 4)
93 nv_wo32(chan, i, 0x0000ffff); 82 nvkm_wo32(chan->inst, i, 0x0000ffff);
94 nv_wo32(chan, 0x2ae0, 0x3f800000); 83 nvkm_wo32(chan->inst, 0x2ae0, 0x3f800000);
95 nv_wo32(chan, 0x2e9c, 0x3f800000); 84 nvkm_wo32(chan->inst, 0x2e9c, 0x3f800000);
96 nv_wo32(chan, 0x2eb0, 0x3f800000); 85 nvkm_wo32(chan->inst, 0x2eb0, 0x3f800000);
97 nv_wo32(chan, 0x2edc, 0x40000000); 86 nvkm_wo32(chan->inst, 0x2edc, 0x40000000);
98 nv_wo32(chan, 0x2ee0, 0x3f800000); 87 nvkm_wo32(chan->inst, 0x2ee0, 0x3f800000);
99 nv_wo32(chan, 0x2ee4, 0x3f000000); 88 nvkm_wo32(chan->inst, 0x2ee4, 0x3f000000);
100 nv_wo32(chan, 0x2eec, 0x40000000); 89 nvkm_wo32(chan->inst, 0x2eec, 0x40000000);
101 nv_wo32(chan, 0x2ef0, 0x3f800000); 90 nvkm_wo32(chan->inst, 0x2ef0, 0x3f800000);
102 nv_wo32(chan, 0x2ef8, 0xbf800000); 91 nvkm_wo32(chan->inst, 0x2ef8, 0xbf800000);
103 nv_wo32(chan, 0x2f00, 0xbf800000); 92 nvkm_wo32(chan->inst, 0x2f00, 0xbf800000);
93 nvkm_done(chan->inst);
104 return 0; 94 return 0;
105} 95}
106 96
107static struct nvkm_oclass
108nv34_gr_cclass = {
109 .handle = NV_ENGCTX(GR, 0x34),
110 .ofuncs = &(struct nvkm_ofuncs) {
111 .ctor = nv34_gr_context_ctor,
112 .dtor = _nvkm_gr_context_dtor,
113 .init = nv20_gr_context_init,
114 .fini = nv20_gr_context_fini,
115 .rd32 = _nvkm_gr_context_rd32,
116 .wr32 = _nvkm_gr_context_wr32,
117 },
118};
119
120/******************************************************************************* 97/*******************************************************************************
121 * PGRAPH engine/subdev functions 98 * PGRAPH engine/subdev functions
122 ******************************************************************************/ 99 ******************************************************************************/
123 100
124static int 101static const struct nvkm_gr_func
125nv34_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 102nv34_gr = {
126 struct nvkm_oclass *oclass, void *data, u32 size, 103 .dtor = nv20_gr_dtor,
127 struct nvkm_object **pobject) 104 .oneinit = nv20_gr_oneinit,
128{ 105 .init = nv30_gr_init,
129 struct nv20_gr_priv *priv; 106 .intr = nv20_gr_intr,
130 int ret; 107 .tile = nv20_gr_tile,
131 108 .chan_new = nv34_gr_chan_new,
132 ret = nvkm_gr_create(parent, engine, oclass, true, &priv); 109 .sclass = {
133 *pobject = nv_object(priv); 110 { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
134 if (ret) 111 { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
135 return ret; 112 { -1, -1, 0x0030, &nv04_gr_object }, /* null */
136 113 { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
137 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16, 114 { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
138 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); 115 { -1, -1, 0x0044, &nv04_gr_object }, /* patt */
139 if (ret) 116 { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
140 return ret; 117 { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
118 { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
119 { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
120 { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
121 { -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
122 { -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
123 { -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
124 { -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
125 { -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
126 { -1, -1, 0x0697, &nv04_gr_object }, /* rankine */
127 {}
128 }
129};
141 130
142 nv_subdev(priv)->unit = 0x00001000; 131int
143 nv_subdev(priv)->intr = nv20_gr_intr; 132nv34_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
144 nv_engine(priv)->cclass = &nv34_gr_cclass; 133{
145 nv_engine(priv)->sclass = nv34_gr_sclass; 134 return nv20_gr_new_(&nv34_gr, device, index, pgr);
146 nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
147 return 0;
148} 135}
149
150struct nvkm_oclass
151nv34_gr_oclass = {
152 .handle = NV_ENGINE(GR, 0x34),
153 .ofuncs = &(struct nvkm_ofuncs) {
154 .ctor = nv34_gr_ctor,
155 .dtor = nv20_gr_dtor,
156 .init = nv30_gr_init,
157 .fini = _nvkm_gr_fini,
158 },
159};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 707625f19ff5..740df0f52c38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -1,159 +1,135 @@
1#include "nv20.h" 1#include "nv20.h"
2#include "regs.h" 2#include "regs.h"
3 3
4#include <core/gpuobj.h>
4#include <engine/fifo.h> 5#include <engine/fifo.h>
6#include <engine/fifo/chan.h>
5 7
6/******************************************************************************* 8/*******************************************************************************
7 * Graphics object classes 9 * PGRAPH context
8 ******************************************************************************/ 10 ******************************************************************************/
9 11
10static struct nvkm_oclass 12static const struct nvkm_object_func
11nv35_gr_sclass[] = { 13nv35_gr_chan = {
12 { 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */ 14 .dtor = nv20_gr_chan_dtor,
13 { 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */ 15 .init = nv20_gr_chan_init,
14 { 0x0030, &nv04_gr_ofuncs, NULL }, /* null */ 16 .fini = nv20_gr_chan_fini,
15 { 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
16 { 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
17 { 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
18 { 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
19 { 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
20 { 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
21 { 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
22 { 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
23 { 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
24 { 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
25 { 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
26 { 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
27 { 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
28 { 0x0497, &nv04_gr_ofuncs, NULL }, /* rankine */
29 {},
30}; 17};
31 18
32/*******************************************************************************
33 * PGRAPH context
34 ******************************************************************************/
35
36static int 19static int
37nv35_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 20nv35_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
38 struct nvkm_oclass *oclass, void *data, u32 size, 21 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
39 struct nvkm_object **pobject)
40{ 22{
23 struct nv20_gr *gr = nv20_gr(base);
41 struct nv20_gr_chan *chan; 24 struct nv20_gr_chan *chan;
42 int ret, i; 25 int ret, i;
43 26
44 ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x577c, 27 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
45 16, NVOBJ_FLAG_ZERO_ALLOC, &chan); 28 return -ENOMEM;
46 *pobject = nv_object(chan); 29 nvkm_object_ctor(&nv35_gr_chan, oclass, &chan->object);
30 chan->gr = gr;
31 chan->chid = fifoch->chid;
32 *pobject = &chan->object;
33
34 ret = nvkm_memory_new(gr->base.engine.subdev.device,
35 NVKM_MEM_TARGET_INST, 0x577c, 16, true,
36 &chan->inst);
47 if (ret) 37 if (ret)
48 return ret; 38 return ret;
49 39
50 chan->chid = nvkm_fifo_chan(parent)->chid; 40 nvkm_kmap(chan->inst);
51 41 nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
52 nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24)); 42 nvkm_wo32(chan->inst, 0x040c, 0x00000101);
53 nv_wo32(chan, 0x040c, 0x00000101); 43 nvkm_wo32(chan->inst, 0x0420, 0x00000111);
54 nv_wo32(chan, 0x0420, 0x00000111); 44 nvkm_wo32(chan->inst, 0x0424, 0x00000060);
55 nv_wo32(chan, 0x0424, 0x00000060); 45 nvkm_wo32(chan->inst, 0x0440, 0x00000080);
56 nv_wo32(chan, 0x0440, 0x00000080); 46 nvkm_wo32(chan->inst, 0x0444, 0xffff0000);
57 nv_wo32(chan, 0x0444, 0xffff0000); 47 nvkm_wo32(chan->inst, 0x0448, 0x00000001);
58 nv_wo32(chan, 0x0448, 0x00000001); 48 nvkm_wo32(chan->inst, 0x045c, 0x44400000);
59 nv_wo32(chan, 0x045c, 0x44400000); 49 nvkm_wo32(chan->inst, 0x0488, 0xffff0000);
60 nv_wo32(chan, 0x0488, 0xffff0000);
61 for (i = 0x04dc; i < 0x04e4; i += 4) 50 for (i = 0x04dc; i < 0x04e4; i += 4)
62 nv_wo32(chan, i, 0x0fff0000); 51 nvkm_wo32(chan->inst, i, 0x0fff0000);
63 nv_wo32(chan, 0x04e8, 0x00011100); 52 nvkm_wo32(chan->inst, 0x04e8, 0x00011100);
64 for (i = 0x0504; i < 0x0544; i += 4) 53 for (i = 0x0504; i < 0x0544; i += 4)
65 nv_wo32(chan, i, 0x07ff0000); 54 nvkm_wo32(chan->inst, i, 0x07ff0000);
66 nv_wo32(chan, 0x054c, 0x4b7fffff); 55 nvkm_wo32(chan->inst, 0x054c, 0x4b7fffff);
67 nv_wo32(chan, 0x0588, 0x00000080); 56 nvkm_wo32(chan->inst, 0x0588, 0x00000080);
68 nv_wo32(chan, 0x058c, 0x30201000); 57 nvkm_wo32(chan->inst, 0x058c, 0x30201000);
69 nv_wo32(chan, 0x0590, 0x70605040); 58 nvkm_wo32(chan->inst, 0x0590, 0x70605040);
70 nv_wo32(chan, 0x0594, 0xb8a89888); 59 nvkm_wo32(chan->inst, 0x0594, 0xb8a89888);
71 nv_wo32(chan, 0x0598, 0xf8e8d8c8); 60 nvkm_wo32(chan->inst, 0x0598, 0xf8e8d8c8);
72 nv_wo32(chan, 0x05ac, 0xb0000000); 61 nvkm_wo32(chan->inst, 0x05ac, 0xb0000000);
73 for (i = 0x0604; i < 0x0644; i += 4) 62 for (i = 0x0604; i < 0x0644; i += 4)
74 nv_wo32(chan, i, 0x00010588); 63 nvkm_wo32(chan->inst, i, 0x00010588);
75 for (i = 0x0644; i < 0x0684; i += 4) 64 for (i = 0x0644; i < 0x0684; i += 4)
76 nv_wo32(chan, i, 0x00030303); 65 nvkm_wo32(chan->inst, i, 0x00030303);
77 for (i = 0x06c4; i < 0x0704; i += 4) 66 for (i = 0x06c4; i < 0x0704; i += 4)
78 nv_wo32(chan, i, 0x0008aae4); 67 nvkm_wo32(chan->inst, i, 0x0008aae4);
79 for (i = 0x0704; i < 0x0744; i += 4) 68 for (i = 0x0704; i < 0x0744; i += 4)
80 nv_wo32(chan, i, 0x01012000); 69 nvkm_wo32(chan->inst, i, 0x01012000);
81 for (i = 0x0744; i < 0x0784; i += 4) 70 for (i = 0x0744; i < 0x0784; i += 4)
82 nv_wo32(chan, i, 0x00080008); 71 nvkm_wo32(chan->inst, i, 0x00080008);
83 nv_wo32(chan, 0x0860, 0x00040000); 72 nvkm_wo32(chan->inst, 0x0860, 0x00040000);
84 nv_wo32(chan, 0x0864, 0x00010000); 73 nvkm_wo32(chan->inst, 0x0864, 0x00010000);
85 for (i = 0x0868; i < 0x0878; i += 4) 74 for (i = 0x0868; i < 0x0878; i += 4)
86 nv_wo32(chan, i, 0x00040004); 75 nvkm_wo32(chan->inst, i, 0x00040004);
87 for (i = 0x1f1c; i <= 0x308c ; i += 16) { 76 for (i = 0x1f1c; i <= 0x308c ; i += 16) {
88 nv_wo32(chan, i + 0, 0x10700ff9); 77 nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
89 nv_wo32(chan, i + 4, 0x0436086c); 78 nvkm_wo32(chan->inst, i + 4, 0x0436086c);
90 nv_wo32(chan, i + 8, 0x000c001b); 79 nvkm_wo32(chan->inst, i + 8, 0x000c001b);
91 } 80 }
92 for (i = 0x30bc; i < 0x30cc; i += 4) 81 for (i = 0x30bc; i < 0x30cc; i += 4)
93 nv_wo32(chan, i, 0x0000ffff); 82 nvkm_wo32(chan->inst, i, 0x0000ffff);
94 nv_wo32(chan, 0x3450, 0x3f800000); 83 nvkm_wo32(chan->inst, 0x3450, 0x3f800000);
95 nv_wo32(chan, 0x380c, 0x3f800000); 84 nvkm_wo32(chan->inst, 0x380c, 0x3f800000);
96 nv_wo32(chan, 0x3820, 0x3f800000); 85 nvkm_wo32(chan->inst, 0x3820, 0x3f800000);
97 nv_wo32(chan, 0x384c, 0x40000000); 86 nvkm_wo32(chan->inst, 0x384c, 0x40000000);
98 nv_wo32(chan, 0x3850, 0x3f800000); 87 nvkm_wo32(chan->inst, 0x3850, 0x3f800000);
99 nv_wo32(chan, 0x3854, 0x3f000000); 88 nvkm_wo32(chan->inst, 0x3854, 0x3f000000);
100 nv_wo32(chan, 0x385c, 0x40000000); 89 nvkm_wo32(chan->inst, 0x385c, 0x40000000);
101 nv_wo32(chan, 0x3860, 0x3f800000); 90 nvkm_wo32(chan->inst, 0x3860, 0x3f800000);
102 nv_wo32(chan, 0x3868, 0xbf800000); 91 nvkm_wo32(chan->inst, 0x3868, 0xbf800000);
103 nv_wo32(chan, 0x3870, 0xbf800000); 92 nvkm_wo32(chan->inst, 0x3870, 0xbf800000);
93 nvkm_done(chan->inst);
104 return 0; 94 return 0;
105} 95}
106 96
107static struct nvkm_oclass
108nv35_gr_cclass = {
109 .handle = NV_ENGCTX(GR, 0x35),
110 .ofuncs = &(struct nvkm_ofuncs) {
111 .ctor = nv35_gr_context_ctor,
112 .dtor = _nvkm_gr_context_dtor,
113 .init = nv20_gr_context_init,
114 .fini = nv20_gr_context_fini,
115 .rd32 = _nvkm_gr_context_rd32,
116 .wr32 = _nvkm_gr_context_wr32,
117 },
118};
119
120/******************************************************************************* 97/*******************************************************************************
121 * PGRAPH engine/subdev functions 98 * PGRAPH engine/subdev functions
122 ******************************************************************************/ 99 ******************************************************************************/
123 100
124static int 101static const struct nvkm_gr_func
125nv35_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 102nv35_gr = {
126 struct nvkm_oclass *oclass, void *data, u32 size, 103 .dtor = nv20_gr_dtor,
127 struct nvkm_object **pobject) 104 .oneinit = nv20_gr_oneinit,
128{ 105 .init = nv30_gr_init,
129 struct nv20_gr_priv *priv; 106 .intr = nv20_gr_intr,
130 int ret; 107 .tile = nv20_gr_tile,
131 108 .chan_new = nv35_gr_chan_new,
132 ret = nvkm_gr_create(parent, engine, oclass, true, &priv); 109 .sclass = {
133 *pobject = nv_object(priv); 110 { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
134 if (ret) 111 { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
135 return ret; 112 { -1, -1, 0x0030, &nv04_gr_object }, /* null */
136 113 { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
137 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16, 114 { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
138 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); 115 { -1, -1, 0x0044, &nv04_gr_object }, /* patt */
139 if (ret) 116 { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
140 return ret; 117 { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
118 { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
119 { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
120 { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
121 { -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
122 { -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
123 { -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
124 { -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
125 { -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
126 { -1, -1, 0x0497, &nv04_gr_object }, /* rankine */
127 {}
128 }
129};
141 130
142 nv_subdev(priv)->unit = 0x00001000; 131int
143 nv_subdev(priv)->intr = nv20_gr_intr; 132nv35_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
144 nv_engine(priv)->cclass = &nv35_gr_cclass; 133{
145 nv_engine(priv)->sclass = nv35_gr_sclass; 134 return nv20_gr_new_(&nv35_gr, device, index, pgr);
146 nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
147 return 0;
148} 135}
149
150struct nvkm_oclass
151nv35_gr_oclass = {
152 .handle = NV_ENGINE(GR, 0x35),
153 .ofuncs = &(struct nvkm_ofuncs) {
154 .ctor = nv35_gr_ctor,
155 .dtor = nv20_gr_dtor,
156 .init = nv30_gr_init,
157 .fini = _nvkm_gr_fini,
158 },
159};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index 7e1937980e3f..ffa902ece872 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -25,26 +25,15 @@
25#include "regs.h" 25#include "regs.h"
26 26
27#include <core/client.h> 27#include <core/client.h>
28#include <core/handle.h> 28#include <core/gpuobj.h>
29#include <subdev/fb.h> 29#include <subdev/fb.h>
30#include <subdev/timer.h> 30#include <subdev/timer.h>
31#include <engine/fifo.h> 31#include <engine/fifo.h>
32 32
33struct nv40_gr_priv { 33u64
34 struct nvkm_gr base;
35 u32 size;
36};
37
38struct nv40_gr_chan {
39 struct nvkm_gr_chan base;
40};
41
42static u64
43nv40_gr_units(struct nvkm_gr *gr) 34nv40_gr_units(struct nvkm_gr *gr)
44{ 35{
45 struct nv40_gr_priv *priv = (void *)gr; 36 return nvkm_rd32(gr->engine.subdev.device, 0x1540);
46
47 return nv_rd32(priv, 0x1540);
48} 37}
49 38
50/******************************************************************************* 39/*******************************************************************************
@@ -52,80 +41,29 @@ nv40_gr_units(struct nvkm_gr *gr)
52 ******************************************************************************/ 41 ******************************************************************************/
53 42
54static int 43static int
55nv40_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 44nv40_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
56 struct nvkm_oclass *oclass, void *data, u32 size, 45 int align, struct nvkm_gpuobj **pgpuobj)
57 struct nvkm_object **pobject)
58{ 46{
59 struct nvkm_gpuobj *obj; 47 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 20, align,
60 int ret; 48 false, parent, pgpuobj);
61 49 if (ret == 0) {
62 ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent, 50 nvkm_kmap(*pgpuobj);
63 20, 16, 0, &obj); 51 nvkm_wo32(*pgpuobj, 0x00, object->oclass);
64 *pobject = nv_object(obj); 52 nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
65 if (ret) 53 nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
66 return ret;
67
68 nv_wo32(obj, 0x00, nv_mclass(obj));
69 nv_wo32(obj, 0x04, 0x00000000);
70 nv_wo32(obj, 0x08, 0x00000000);
71#ifdef __BIG_ENDIAN 54#ifdef __BIG_ENDIAN
72 nv_mo32(obj, 0x08, 0x01000000, 0x01000000); 55 nvkm_mo32(*pgpuobj, 0x08, 0x01000000, 0x01000000);
73#endif 56#endif
74 nv_wo32(obj, 0x0c, 0x00000000); 57 nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
75 nv_wo32(obj, 0x10, 0x00000000); 58 nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
76 return 0; 59 nvkm_done(*pgpuobj);
60 }
61 return ret;
77} 62}
78 63
79static struct nvkm_ofuncs 64const struct nvkm_object_func
80nv40_gr_ofuncs = { 65nv40_gr_object = {
81 .ctor = nv40_gr_object_ctor, 66 .bind = nv40_gr_object_bind,
82 .dtor = _nvkm_gpuobj_dtor,
83 .init = _nvkm_gpuobj_init,
84 .fini = _nvkm_gpuobj_fini,
85 .rd32 = _nvkm_gpuobj_rd32,
86 .wr32 = _nvkm_gpuobj_wr32,
87};
88
89static struct nvkm_oclass
90nv40_gr_sclass[] = {
91 { 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
92 { 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
93 { 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
94 { 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
95 { 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
96 { 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
97 { 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
98 { 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
99 { 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
100 { 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
101 { 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
102 { 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
103 { 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
104 { 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
105 { 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
106 { 0x4097, &nv40_gr_ofuncs, NULL }, /* curie */
107 {},
108};
109
110static struct nvkm_oclass
111nv44_gr_sclass[] = {
112 { 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
113 { 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
114 { 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
115 { 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
116 { 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
117 { 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
118 { 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
119 { 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
120 { 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
121 { 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
122 { 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
123 { 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
124 { 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
125 { 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
126 { 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
127 { 0x4497, &nv40_gr_ofuncs, NULL }, /* curie */
128 {},
129}; 67};
130 68
131/******************************************************************************* 69/*******************************************************************************
@@ -133,361 +71,334 @@ nv44_gr_sclass[] = {
133 ******************************************************************************/ 71 ******************************************************************************/
134 72
135static int 73static int
136nv40_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 74nv40_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
137 struct nvkm_oclass *oclass, void *data, u32 size, 75 int align, struct nvkm_gpuobj **pgpuobj)
138 struct nvkm_object **pobject)
139{ 76{
140 struct nv40_gr_priv *priv = (void *)engine; 77 struct nv40_gr_chan *chan = nv40_gr_chan(object);
141 struct nv40_gr_chan *chan; 78 struct nv40_gr *gr = chan->gr;
142 int ret; 79 int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
143 80 align, true, parent, pgpuobj);
144 ret = nvkm_gr_context_create(parent, engine, oclass, NULL, priv->size, 81 if (ret == 0) {
145 16, NVOBJ_FLAG_ZERO_ALLOC, &chan); 82 chan->inst = (*pgpuobj)->addr;
146 *pobject = nv_object(chan); 83 nvkm_kmap(*pgpuobj);
147 if (ret) 84 nv40_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
148 return ret; 85 nvkm_wo32(*pgpuobj, 0x00000, chan->inst >> 4);
149 86 nvkm_done(*pgpuobj);
150 nv40_grctx_fill(nv_device(priv), nv_gpuobj(chan)); 87 }
151 nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4); 88 return ret;
152 return 0;
153} 89}
154 90
155static int 91static int
156nv40_gr_context_fini(struct nvkm_object *object, bool suspend) 92nv40_gr_chan_fini(struct nvkm_object *object, bool suspend)
157{ 93{
158 struct nv40_gr_priv *priv = (void *)object->engine; 94 struct nv40_gr_chan *chan = nv40_gr_chan(object);
159 struct nv40_gr_chan *chan = (void *)object; 95 struct nv40_gr *gr = chan->gr;
160 u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4; 96 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
97 struct nvkm_device *device = subdev->device;
98 u32 inst = 0x01000000 | chan->inst >> 4;
161 int ret = 0; 99 int ret = 0;
162 100
163 nv_mask(priv, 0x400720, 0x00000001, 0x00000000); 101 nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);
164 102
165 if (nv_rd32(priv, 0x40032c) == inst) { 103 if (nvkm_rd32(device, 0x40032c) == inst) {
166 if (suspend) { 104 if (suspend) {
167 nv_wr32(priv, 0x400720, 0x00000000); 105 nvkm_wr32(device, 0x400720, 0x00000000);
168 nv_wr32(priv, 0x400784, inst); 106 nvkm_wr32(device, 0x400784, inst);
169 nv_mask(priv, 0x400310, 0x00000020, 0x00000020); 107 nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
170 nv_mask(priv, 0x400304, 0x00000001, 0x00000001); 108 nvkm_mask(device, 0x400304, 0x00000001, 0x00000001);
171 if (!nv_wait(priv, 0x400300, 0x00000001, 0x00000000)) { 109 if (nvkm_msec(device, 2000,
172 u32 insn = nv_rd32(priv, 0x400308); 110 if (!(nvkm_rd32(device, 0x400300) & 0x00000001))
173 nv_warn(priv, "ctxprog timeout 0x%08x\n", insn); 111 break;
112 ) < 0) {
113 u32 insn = nvkm_rd32(device, 0x400308);
114 nvkm_warn(subdev, "ctxprog timeout %08x\n", insn);
174 ret = -EBUSY; 115 ret = -EBUSY;
175 } 116 }
176 } 117 }
177 118
178 nv_mask(priv, 0x40032c, 0x01000000, 0x00000000); 119 nvkm_mask(device, 0x40032c, 0x01000000, 0x00000000);
179 } 120 }
180 121
181 if (nv_rd32(priv, 0x400330) == inst) 122 if (nvkm_rd32(device, 0x400330) == inst)
182 nv_mask(priv, 0x400330, 0x01000000, 0x00000000); 123 nvkm_mask(device, 0x400330, 0x01000000, 0x00000000);
183 124
184 nv_mask(priv, 0x400720, 0x00000001, 0x00000001); 125 nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
185 return ret; 126 return ret;
186} 127}
187 128
188static struct nvkm_oclass 129static void *
189nv40_gr_cclass = { 130nv40_gr_chan_dtor(struct nvkm_object *object)
190 .handle = NV_ENGCTX(GR, 0x40), 131{
191 .ofuncs = &(struct nvkm_ofuncs) { 132 struct nv40_gr_chan *chan = nv40_gr_chan(object);
192 .ctor = nv40_gr_context_ctor, 133 unsigned long flags;
193 .dtor = _nvkm_gr_context_dtor, 134 spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
194 .init = _nvkm_gr_context_init, 135 list_del(&chan->head);
195 .fini = nv40_gr_context_fini, 136 spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
196 .rd32 = _nvkm_gr_context_rd32, 137 return chan;
197 .wr32 = _nvkm_gr_context_wr32, 138}
198 }, 139
140static const struct nvkm_object_func
141nv40_gr_chan = {
142 .dtor = nv40_gr_chan_dtor,
143 .fini = nv40_gr_chan_fini,
144 .bind = nv40_gr_chan_bind,
199}; 145};
200 146
147int
148nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
149 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
150{
151 struct nv40_gr *gr = nv40_gr(base);
152 struct nv40_gr_chan *chan;
153 unsigned long flags;
154
155 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
156 return -ENOMEM;
157 nvkm_object_ctor(&nv40_gr_chan, oclass, &chan->object);
158 chan->gr = gr;
159 *pobject = &chan->object;
160
161 spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
162 list_add(&chan->head, &gr->chan);
163 spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
164 return 0;
165}
166
201/******************************************************************************* 167/*******************************************************************************
202 * PGRAPH engine/subdev functions 168 * PGRAPH engine/subdev functions
203 ******************************************************************************/ 169 ******************************************************************************/
204 170
205static void 171static void
206nv40_gr_tile_prog(struct nvkm_engine *engine, int i) 172nv40_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
207{ 173{
208 struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i]; 174 struct nv40_gr *gr = nv40_gr(base);
209 struct nvkm_fifo *pfifo = nvkm_fifo(engine); 175 struct nvkm_device *device = gr->base.engine.subdev.device;
210 struct nv40_gr_priv *priv = (void *)engine; 176 struct nvkm_fifo *fifo = device->fifo;
211 unsigned long flags; 177 unsigned long flags;
212 178
213 pfifo->pause(pfifo, &flags); 179 nvkm_fifo_pause(fifo, &flags);
214 nv04_gr_idle(priv); 180 nv04_gr_idle(&gr->base);
215 181
216 switch (nv_device(priv)->chipset) { 182 switch (device->chipset) {
217 case 0x40: 183 case 0x40:
218 case 0x41: 184 case 0x41:
219 case 0x42: 185 case 0x42:
220 case 0x43: 186 case 0x43:
221 case 0x45: 187 case 0x45:
222 case 0x4e: 188 nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
223 nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch); 189 nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
224 nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit); 190 nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
225 nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr); 191 nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
226 nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch); 192 nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
227 nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit); 193 nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
228 nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr); 194 switch (device->chipset) {
229 switch (nv_device(priv)->chipset) {
230 case 0x40: 195 case 0x40:
231 case 0x45: 196 case 0x45:
232 nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp); 197 nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
233 nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp); 198 nvkm_wr32(device, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
234 break; 199 break;
235 case 0x41: 200 case 0x41:
236 case 0x42: 201 case 0x42:
237 case 0x43: 202 case 0x43:
238 nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp); 203 nvkm_wr32(device, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
239 nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp); 204 nvkm_wr32(device, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
240 break; 205 break;
241 default: 206 default:
242 break; 207 break;
243 } 208 }
244 break; 209 break;
245 case 0x44:
246 case 0x4a:
247 nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
248 nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
249 nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
250 break;
251 case 0x46:
252 case 0x4c:
253 case 0x47: 210 case 0x47:
254 case 0x49: 211 case 0x49:
255 case 0x4b: 212 case 0x4b:
256 case 0x63: 213 nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
257 case 0x67: 214 nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
258 case 0x68: 215 nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
259 nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch); 216 nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
260 nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit); 217 nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
261 nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr); 218 nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
262 nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch); 219 nvkm_wr32(device, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
263 nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit); 220 nvkm_wr32(device, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
264 nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
265 switch (nv_device(priv)->chipset) {
266 case 0x47:
267 case 0x49:
268 case 0x4b:
269 nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
270 nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
271 break;
272 default:
273 break;
274 }
275 break; 221 break;
276 default: 222 default:
223 WARN_ON(1);
277 break; 224 break;
278 } 225 }
279 226
280 pfifo->start(pfifo, &flags); 227 nvkm_fifo_start(fifo, &flags);
281} 228}
282 229
283static void 230void
284nv40_gr_intr(struct nvkm_subdev *subdev) 231nv40_gr_intr(struct nvkm_gr *base)
285{ 232{
286 struct nvkm_fifo *pfifo = nvkm_fifo(subdev); 233 struct nv40_gr *gr = nv40_gr(base);
287 struct nvkm_engine *engine = nv_engine(subdev); 234 struct nv40_gr_chan *temp, *chan = NULL;
288 struct nvkm_object *engctx; 235 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
289 struct nvkm_handle *handle = NULL; 236 struct nvkm_device *device = subdev->device;
290 struct nv40_gr_priv *priv = (void *)subdev; 237 u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
291 u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR); 238 u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
292 u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE); 239 u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
293 u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS); 240 u32 inst = nvkm_rd32(device, 0x40032c) & 0x000fffff;
294 u32 inst = nv_rd32(priv, 0x40032c) & 0x000fffff; 241 u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
295 u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
296 u32 subc = (addr & 0x00070000) >> 16; 242 u32 subc = (addr & 0x00070000) >> 16;
297 u32 mthd = (addr & 0x00001ffc); 243 u32 mthd = (addr & 0x00001ffc);
298 u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA); 244 u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
299 u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xffff; 245 u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xffff;
300 u32 show = stat; 246 u32 show = stat;
301 int chid; 247 char msg[128], src[128], sta[128];
302 248 unsigned long flags;
303 engctx = nvkm_engctx_get(engine, inst);
304 chid = pfifo->chid(pfifo, engctx);
305 249
306 if (stat & NV_PGRAPH_INTR_ERROR) { 250 spin_lock_irqsave(&gr->base.engine.lock, flags);
307 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { 251 list_for_each_entry(temp, &gr->chan, head) {
308 handle = nvkm_handle_get_class(engctx, class); 252 if (temp->inst >> 4 == inst) {
309 if (handle && !nv_call(handle->object, mthd, data)) 253 chan = temp;
310 show &= ~NV_PGRAPH_INTR_ERROR; 254 list_del(&chan->head);
311 nvkm_handle_put(handle); 255 list_add(&chan->head, &gr->chan);
256 break;
312 } 257 }
258 }
313 259
260 if (stat & NV_PGRAPH_INTR_ERROR) {
314 if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) { 261 if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
315 nv_mask(priv, 0x402000, 0, 0); 262 nvkm_mask(device, 0x402000, 0, 0);
316 } 263 }
317 } 264 }
318 265
319 nv_wr32(priv, NV03_PGRAPH_INTR, stat); 266 nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
320 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); 267 nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
321 268
322 if (show) { 269 if (show) {
323 nv_error(priv, "%s", ""); 270 nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
324 nvkm_bitfield_print(nv10_gr_intr_name, show); 271 nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
325 pr_cont(" nsource:"); 272 nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
326 nvkm_bitfield_print(nv04_gr_nsource, nsource); 273 nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
327 pr_cont(" nstatus:"); 274 "nstatus %08x [%s] ch %d [%08x %s] subc %d "
328 nvkm_bitfield_print(nv10_gr_nstatus, nstatus); 275 "class %04x mthd %04x data %08x\n",
329 pr_cont("\n"); 276 show, msg, nsource, src, nstatus, sta,
330 nv_error(priv, 277 chan ? chan->fifo->chid : -1, inst << 4,
331 "ch %d [0x%08x %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", 278 chan ? chan->fifo->object.client->name : "unknown",
332 chid, inst << 4, nvkm_client_name(engctx), subc, 279 subc, class, mthd, data);
333 class, mthd, data);
334 } 280 }
335 281
336 nvkm_engctx_put(engctx); 282 spin_unlock_irqrestore(&gr->base.engine.lock, flags);
337} 283}
338 284
339static int 285int
340nv40_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 286nv40_gr_init(struct nvkm_gr *base)
341 struct nvkm_oclass *oclass, void *data, u32 size,
342 struct nvkm_object **pobject)
343{ 287{
344 struct nv40_gr_priv *priv; 288 struct nv40_gr *gr = nv40_gr(base);
345 int ret; 289 struct nvkm_device *device = gr->base.engine.subdev.device;
346
347 ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
348 *pobject = nv_object(priv);
349 if (ret)
350 return ret;
351
352 nv_subdev(priv)->unit = 0x00001000;
353 nv_subdev(priv)->intr = nv40_gr_intr;
354 nv_engine(priv)->cclass = &nv40_gr_cclass;
355 if (nv44_gr_class(priv))
356 nv_engine(priv)->sclass = nv44_gr_sclass;
357 else
358 nv_engine(priv)->sclass = nv40_gr_sclass;
359 nv_engine(priv)->tile_prog = nv40_gr_tile_prog;
360
361 priv->base.units = nv40_gr_units;
362 return 0;
363}
364
365static int
366nv40_gr_init(struct nvkm_object *object)
367{
368 struct nvkm_engine *engine = nv_engine(object);
369 struct nvkm_fb *pfb = nvkm_fb(object);
370 struct nv40_gr_priv *priv = (void *)engine;
371 int ret, i, j; 290 int ret, i, j;
372 u32 vramsz; 291 u32 vramsz;
373 292
374 ret = nvkm_gr_init(&priv->base);
375 if (ret)
376 return ret;
377
378 /* generate and upload context program */ 293 /* generate and upload context program */
379 ret = nv40_grctx_init(nv_device(priv), &priv->size); 294 ret = nv40_grctx_init(device, &gr->size);
380 if (ret) 295 if (ret)
381 return ret; 296 return ret;
382 297
383 /* No context present currently */ 298 /* No context present currently */
384 nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); 299 nvkm_wr32(device, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
385 300
386 nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF); 301 nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF);
387 nv_wr32(priv, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); 302 nvkm_wr32(device, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
388 303
389 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); 304 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
390 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000); 305 nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
391 nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0); 306 nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
392 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xe0de8055); 307 nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
393 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000); 308 nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
394 nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f); 309 nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
395 310
396 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100); 311 nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
397 nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF); 312 nvkm_wr32(device, NV10_PGRAPH_STATE , 0xFFFFFFFF);
398 313
399 j = nv_rd32(priv, 0x1540) & 0xff; 314 j = nvkm_rd32(device, 0x1540) & 0xff;
400 if (j) { 315 if (j) {
401 for (i = 0; !(j & 1); j >>= 1, i++) 316 for (i = 0; !(j & 1); j >>= 1, i++)
402 ; 317 ;
403 nv_wr32(priv, 0x405000, i); 318 nvkm_wr32(device, 0x405000, i);
404 } 319 }
405 320
406 if (nv_device(priv)->chipset == 0x40) { 321 if (device->chipset == 0x40) {
407 nv_wr32(priv, 0x4009b0, 0x83280fff); 322 nvkm_wr32(device, 0x4009b0, 0x83280fff);
408 nv_wr32(priv, 0x4009b4, 0x000000a0); 323 nvkm_wr32(device, 0x4009b4, 0x000000a0);
409 } else { 324 } else {
410 nv_wr32(priv, 0x400820, 0x83280eff); 325 nvkm_wr32(device, 0x400820, 0x83280eff);
411 nv_wr32(priv, 0x400824, 0x000000a0); 326 nvkm_wr32(device, 0x400824, 0x000000a0);
412 } 327 }
413 328
414 switch (nv_device(priv)->chipset) { 329 switch (device->chipset) {
415 case 0x40: 330 case 0x40:
416 case 0x45: 331 case 0x45:
417 nv_wr32(priv, 0x4009b8, 0x0078e366); 332 nvkm_wr32(device, 0x4009b8, 0x0078e366);
418 nv_wr32(priv, 0x4009bc, 0x0000014c); 333 nvkm_wr32(device, 0x4009bc, 0x0000014c);
419 break; 334 break;
420 case 0x41: 335 case 0x41:
421 case 0x42: /* pciid also 0x00Cx */ 336 case 0x42: /* pciid also 0x00Cx */
422 /* case 0x0120: XXX (pciid) */ 337 /* case 0x0120: XXX (pciid) */
423 nv_wr32(priv, 0x400828, 0x007596ff); 338 nvkm_wr32(device, 0x400828, 0x007596ff);
424 nv_wr32(priv, 0x40082c, 0x00000108); 339 nvkm_wr32(device, 0x40082c, 0x00000108);
425 break; 340 break;
426 case 0x43: 341 case 0x43:
427 nv_wr32(priv, 0x400828, 0x0072cb77); 342 nvkm_wr32(device, 0x400828, 0x0072cb77);
428 nv_wr32(priv, 0x40082c, 0x00000108); 343 nvkm_wr32(device, 0x40082c, 0x00000108);
429 break; 344 break;
430 case 0x44: 345 case 0x44:
431 case 0x46: /* G72 */ 346 case 0x46: /* G72 */
432 case 0x4a: 347 case 0x4a:
433 case 0x4c: /* G7x-based C51 */ 348 case 0x4c: /* G7x-based C51 */
434 case 0x4e: 349 case 0x4e:
435 nv_wr32(priv, 0x400860, 0); 350 nvkm_wr32(device, 0x400860, 0);
436 nv_wr32(priv, 0x400864, 0); 351 nvkm_wr32(device, 0x400864, 0);
437 break; 352 break;
438 case 0x47: /* G70 */ 353 case 0x47: /* G70 */
439 case 0x49: /* G71 */ 354 case 0x49: /* G71 */
440 case 0x4b: /* G73 */ 355 case 0x4b: /* G73 */
441 nv_wr32(priv, 0x400828, 0x07830610); 356 nvkm_wr32(device, 0x400828, 0x07830610);
442 nv_wr32(priv, 0x40082c, 0x0000016A); 357 nvkm_wr32(device, 0x40082c, 0x0000016A);
443 break; 358 break;
444 default: 359 default:
445 break; 360 break;
446 } 361 }
447 362
448 nv_wr32(priv, 0x400b38, 0x2ffff800); 363 nvkm_wr32(device, 0x400b38, 0x2ffff800);
449 nv_wr32(priv, 0x400b3c, 0x00006000); 364 nvkm_wr32(device, 0x400b3c, 0x00006000);
450 365
451 /* Tiling related stuff. */ 366 /* Tiling related stuff. */
452 switch (nv_device(priv)->chipset) { 367 switch (device->chipset) {
453 case 0x44: 368 case 0x44:
454 case 0x4a: 369 case 0x4a:
455 nv_wr32(priv, 0x400bc4, 0x1003d888); 370 nvkm_wr32(device, 0x400bc4, 0x1003d888);
456 nv_wr32(priv, 0x400bbc, 0xb7a7b500); 371 nvkm_wr32(device, 0x400bbc, 0xb7a7b500);
457 break; 372 break;
458 case 0x46: 373 case 0x46:
459 nv_wr32(priv, 0x400bc4, 0x0000e024); 374 nvkm_wr32(device, 0x400bc4, 0x0000e024);
460 nv_wr32(priv, 0x400bbc, 0xb7a7b520); 375 nvkm_wr32(device, 0x400bbc, 0xb7a7b520);
461 break; 376 break;
462 case 0x4c: 377 case 0x4c:
463 case 0x4e: 378 case 0x4e:
464 case 0x67: 379 case 0x67:
465 nv_wr32(priv, 0x400bc4, 0x1003d888); 380 nvkm_wr32(device, 0x400bc4, 0x1003d888);
466 nv_wr32(priv, 0x400bbc, 0xb7a7b540); 381 nvkm_wr32(device, 0x400bbc, 0xb7a7b540);
467 break; 382 break;
468 default: 383 default:
469 break; 384 break;
470 } 385 }
471 386
472 /* Turn all the tiling regions off. */
473 for (i = 0; i < pfb->tile.regions; i++)
474 engine->tile_prog(engine, i);
475
476 /* begin RAM config */ 387 /* begin RAM config */
477 vramsz = nv_device_resource_len(nv_device(priv), 0) - 1; 388 vramsz = device->func->resource_size(device, 1) - 1;
478 switch (nv_device(priv)->chipset) { 389 switch (device->chipset) {
479 case 0x40: 390 case 0x40:
480 nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200)); 391 nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
481 nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204)); 392 nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
482 nv_wr32(priv, 0x4069A4, nv_rd32(priv, 0x100200)); 393 nvkm_wr32(device, 0x4069A4, nvkm_rd32(device, 0x100200));
483 nv_wr32(priv, 0x4069A8, nv_rd32(priv, 0x100204)); 394 nvkm_wr32(device, 0x4069A8, nvkm_rd32(device, 0x100204));
484 nv_wr32(priv, 0x400820, 0); 395 nvkm_wr32(device, 0x400820, 0);
485 nv_wr32(priv, 0x400824, 0); 396 nvkm_wr32(device, 0x400824, 0);
486 nv_wr32(priv, 0x400864, vramsz); 397 nvkm_wr32(device, 0x400864, vramsz);
487 nv_wr32(priv, 0x400868, vramsz); 398 nvkm_wr32(device, 0x400868, vramsz);
488 break; 399 break;
489 default: 400 default:
490 switch (nv_device(priv)->chipset) { 401 switch (device->chipset) {
491 case 0x41: 402 case 0x41:
492 case 0x42: 403 case 0x42:
493 case 0x43: 404 case 0x43:
@@ -495,33 +406,70 @@ nv40_gr_init(struct nvkm_object *object)
495 case 0x4e: 406 case 0x4e:
496 case 0x44: 407 case 0x44:
497 case 0x4a: 408 case 0x4a:
498 nv_wr32(priv, 0x4009F0, nv_rd32(priv, 0x100200)); 409 nvkm_wr32(device, 0x4009F0, nvkm_rd32(device, 0x100200));
499 nv_wr32(priv, 0x4009F4, nv_rd32(priv, 0x100204)); 410 nvkm_wr32(device, 0x4009F4, nvkm_rd32(device, 0x100204));
500 break; 411 break;
501 default: 412 default:
502 nv_wr32(priv, 0x400DF0, nv_rd32(priv, 0x100200)); 413 nvkm_wr32(device, 0x400DF0, nvkm_rd32(device, 0x100200));
503 nv_wr32(priv, 0x400DF4, nv_rd32(priv, 0x100204)); 414 nvkm_wr32(device, 0x400DF4, nvkm_rd32(device, 0x100204));
504 break; 415 break;
505 } 416 }
506 nv_wr32(priv, 0x4069F0, nv_rd32(priv, 0x100200)); 417 nvkm_wr32(device, 0x4069F0, nvkm_rd32(device, 0x100200));
507 nv_wr32(priv, 0x4069F4, nv_rd32(priv, 0x100204)); 418 nvkm_wr32(device, 0x4069F4, nvkm_rd32(device, 0x100204));
508 nv_wr32(priv, 0x400840, 0); 419 nvkm_wr32(device, 0x400840, 0);
509 nv_wr32(priv, 0x400844, 0); 420 nvkm_wr32(device, 0x400844, 0);
510 nv_wr32(priv, 0x4008A0, vramsz); 421 nvkm_wr32(device, 0x4008A0, vramsz);
511 nv_wr32(priv, 0x4008A4, vramsz); 422 nvkm_wr32(device, 0x4008A4, vramsz);
512 break; 423 break;
513 } 424 }
514 425
515 return 0; 426 return 0;
516} 427}
517 428
518struct nvkm_oclass 429int
519nv40_gr_oclass = { 430nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
520 .handle = NV_ENGINE(GR, 0x40), 431 int index, struct nvkm_gr **pgr)
521 .ofuncs = &(struct nvkm_ofuncs) { 432{
522 .ctor = nv40_gr_ctor, 433 struct nv40_gr *gr;
523 .dtor = _nvkm_gr_dtor, 434
524 .init = nv40_gr_init, 435 if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
525 .fini = _nvkm_gr_fini, 436 return -ENOMEM;
526 }, 437 *pgr = &gr->base;
438 INIT_LIST_HEAD(&gr->chan);
439
440 return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
441}
442
443static const struct nvkm_gr_func
444nv40_gr = {
445 .init = nv40_gr_init,
446 .intr = nv40_gr_intr,
447 .tile = nv40_gr_tile,
448 .units = nv40_gr_units,
449 .chan_new = nv40_gr_chan_new,
450 .sclass = {
451 { -1, -1, 0x0012, &nv40_gr_object }, /* beta1 */
452 { -1, -1, 0x0019, &nv40_gr_object }, /* clip */
453 { -1, -1, 0x0030, &nv40_gr_object }, /* null */
454 { -1, -1, 0x0039, &nv40_gr_object }, /* m2mf */
455 { -1, -1, 0x0043, &nv40_gr_object }, /* rop */
456 { -1, -1, 0x0044, &nv40_gr_object }, /* patt */
457 { -1, -1, 0x004a, &nv40_gr_object }, /* gdi */
458 { -1, -1, 0x0062, &nv40_gr_object }, /* surf2d */
459 { -1, -1, 0x0072, &nv40_gr_object }, /* beta4 */
460 { -1, -1, 0x0089, &nv40_gr_object }, /* sifm */
461 { -1, -1, 0x008a, &nv40_gr_object }, /* ifc */
462 { -1, -1, 0x009f, &nv40_gr_object }, /* imageblit */
463 { -1, -1, 0x3062, &nv40_gr_object }, /* surf2d (nv40) */
464 { -1, -1, 0x3089, &nv40_gr_object }, /* sifm (nv40) */
465 { -1, -1, 0x309e, &nv40_gr_object }, /* swzsurf (nv40) */
466 { -1, -1, 0x4097, &nv40_gr_object }, /* curie */
467 {}
468 }
527}; 469};
470
471int
472nv40_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
473{
474 return nv40_gr_new_(&nv40_gr, device, index, pgr);
475}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
index d852bd6de571..2812ed11f877 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
@@ -1,22 +1,45 @@
1#ifndef __NV40_GR_H__ 1#ifndef __NV40_GR_H__
2#define __NV40_GR_H__ 2#define __NV40_GR_H__
3#include <engine/gr.h> 3#define nv40_gr(p) container_of((p), struct nv40_gr, base)
4#include "priv.h"
4 5
5#include <core/device.h> 6struct nv40_gr {
6struct nvkm_gpuobj; 7 struct nvkm_gr base;
8 u32 size;
9 struct list_head chan;
10};
11
12int nv40_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
13 struct nvkm_gr **);
14int nv40_gr_init(struct nvkm_gr *);
15void nv40_gr_intr(struct nvkm_gr *);
16u64 nv40_gr_units(struct nvkm_gr *);
17
18#define nv40_gr_chan(p) container_of((p), struct nv40_gr_chan, object)
19
20struct nv40_gr_chan {
21 struct nvkm_object object;
22 struct nv40_gr *gr;
23 struct nvkm_fifo_chan *fifo;
24 u32 inst;
25 struct list_head head;
26};
27
28int nv40_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
29 const struct nvkm_oclass *, struct nvkm_object **);
30
31extern const struct nvkm_object_func nv40_gr_object;
7 32
8/* returns 1 if device is one of the nv4x using the 0x4497 object class, 33/* returns 1 if device is one of the nv4x using the 0x4497 object class,
9 * helpful to determine a number of other hardware features 34 * helpful to determine a number of other hardware features
10 */ 35 */
11static inline int 36static inline int
12nv44_gr_class(void *priv) 37nv44_gr_class(struct nvkm_device *device)
13{ 38{
14 struct nvkm_device *device = nv_device(priv);
15
16 if ((device->chipset & 0xf0) == 0x60) 39 if ((device->chipset & 0xf0) == 0x60)
17 return 1; 40 return 1;
18 41
19 return !(0x0baf & (1 << (device->chipset & 0x0f))); 42 return !(0x0aaf & (1 << (device->chipset & 0x0f)));
20} 43}
21 44
22int nv40_grctx_init(struct nvkm_device *, u32 *size); 45int nv40_grctx_init(struct nvkm_device *, u32 *size);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
new file mode 100644
index 000000000000..45ff80254eb4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
@@ -0,0 +1,108 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "nv40.h"
25#include "regs.h"
26
27#include <subdev/fb.h>
28#include <engine/fifo.h>
29
30static void
31nv44_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
32{
33 struct nv40_gr *gr = nv40_gr(base);
34 struct nvkm_device *device = gr->base.engine.subdev.device;
35 struct nvkm_fifo *fifo = device->fifo;
36 unsigned long flags;
37
38 nvkm_fifo_pause(fifo, &flags);
39 nv04_gr_idle(&gr->base);
40
41 switch (device->chipset) {
42 case 0x44:
43 case 0x4a:
44 nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
45 nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
46 nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
47 break;
48 case 0x46:
49 case 0x4c:
50 case 0x63:
51 case 0x67:
52 case 0x68:
53 nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
54 nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
55 nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
56 nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
57 nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
58 nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
59 break;
60 case 0x4e:
61 nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
62 nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
63 nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
64 nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
65 nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
66 nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
67 break;
68 default:
69 WARN_ON(1);
70 break;
71 }
72
73 nvkm_fifo_start(fifo, &flags);
74}
75
76static const struct nvkm_gr_func
77nv44_gr = {
78 .init = nv40_gr_init,
79 .intr = nv40_gr_intr,
80 .tile = nv44_gr_tile,
81 .units = nv40_gr_units,
82 .chan_new = nv40_gr_chan_new,
83 .sclass = {
84 { -1, -1, 0x0012, &nv40_gr_object }, /* beta1 */
85 { -1, -1, 0x0019, &nv40_gr_object }, /* clip */
86 { -1, -1, 0x0030, &nv40_gr_object }, /* null */
87 { -1, -1, 0x0039, &nv40_gr_object }, /* m2mf */
88 { -1, -1, 0x0043, &nv40_gr_object }, /* rop */
89 { -1, -1, 0x0044, &nv40_gr_object }, /* patt */
90 { -1, -1, 0x004a, &nv40_gr_object }, /* gdi */
91 { -1, -1, 0x0062, &nv40_gr_object }, /* surf2d */
92 { -1, -1, 0x0072, &nv40_gr_object }, /* beta4 */
93 { -1, -1, 0x0089, &nv40_gr_object }, /* sifm */
94 { -1, -1, 0x008a, &nv40_gr_object }, /* ifc */
95 { -1, -1, 0x009f, &nv40_gr_object }, /* imageblit */
96 { -1, -1, 0x3062, &nv40_gr_object }, /* surf2d (nv40) */
97 { -1, -1, 0x3089, &nv40_gr_object }, /* sifm (nv40) */
98 { -1, -1, 0x309e, &nv40_gr_object }, /* swzsurf (nv40) */
99 { -1, -1, 0x4497, &nv40_gr_object }, /* curie */
100 {}
101 }
102};
103
104int
105nv44_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
106{
107 return nv40_gr_new_(&nv44_gr, device, index, pgr);
108}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
index 270d7cd63fc7..b19b912d5787 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
@@ -24,27 +24,13 @@
24#include "nv50.h" 24#include "nv50.h"
25 25
26#include <core/client.h> 26#include <core/client.h>
27#include <core/device.h> 27#include <core/gpuobj.h>
28#include <core/handle.h>
29#include <engine/fifo.h> 28#include <engine/fifo.h>
30#include <subdev/timer.h>
31 29
32struct nv50_gr_priv { 30u64
33 struct nvkm_gr base;
34 spinlock_t lock;
35 u32 size;
36};
37
38struct nv50_gr_chan {
39 struct nvkm_gr_chan base;
40};
41
42static u64
43nv50_gr_units(struct nvkm_gr *gr) 31nv50_gr_units(struct nvkm_gr *gr)
44{ 32{
45 struct nv50_gr_priv *priv = (void *)gr; 33 return nvkm_rd32(gr->engine.subdev.device, 0x1540);
46
47 return nv_rd32(priv, 0x1540);
48} 34}
49 35
50/******************************************************************************* 36/*******************************************************************************
@@ -52,86 +38,25 @@ nv50_gr_units(struct nvkm_gr *gr)
52 ******************************************************************************/ 38 ******************************************************************************/
53 39
54static int 40static int
55nv50_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 41nv50_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
56 struct nvkm_oclass *oclass, void *data, u32 size, 42 int align, struct nvkm_gpuobj **pgpuobj)
57 struct nvkm_object **pobject)
58{ 43{
59 struct nvkm_gpuobj *obj; 44 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16,
60 int ret; 45 align, false, parent, pgpuobj);
61 46 if (ret == 0) {
62 ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent, 47 nvkm_kmap(*pgpuobj);
63 16, 16, 0, &obj); 48 nvkm_wo32(*pgpuobj, 0x00, object->oclass);
64 *pobject = nv_object(obj); 49 nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
65 if (ret) 50 nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
66 return ret; 51 nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
67 52 nvkm_done(*pgpuobj);
68 nv_wo32(obj, 0x00, nv_mclass(obj)); 53 }
69 nv_wo32(obj, 0x04, 0x00000000); 54 return ret;
70 nv_wo32(obj, 0x08, 0x00000000);
71 nv_wo32(obj, 0x0c, 0x00000000);
72 return 0;
73} 55}
74 56
75static struct nvkm_ofuncs 57const struct nvkm_object_func
76nv50_gr_ofuncs = { 58nv50_gr_object = {
77 .ctor = nv50_gr_object_ctor, 59 .bind = nv50_gr_object_bind,
78 .dtor = _nvkm_gpuobj_dtor,
79 .init = _nvkm_gpuobj_init,
80 .fini = _nvkm_gpuobj_fini,
81 .rd32 = _nvkm_gpuobj_rd32,
82 .wr32 = _nvkm_gpuobj_wr32,
83};
84
85static struct nvkm_oclass
86nv50_gr_sclass[] = {
87 { 0x0030, &nv50_gr_ofuncs },
88 { 0x502d, &nv50_gr_ofuncs },
89 { 0x5039, &nv50_gr_ofuncs },
90 { 0x5097, &nv50_gr_ofuncs },
91 { 0x50c0, &nv50_gr_ofuncs },
92 {}
93};
94
95static struct nvkm_oclass
96g84_gr_sclass[] = {
97 { 0x0030, &nv50_gr_ofuncs },
98 { 0x502d, &nv50_gr_ofuncs },
99 { 0x5039, &nv50_gr_ofuncs },
100 { 0x50c0, &nv50_gr_ofuncs },
101 { 0x8297, &nv50_gr_ofuncs },
102 {}
103};
104
105static struct nvkm_oclass
106gt200_gr_sclass[] = {
107 { 0x0030, &nv50_gr_ofuncs },
108 { 0x502d, &nv50_gr_ofuncs },
109 { 0x5039, &nv50_gr_ofuncs },
110 { 0x50c0, &nv50_gr_ofuncs },
111 { 0x8397, &nv50_gr_ofuncs },
112 {}
113};
114
115static struct nvkm_oclass
116gt215_gr_sclass[] = {
117 { 0x0030, &nv50_gr_ofuncs },
118 { 0x502d, &nv50_gr_ofuncs },
119 { 0x5039, &nv50_gr_ofuncs },
120 { 0x50c0, &nv50_gr_ofuncs },
121 { 0x8597, &nv50_gr_ofuncs },
122 { 0x85c0, &nv50_gr_ofuncs },
123 {}
124};
125
126static struct nvkm_oclass
127mcp89_gr_sclass[] = {
128 { 0x0030, &nv50_gr_ofuncs },
129 { 0x502d, &nv50_gr_ofuncs },
130 { 0x5039, &nv50_gr_ofuncs },
131 { 0x50c0, &nv50_gr_ofuncs },
132 { 0x85c0, &nv50_gr_ofuncs },
133 { 0x8697, &nv50_gr_ofuncs },
134 {}
135}; 60};
136 61
137/******************************************************************************* 62/*******************************************************************************
@@ -139,160 +64,43 @@ mcp89_gr_sclass[] = {
139 ******************************************************************************/ 64 ******************************************************************************/
140 65
141static int 66static int
142nv50_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 67nv50_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
143 struct nvkm_oclass *oclass, void *data, u32 size, 68 int align, struct nvkm_gpuobj **pgpuobj)
144 struct nvkm_object **pobject)
145{ 69{
146 struct nv50_gr_priv *priv = (void *)engine; 70 struct nv50_gr *gr = nv50_gr_chan(object)->gr;
147 struct nv50_gr_chan *chan; 71 int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
148 int ret; 72 align, true, parent, pgpuobj);
149 73 if (ret == 0) {
150 ret = nvkm_gr_context_create(parent, engine, oclass, NULL, priv->size, 74 nvkm_kmap(*pgpuobj);
151 0, NVOBJ_FLAG_ZERO_ALLOC, &chan); 75 nv50_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
152 *pobject = nv_object(chan); 76 nvkm_done(*pgpuobj);
153 if (ret) 77 }
154 return ret; 78 return ret;
155
156 nv50_grctx_fill(nv_device(priv), nv_gpuobj(chan));
157 return 0;
158} 79}
159 80
160static struct nvkm_oclass 81static const struct nvkm_object_func
161nv50_gr_cclass = { 82nv50_gr_chan = {
162 .handle = NV_ENGCTX(GR, 0x50), 83 .bind = nv50_gr_chan_bind,
163 .ofuncs = &(struct nvkm_ofuncs) {
164 .ctor = nv50_gr_context_ctor,
165 .dtor = _nvkm_gr_context_dtor,
166 .init = _nvkm_gr_context_init,
167 .fini = _nvkm_gr_context_fini,
168 .rd32 = _nvkm_gr_context_rd32,
169 .wr32 = _nvkm_gr_context_wr32,
170 },
171};
172
173/*******************************************************************************
174 * PGRAPH engine/subdev functions
175 ******************************************************************************/
176
177static const struct nvkm_bitfield nv50_pgr_status[] = {
178 { 0x00000001, "BUSY" }, /* set when any bit is set */
179 { 0x00000002, "DISPATCH" },
180 { 0x00000004, "UNK2" },
181 { 0x00000008, "UNK3" },
182 { 0x00000010, "UNK4" },
183 { 0x00000020, "UNK5" },
184 { 0x00000040, "M2MF" },
185 { 0x00000080, "UNK7" },
186 { 0x00000100, "CTXPROG" },
187 { 0x00000200, "VFETCH" },
188 { 0x00000400, "CCACHE_PREGEOM" },
189 { 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
190 { 0x00001000, "VCLIP" },
191 { 0x00002000, "RATTR_APLANE" },
192 { 0x00004000, "TRAST" },
193 { 0x00008000, "CLIPID" },
194 { 0x00010000, "ZCULL" },
195 { 0x00020000, "ENG2D" },
196 { 0x00040000, "RMASK" },
197 { 0x00080000, "TPC_RAST" },
198 { 0x00100000, "TPC_PROP" },
199 { 0x00200000, "TPC_TEX" },
200 { 0x00400000, "TPC_GEOM" },
201 { 0x00800000, "TPC_MP" },
202 { 0x01000000, "ROP" },
203 {}
204};
205
206static const char *const nv50_pgr_vstatus_0[] = {
207 "VFETCH", "CCACHE", "PREGEOM", "POSTGEOM", "VATTR", "STRMOUT", "VCLIP",
208 NULL
209};
210
211static const char *const nv50_pgr_vstatus_1[] = {
212 "TPC_RAST", "TPC_PROP", "TPC_TEX", "TPC_GEOM", "TPC_MP", NULL
213};
214
215static const char *const nv50_pgr_vstatus_2[] = {
216 "RATTR", "APLANE", "TRAST", "CLIPID", "ZCULL", "ENG2D", "RMASK",
217 "ROP", NULL
218}; 84};
219 85
220static void 86int
221nvkm_pgr_vstatus_print(struct nv50_gr_priv *priv, int r, 87nv50_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
222 const char *const units[], u32 status) 88 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
223{ 89{
224 int i; 90 struct nv50_gr *gr = nv50_gr(base);
225 91 struct nv50_gr_chan *chan;
226 nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);
227 92
228 for (i = 0; units[i] && status; i++) { 93 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
229 if ((status & 7) == 1) 94 return -ENOMEM;
230 pr_cont(" %s", units[i]); 95 nvkm_object_ctor(&nv50_gr_chan, oclass, &chan->object);
231 status >>= 3; 96 chan->gr = gr;
232 } 97 *pobject = &chan->object;
233 if (status) 98 return 0;
234 pr_cont(" (invalid: 0x%x)", status);
235 pr_cont("\n");
236} 99}
237 100
238static int 101/*******************************************************************************
239g84_gr_tlb_flush(struct nvkm_engine *engine) 102 * PGRAPH engine/subdev functions
240{ 103 ******************************************************************************/
241 struct nvkm_timer *ptimer = nvkm_timer(engine);
242 struct nv50_gr_priv *priv = (void *)engine;
243 bool idle, timeout = false;
244 unsigned long flags;
245 u64 start;
246 u32 tmp;
247
248 spin_lock_irqsave(&priv->lock, flags);
249 nv_mask(priv, 0x400500, 0x00000001, 0x00000000);
250
251 start = ptimer->read(ptimer);
252 do {
253 idle = true;
254
255 for (tmp = nv_rd32(priv, 0x400380); tmp && idle; tmp >>= 3) {
256 if ((tmp & 7) == 1)
257 idle = false;
258 }
259
260 for (tmp = nv_rd32(priv, 0x400384); tmp && idle; tmp >>= 3) {
261 if ((tmp & 7) == 1)
262 idle = false;
263 }
264
265 for (tmp = nv_rd32(priv, 0x400388); tmp && idle; tmp >>= 3) {
266 if ((tmp & 7) == 1)
267 idle = false;
268 }
269 } while (!idle &&
270 !(timeout = ptimer->read(ptimer) - start > 2000000000));
271
272 if (timeout) {
273 nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");
274
275 tmp = nv_rd32(priv, 0x400700);
276 nv_error(priv, "PGRAPH_STATUS : 0x%08x", tmp);
277 nvkm_bitfield_print(nv50_pgr_status, tmp);
278 pr_cont("\n");
279
280 nvkm_pgr_vstatus_print(priv, 0, nv50_pgr_vstatus_0,
281 nv_rd32(priv, 0x400380));
282 nvkm_pgr_vstatus_print(priv, 1, nv50_pgr_vstatus_1,
283 nv_rd32(priv, 0x400384));
284 nvkm_pgr_vstatus_print(priv, 2, nv50_pgr_vstatus_2,
285 nv_rd32(priv, 0x400388));
286 }
287
288
289 nv_wr32(priv, 0x100c80, 0x00000001);
290 if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
291 nv_error(priv, "vm flush timeout\n");
292 nv_mask(priv, 0x400500, 0x00000001, 0x00000001);
293 spin_unlock_irqrestore(&priv->lock, flags);
294 return timeout ? -EBUSY : 0;
295}
296 104
297static const struct nvkm_bitfield nv50_mp_exec_errors[] = { 105static const struct nvkm_bitfield nv50_mp_exec_errors[] = {
298 { 0x01, "STACK_UNDERFLOW" }, 106 { 0x01, "STACK_UNDERFLOW" },
@@ -427,157 +235,172 @@ static const struct nvkm_bitfield nv50_gr_trap_prop[] = {
427}; 235};
428 236
429static void 237static void
430nv50_priv_prop_trap(struct nv50_gr_priv *priv, 238nv50_gr_prop_trap(struct nv50_gr *gr, u32 ustatus_addr, u32 ustatus, u32 tp)
431 u32 ustatus_addr, u32 ustatus, u32 tp)
432{ 239{
433 u32 e0c = nv_rd32(priv, ustatus_addr + 0x04); 240 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
434 u32 e10 = nv_rd32(priv, ustatus_addr + 0x08); 241 struct nvkm_device *device = subdev->device;
435 u32 e14 = nv_rd32(priv, ustatus_addr + 0x0c); 242 u32 e0c = nvkm_rd32(device, ustatus_addr + 0x04);
436 u32 e18 = nv_rd32(priv, ustatus_addr + 0x10); 243 u32 e10 = nvkm_rd32(device, ustatus_addr + 0x08);
437 u32 e1c = nv_rd32(priv, ustatus_addr + 0x14); 244 u32 e14 = nvkm_rd32(device, ustatus_addr + 0x0c);
438 u32 e20 = nv_rd32(priv, ustatus_addr + 0x18); 245 u32 e18 = nvkm_rd32(device, ustatus_addr + 0x10);
439 u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c); 246 u32 e1c = nvkm_rd32(device, ustatus_addr + 0x14);
247 u32 e20 = nvkm_rd32(device, ustatus_addr + 0x18);
248 u32 e24 = nvkm_rd32(device, ustatus_addr + 0x1c);
249 char msg[128];
440 250
441 /* CUDA memory: l[], g[] or stack. */ 251 /* CUDA memory: l[], g[] or stack. */
442 if (ustatus & 0x00000080) { 252 if (ustatus & 0x00000080) {
443 if (e18 & 0x80000000) { 253 if (e18 & 0x80000000) {
444 /* g[] read fault? */ 254 /* g[] read fault? */
445 nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n", 255 nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
446 tp, e14, e10 | ((e18 >> 24) & 0x1f)); 256 tp, e14, e10 | ((e18 >> 24) & 0x1f));
447 e18 &= ~0x1f000000; 257 e18 &= ~0x1f000000;
448 } else if (e18 & 0xc) { 258 } else if (e18 & 0xc) {
449 /* g[] write fault? */ 259 /* g[] write fault? */
450 nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n", 260 nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
451 tp, e14, e10 | ((e18 >> 7) & 0x1f)); 261 tp, e14, e10 | ((e18 >> 7) & 0x1f));
452 e18 &= ~0x00000f80; 262 e18 &= ~0x00000f80;
453 } else { 263 } else {
454 nv_error(priv, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n", 264 nvkm_error(subdev, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
455 tp, e14, e10); 265 tp, e14, e10);
456 } 266 }
457 ustatus &= ~0x00000080; 267 ustatus &= ~0x00000080;
458 } 268 }
459 if (ustatus) { 269 if (ustatus) {
460 nv_error(priv, "TRAP_PROP - TP %d -", tp); 270 nvkm_snprintbf(msg, sizeof(msg), nv50_gr_trap_prop, ustatus);
461 nvkm_bitfield_print(nv50_gr_trap_prop, ustatus); 271 nvkm_error(subdev, "TRAP_PROP - TP %d - %08x [%s] - "
462 pr_cont(" - Address %02x%08x\n", e14, e10); 272 "Address %02x%08x\n",
273 tp, ustatus, msg, e14, e10);
463 } 274 }
464 nv_error(priv, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", 275 nvkm_error(subdev, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
465 tp, e0c, e18, e1c, e20, e24); 276 tp, e0c, e18, e1c, e20, e24);
466} 277}
467 278
468static void 279static void
469nv50_priv_mp_trap(struct nv50_gr_priv *priv, int tpid, int display) 280nv50_gr_mp_trap(struct nv50_gr *gr, int tpid, int display)
470{ 281{
471 u32 units = nv_rd32(priv, 0x1540); 282 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
283 struct nvkm_device *device = subdev->device;
284 u32 units = nvkm_rd32(device, 0x1540);
472 u32 addr, mp10, status, pc, oplow, ophigh; 285 u32 addr, mp10, status, pc, oplow, ophigh;
286 char msg[128];
473 int i; 287 int i;
474 int mps = 0; 288 int mps = 0;
475 for (i = 0; i < 4; i++) { 289 for (i = 0; i < 4; i++) {
476 if (!(units & 1 << (i+24))) 290 if (!(units & 1 << (i+24)))
477 continue; 291 continue;
478 if (nv_device(priv)->chipset < 0xa0) 292 if (device->chipset < 0xa0)
479 addr = 0x408200 + (tpid << 12) + (i << 7); 293 addr = 0x408200 + (tpid << 12) + (i << 7);
480 else 294 else
481 addr = 0x408100 + (tpid << 11) + (i << 7); 295 addr = 0x408100 + (tpid << 11) + (i << 7);
482 mp10 = nv_rd32(priv, addr + 0x10); 296 mp10 = nvkm_rd32(device, addr + 0x10);
483 status = nv_rd32(priv, addr + 0x14); 297 status = nvkm_rd32(device, addr + 0x14);
484 if (!status) 298 if (!status)
485 continue; 299 continue;
486 if (display) { 300 if (display) {
487 nv_rd32(priv, addr + 0x20); 301 nvkm_rd32(device, addr + 0x20);
488 pc = nv_rd32(priv, addr + 0x24); 302 pc = nvkm_rd32(device, addr + 0x24);
489 oplow = nv_rd32(priv, addr + 0x70); 303 oplow = nvkm_rd32(device, addr + 0x70);
490 ophigh = nv_rd32(priv, addr + 0x74); 304 ophigh = nvkm_rd32(device, addr + 0x74);
491 nv_error(priv, "TRAP_MP_EXEC - " 305 nvkm_snprintbf(msg, sizeof(msg),
492 "TP %d MP %d:", tpid, i); 306 nv50_mp_exec_errors, status);
493 nvkm_bitfield_print(nv50_mp_exec_errors, status); 307 nvkm_error(subdev, "TRAP_MP_EXEC - TP %d MP %d: "
494 pr_cont(" at %06x warp %d, opcode %08x %08x\n", 308 "%08x [%s] at %06x warp %d, "
495 pc&0xffffff, pc >> 24, 309 "opcode %08x %08x\n",
496 oplow, ophigh); 310 tpid, i, status, msg, pc & 0xffffff,
311 pc >> 24, oplow, ophigh);
497 } 312 }
498 nv_wr32(priv, addr + 0x10, mp10); 313 nvkm_wr32(device, addr + 0x10, mp10);
499 nv_wr32(priv, addr + 0x14, 0); 314 nvkm_wr32(device, addr + 0x14, 0);
500 mps++; 315 mps++;
501 } 316 }
502 if (!mps && display) 317 if (!mps && display)
503 nv_error(priv, "TRAP_MP_EXEC - TP %d: " 318 nvkm_error(subdev, "TRAP_MP_EXEC - TP %d: "
504 "No MPs claiming errors?\n", tpid); 319 "No MPs claiming errors?\n", tpid);
505} 320}
506 321
507static void 322static void
508nv50_priv_tp_trap(struct nv50_gr_priv *priv, int type, u32 ustatus_old, 323nv50_gr_tp_trap(struct nv50_gr *gr, int type, u32 ustatus_old,
509 u32 ustatus_new, int display, const char *name) 324 u32 ustatus_new, int display, const char *name)
510{ 325{
326 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
327 struct nvkm_device *device = subdev->device;
328 u32 units = nvkm_rd32(device, 0x1540);
511 int tps = 0; 329 int tps = 0;
512 u32 units = nv_rd32(priv, 0x1540);
513 int i, r; 330 int i, r;
331 char msg[128];
514 u32 ustatus_addr, ustatus; 332 u32 ustatus_addr, ustatus;
515 for (i = 0; i < 16; i++) { 333 for (i = 0; i < 16; i++) {
516 if (!(units & (1 << i))) 334 if (!(units & (1 << i)))
517 continue; 335 continue;
518 if (nv_device(priv)->chipset < 0xa0) 336 if (device->chipset < 0xa0)
519 ustatus_addr = ustatus_old + (i << 12); 337 ustatus_addr = ustatus_old + (i << 12);
520 else 338 else
521 ustatus_addr = ustatus_new + (i << 11); 339 ustatus_addr = ustatus_new + (i << 11);
522 ustatus = nv_rd32(priv, ustatus_addr) & 0x7fffffff; 340 ustatus = nvkm_rd32(device, ustatus_addr) & 0x7fffffff;
523 if (!ustatus) 341 if (!ustatus)
524 continue; 342 continue;
525 tps++; 343 tps++;
526 switch (type) { 344 switch (type) {
527 case 6: /* texture error... unknown for now */ 345 case 6: /* texture error... unknown for now */
528 if (display) { 346 if (display) {
529 nv_error(priv, "magic set %d:\n", i); 347 nvkm_error(subdev, "magic set %d:\n", i);
530 for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) 348 for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
531 nv_error(priv, "\t0x%08x: 0x%08x\n", r, 349 nvkm_error(subdev, "\t%08x: %08x\n", r,
532 nv_rd32(priv, r)); 350 nvkm_rd32(device, r));
533 if (ustatus) { 351 if (ustatus) {
534 nv_error(priv, "%s - TP%d:", name, i); 352 nvkm_snprintbf(msg, sizeof(msg),
535 nvkm_bitfield_print(nv50_tex_traps, 353 nv50_tex_traps, ustatus);
536 ustatus); 354 nvkm_error(subdev,
537 pr_cont("\n"); 355 "%s - TP%d: %08x [%s]\n",
356 name, i, ustatus, msg);
538 ustatus = 0; 357 ustatus = 0;
539 } 358 }
540 } 359 }
541 break; 360 break;
542 case 7: /* MP error */ 361 case 7: /* MP error */
543 if (ustatus & 0x04030000) { 362 if (ustatus & 0x04030000) {
544 nv50_priv_mp_trap(priv, i, display); 363 nv50_gr_mp_trap(gr, i, display);
545 ustatus &= ~0x04030000; 364 ustatus &= ~0x04030000;
546 } 365 }
547 if (ustatus && display) { 366 if (ustatus && display) {
548 nv_error(priv, "%s - TP%d:", name, i); 367 nvkm_snprintbf(msg, sizeof(msg),
549 nvkm_bitfield_print(nv50_mpc_traps, ustatus); 368 nv50_mpc_traps, ustatus);
550 pr_cont("\n"); 369 nvkm_error(subdev, "%s - TP%d: %08x [%s]\n",
370 name, i, ustatus, msg);
551 ustatus = 0; 371 ustatus = 0;
552 } 372 }
553 break; 373 break;
554 case 8: /* PROP error */ 374 case 8: /* PROP error */
555 if (display) 375 if (display)
556 nv50_priv_prop_trap( 376 nv50_gr_prop_trap(
557 priv, ustatus_addr, ustatus, i); 377 gr, ustatus_addr, ustatus, i);
558 ustatus = 0; 378 ustatus = 0;
559 break; 379 break;
560 } 380 }
561 if (ustatus) { 381 if (ustatus) {
562 if (display) 382 if (display)
563 nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); 383 nvkm_error(subdev, "%s - TP%d: Unhandled ustatus %08x\n", name, i, ustatus);
564 } 384 }
565 nv_wr32(priv, ustatus_addr, 0xc0000000); 385 nvkm_wr32(device, ustatus_addr, 0xc0000000);
566 } 386 }
567 387
568 if (!tps && display) 388 if (!tps && display)
569 nv_warn(priv, "%s - No TPs claiming errors?\n", name); 389 nvkm_warn(subdev, "%s - No TPs claiming errors?\n", name);
570} 390}
571 391
572static int 392static int
573nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display, 393nv50_gr_trap_handler(struct nv50_gr *gr, u32 display,
574 int chid, u64 inst, struct nvkm_object *engctx) 394 int chid, u64 inst, const char *name)
575{ 395{
576 u32 status = nv_rd32(priv, 0x400108); 396 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
397 struct nvkm_device *device = subdev->device;
398 u32 status = nvkm_rd32(device, 0x400108);
577 u32 ustatus; 399 u32 ustatus;
400 char msg[128];
578 401
579 if (!status && display) { 402 if (!status && display) {
580 nv_error(priv, "TRAP: no units reporting traps?\n"); 403 nvkm_error(subdev, "TRAP: no units reporting traps?\n");
581 return 1; 404 return 1;
582 } 405 }
583 406
@@ -585,71 +408,72 @@ nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display,
585 * COND, QUERY. If you get a trap from it, the command is still stuck 408 * COND, QUERY. If you get a trap from it, the command is still stuck
586 * in DISPATCH and you need to do something about it. */ 409 * in DISPATCH and you need to do something about it. */
587 if (status & 0x001) { 410 if (status & 0x001) {
588 ustatus = nv_rd32(priv, 0x400804) & 0x7fffffff; 411 ustatus = nvkm_rd32(device, 0x400804) & 0x7fffffff;
589 if (!ustatus && display) { 412 if (!ustatus && display) {
590 nv_error(priv, "TRAP_DISPATCH - no ustatus?\n"); 413 nvkm_error(subdev, "TRAP_DISPATCH - no ustatus?\n");
591 } 414 }
592 415
593 nv_wr32(priv, 0x400500, 0x00000000); 416 nvkm_wr32(device, 0x400500, 0x00000000);
594 417
595 /* Known to be triggered by screwed up NOTIFY and COND... */ 418 /* Known to be triggered by screwed up NOTIFY and COND... */
596 if (ustatus & 0x00000001) { 419 if (ustatus & 0x00000001) {
597 u32 addr = nv_rd32(priv, 0x400808); 420 u32 addr = nvkm_rd32(device, 0x400808);
598 u32 subc = (addr & 0x00070000) >> 16; 421 u32 subc = (addr & 0x00070000) >> 16;
599 u32 mthd = (addr & 0x00001ffc); 422 u32 mthd = (addr & 0x00001ffc);
600 u32 datal = nv_rd32(priv, 0x40080c); 423 u32 datal = nvkm_rd32(device, 0x40080c);
601 u32 datah = nv_rd32(priv, 0x400810); 424 u32 datah = nvkm_rd32(device, 0x400810);
602 u32 class = nv_rd32(priv, 0x400814); 425 u32 class = nvkm_rd32(device, 0x400814);
603 u32 r848 = nv_rd32(priv, 0x400848); 426 u32 r848 = nvkm_rd32(device, 0x400848);
604 427
605 nv_error(priv, "TRAP DISPATCH_FAULT\n"); 428 nvkm_error(subdev, "TRAP DISPATCH_FAULT\n");
606 if (display && (addr & 0x80000000)) { 429 if (display && (addr & 0x80000000)) {
607 nv_error(priv, 430 nvkm_error(subdev,
608 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x%08x 400808 0x%08x 400848 0x%08x\n", 431 "ch %d [%010llx %s] subc %d "
609 chid, inst, 432 "class %04x mthd %04x data %08x%08x "
610 nvkm_client_name(engctx), subc, 433 "400808 %08x 400848 %08x\n",
611 class, mthd, datah, datal, addr, r848); 434 chid, inst, name, subc, class, mthd,
435 datah, datal, addr, r848);
612 } else 436 } else
613 if (display) { 437 if (display) {
614 nv_error(priv, "no stuck command?\n"); 438 nvkm_error(subdev, "no stuck command?\n");
615 } 439 }
616 440
617 nv_wr32(priv, 0x400808, 0); 441 nvkm_wr32(device, 0x400808, 0);
618 nv_wr32(priv, 0x4008e8, nv_rd32(priv, 0x4008e8) & 3); 442 nvkm_wr32(device, 0x4008e8, nvkm_rd32(device, 0x4008e8) & 3);
619 nv_wr32(priv, 0x400848, 0); 443 nvkm_wr32(device, 0x400848, 0);
620 ustatus &= ~0x00000001; 444 ustatus &= ~0x00000001;
621 } 445 }
622 446
623 if (ustatus & 0x00000002) { 447 if (ustatus & 0x00000002) {
624 u32 addr = nv_rd32(priv, 0x40084c); 448 u32 addr = nvkm_rd32(device, 0x40084c);
625 u32 subc = (addr & 0x00070000) >> 16; 449 u32 subc = (addr & 0x00070000) >> 16;
626 u32 mthd = (addr & 0x00001ffc); 450 u32 mthd = (addr & 0x00001ffc);
627 u32 data = nv_rd32(priv, 0x40085c); 451 u32 data = nvkm_rd32(device, 0x40085c);
628 u32 class = nv_rd32(priv, 0x400814); 452 u32 class = nvkm_rd32(device, 0x400814);
629 453
630 nv_error(priv, "TRAP DISPATCH_QUERY\n"); 454 nvkm_error(subdev, "TRAP DISPATCH_QUERY\n");
631 if (display && (addr & 0x80000000)) { 455 if (display && (addr & 0x80000000)) {
632 nv_error(priv, 456 nvkm_error(subdev,
633 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x 40084c 0x%08x\n", 457 "ch %d [%010llx %s] subc %d "
634 chid, inst, 458 "class %04x mthd %04x data %08x "
635 nvkm_client_name(engctx), subc, 459 "40084c %08x\n", chid, inst, name,
636 class, mthd, data, addr); 460 subc, class, mthd, data, addr);
637 } else 461 } else
638 if (display) { 462 if (display) {
639 nv_error(priv, "no stuck command?\n"); 463 nvkm_error(subdev, "no stuck command?\n");
640 } 464 }
641 465
642 nv_wr32(priv, 0x40084c, 0); 466 nvkm_wr32(device, 0x40084c, 0);
643 ustatus &= ~0x00000002; 467 ustatus &= ~0x00000002;
644 } 468 }
645 469
646 if (ustatus && display) { 470 if (ustatus && display) {
647 nv_error(priv, "TRAP_DISPATCH (unknown " 471 nvkm_error(subdev, "TRAP_DISPATCH "
648 "0x%08x)\n", ustatus); 472 "(unknown %08x)\n", ustatus);
649 } 473 }
650 474
651 nv_wr32(priv, 0x400804, 0xc0000000); 475 nvkm_wr32(device, 0x400804, 0xc0000000);
652 nv_wr32(priv, 0x400108, 0x001); 476 nvkm_wr32(device, 0x400108, 0x001);
653 status &= ~0x001; 477 status &= ~0x001;
654 if (!status) 478 if (!status)
655 return 0; 479 return 0;
@@ -657,81 +481,91 @@ nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display,
657 481
658 /* M2MF: Memory to memory copy engine. */ 482 /* M2MF: Memory to memory copy engine. */
659 if (status & 0x002) { 483 if (status & 0x002) {
660 u32 ustatus = nv_rd32(priv, 0x406800) & 0x7fffffff; 484 u32 ustatus = nvkm_rd32(device, 0x406800) & 0x7fffffff;
661 if (display) { 485 if (display) {
662 nv_error(priv, "TRAP_M2MF"); 486 nvkm_snprintbf(msg, sizeof(msg),
663 nvkm_bitfield_print(nv50_gr_trap_m2mf, ustatus); 487 nv50_gr_trap_m2mf, ustatus);
664 pr_cont("\n"); 488 nvkm_error(subdev, "TRAP_M2MF %08x [%s]\n",
665 nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n", 489 ustatus, msg);
666 nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808), 490 nvkm_error(subdev, "TRAP_M2MF %08x %08x %08x %08x\n",
667 nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810)); 491 nvkm_rd32(device, 0x406804),
668 492 nvkm_rd32(device, 0x406808),
493 nvkm_rd32(device, 0x40680c),
494 nvkm_rd32(device, 0x406810));
669 } 495 }
670 496
671 /* No sane way found yet -- just reset the bugger. */ 497 /* No sane way found yet -- just reset the bugger. */
672 nv_wr32(priv, 0x400040, 2); 498 nvkm_wr32(device, 0x400040, 2);
673 nv_wr32(priv, 0x400040, 0); 499 nvkm_wr32(device, 0x400040, 0);
674 nv_wr32(priv, 0x406800, 0xc0000000); 500 nvkm_wr32(device, 0x406800, 0xc0000000);
675 nv_wr32(priv, 0x400108, 0x002); 501 nvkm_wr32(device, 0x400108, 0x002);
676 status &= ~0x002; 502 status &= ~0x002;
677 } 503 }
678 504
679 /* VFETCH: Fetches data from vertex buffers. */ 505 /* VFETCH: Fetches data from vertex buffers. */
680 if (status & 0x004) { 506 if (status & 0x004) {
681 u32 ustatus = nv_rd32(priv, 0x400c04) & 0x7fffffff; 507 u32 ustatus = nvkm_rd32(device, 0x400c04) & 0x7fffffff;
682 if (display) { 508 if (display) {
683 nv_error(priv, "TRAP_VFETCH"); 509 nvkm_snprintbf(msg, sizeof(msg),
684 nvkm_bitfield_print(nv50_gr_trap_vfetch, ustatus); 510 nv50_gr_trap_vfetch, ustatus);
685 pr_cont("\n"); 511 nvkm_error(subdev, "TRAP_VFETCH %08x [%s]\n",
686 nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n", 512 ustatus, msg);
687 nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08), 513 nvkm_error(subdev, "TRAP_VFETCH %08x %08x %08x %08x\n",
688 nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10)); 514 nvkm_rd32(device, 0x400c00),
515 nvkm_rd32(device, 0x400c08),
516 nvkm_rd32(device, 0x400c0c),
517 nvkm_rd32(device, 0x400c10));
689 } 518 }
690 519
691 nv_wr32(priv, 0x400c04, 0xc0000000); 520 nvkm_wr32(device, 0x400c04, 0xc0000000);
692 nv_wr32(priv, 0x400108, 0x004); 521 nvkm_wr32(device, 0x400108, 0x004);
693 status &= ~0x004; 522 status &= ~0x004;
694 } 523 }
695 524
696 /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ 525 /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
697 if (status & 0x008) { 526 if (status & 0x008) {
698 ustatus = nv_rd32(priv, 0x401800) & 0x7fffffff; 527 ustatus = nvkm_rd32(device, 0x401800) & 0x7fffffff;
699 if (display) { 528 if (display) {
700 nv_error(priv, "TRAP_STRMOUT"); 529 nvkm_snprintbf(msg, sizeof(msg),
701 nvkm_bitfield_print(nv50_gr_trap_strmout, ustatus); 530 nv50_gr_trap_strmout, ustatus);
702 pr_cont("\n"); 531 nvkm_error(subdev, "TRAP_STRMOUT %08x [%s]\n",
703 nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n", 532 ustatus, msg);
704 nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808), 533 nvkm_error(subdev, "TRAP_STRMOUT %08x %08x %08x %08x\n",
705 nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810)); 534 nvkm_rd32(device, 0x401804),
706 535 nvkm_rd32(device, 0x401808),
536 nvkm_rd32(device, 0x40180c),
537 nvkm_rd32(device, 0x401810));
707 } 538 }
708 539
709 /* No sane way found yet -- just reset the bugger. */ 540 /* No sane way found yet -- just reset the bugger. */
710 nv_wr32(priv, 0x400040, 0x80); 541 nvkm_wr32(device, 0x400040, 0x80);
711 nv_wr32(priv, 0x400040, 0); 542 nvkm_wr32(device, 0x400040, 0);
712 nv_wr32(priv, 0x401800, 0xc0000000); 543 nvkm_wr32(device, 0x401800, 0xc0000000);
713 nv_wr32(priv, 0x400108, 0x008); 544 nvkm_wr32(device, 0x400108, 0x008);
714 status &= ~0x008; 545 status &= ~0x008;
715 } 546 }
716 547
717 /* CCACHE: Handles code and c[] caches and fills them. */ 548 /* CCACHE: Handles code and c[] caches and fills them. */
718 if (status & 0x010) { 549 if (status & 0x010) {
719 ustatus = nv_rd32(priv, 0x405018) & 0x7fffffff; 550 ustatus = nvkm_rd32(device, 0x405018) & 0x7fffffff;
720 if (display) { 551 if (display) {
721 nv_error(priv, "TRAP_CCACHE"); 552 nvkm_snprintbf(msg, sizeof(msg),
722 nvkm_bitfield_print(nv50_gr_trap_ccache, ustatus); 553 nv50_gr_trap_ccache, ustatus);
723 pr_cont("\n"); 554 nvkm_error(subdev, "TRAP_CCACHE %08x [%s]\n",
724 nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x" 555 ustatus, msg);
725 " %08x %08x %08x\n", 556 nvkm_error(subdev, "TRAP_CCACHE %08x %08x %08x %08x "
726 nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004), 557 "%08x %08x %08x\n",
727 nv_rd32(priv, 0x405008), nv_rd32(priv, 0x40500c), 558 nvkm_rd32(device, 0x405000),
728 nv_rd32(priv, 0x405010), nv_rd32(priv, 0x405014), 559 nvkm_rd32(device, 0x405004),
729 nv_rd32(priv, 0x40501c)); 560 nvkm_rd32(device, 0x405008),
730 561 nvkm_rd32(device, 0x40500c),
562 nvkm_rd32(device, 0x405010),
563 nvkm_rd32(device, 0x405014),
564 nvkm_rd32(device, 0x40501c));
731 } 565 }
732 566
733 nv_wr32(priv, 0x405018, 0xc0000000); 567 nvkm_wr32(device, 0x405018, 0xc0000000);
734 nv_wr32(priv, 0x400108, 0x010); 568 nvkm_wr32(device, 0x400108, 0x010);
735 status &= ~0x010; 569 status &= ~0x010;
736 } 570 }
737 571
@@ -739,239 +573,174 @@ nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display,
739 * remaining, so try to handle it anyway. Perhaps related to that 573 * remaining, so try to handle it anyway. Perhaps related to that
740 * unknown DMA slot on tesla? */ 574 * unknown DMA slot on tesla? */
741 if (status & 0x20) { 575 if (status & 0x20) {
742 ustatus = nv_rd32(priv, 0x402000) & 0x7fffffff; 576 ustatus = nvkm_rd32(device, 0x402000) & 0x7fffffff;
743 if (display) 577 if (display)
744 nv_error(priv, "TRAP_UNKC04 0x%08x\n", ustatus); 578 nvkm_error(subdev, "TRAP_UNKC04 %08x\n", ustatus);
745 nv_wr32(priv, 0x402000, 0xc0000000); 579 nvkm_wr32(device, 0x402000, 0xc0000000);
746 /* no status modifiction on purpose */ 580 /* no status modifiction on purpose */
747 } 581 }
748 582
749 /* TEXTURE: CUDA texturing units */ 583 /* TEXTURE: CUDA texturing units */
750 if (status & 0x040) { 584 if (status & 0x040) {
751 nv50_priv_tp_trap(priv, 6, 0x408900, 0x408600, display, 585 nv50_gr_tp_trap(gr, 6, 0x408900, 0x408600, display,
752 "TRAP_TEXTURE"); 586 "TRAP_TEXTURE");
753 nv_wr32(priv, 0x400108, 0x040); 587 nvkm_wr32(device, 0x400108, 0x040);
754 status &= ~0x040; 588 status &= ~0x040;
755 } 589 }
756 590
757 /* MP: CUDA execution engines. */ 591 /* MP: CUDA execution engines. */
758 if (status & 0x080) { 592 if (status & 0x080) {
759 nv50_priv_tp_trap(priv, 7, 0x408314, 0x40831c, display, 593 nv50_gr_tp_trap(gr, 7, 0x408314, 0x40831c, display,
760 "TRAP_MP"); 594 "TRAP_MP");
761 nv_wr32(priv, 0x400108, 0x080); 595 nvkm_wr32(device, 0x400108, 0x080);
762 status &= ~0x080; 596 status &= ~0x080;
763 } 597 }
764 598
765 /* PROP: Handles TP-initiated uncached memory accesses: 599 /* PROP: Handles TP-initiated uncached memory accesses:
766 * l[], g[], stack, 2d surfaces, render targets. */ 600 * l[], g[], stack, 2d surfaces, render targets. */
767 if (status & 0x100) { 601 if (status & 0x100) {
768 nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display, 602 nv50_gr_tp_trap(gr, 8, 0x408e08, 0x408708, display,
769 "TRAP_PROP"); 603 "TRAP_PROP");
770 nv_wr32(priv, 0x400108, 0x100); 604 nvkm_wr32(device, 0x400108, 0x100);
771 status &= ~0x100; 605 status &= ~0x100;
772 } 606 }
773 607
774 if (status) { 608 if (status) {
775 if (display) 609 if (display)
776 nv_error(priv, "TRAP: unknown 0x%08x\n", status); 610 nvkm_error(subdev, "TRAP: unknown %08x\n", status);
777 nv_wr32(priv, 0x400108, status); 611 nvkm_wr32(device, 0x400108, status);
778 } 612 }
779 613
780 return 1; 614 return 1;
781} 615}
782 616
783static void 617void
784nv50_gr_intr(struct nvkm_subdev *subdev) 618nv50_gr_intr(struct nvkm_gr *base)
785{ 619{
786 struct nvkm_fifo *pfifo = nvkm_fifo(subdev); 620 struct nv50_gr *gr = nv50_gr(base);
787 struct nvkm_engine *engine = nv_engine(subdev); 621 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
788 struct nvkm_object *engctx; 622 struct nvkm_device *device = subdev->device;
789 struct nvkm_handle *handle = NULL; 623 struct nvkm_fifo_chan *chan;
790 struct nv50_gr_priv *priv = (void *)subdev; 624 u32 stat = nvkm_rd32(device, 0x400100);
791 u32 stat = nv_rd32(priv, 0x400100); 625 u32 inst = nvkm_rd32(device, 0x40032c) & 0x0fffffff;
792 u32 inst = nv_rd32(priv, 0x40032c) & 0x0fffffff; 626 u32 addr = nvkm_rd32(device, 0x400704);
793 u32 addr = nv_rd32(priv, 0x400704);
794 u32 subc = (addr & 0x00070000) >> 16; 627 u32 subc = (addr & 0x00070000) >> 16;
795 u32 mthd = (addr & 0x00001ffc); 628 u32 mthd = (addr & 0x00001ffc);
796 u32 data = nv_rd32(priv, 0x400708); 629 u32 data = nvkm_rd32(device, 0x400708);
797 u32 class = nv_rd32(priv, 0x400814); 630 u32 class = nvkm_rd32(device, 0x400814);
798 u32 show = stat, show_bitfield = stat; 631 u32 show = stat, show_bitfield = stat;
799 int chid; 632 const struct nvkm_enum *en;
800 633 unsigned long flags;
801 engctx = nvkm_engctx_get(engine, inst); 634 const char *name = "unknown";
802 chid = pfifo->chid(pfifo, engctx); 635 char msg[128];
803 636 int chid = -1;
804 if (stat & 0x00000010) { 637
805 handle = nvkm_handle_get_class(engctx, class); 638 chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
806 if (handle && !nv_call(handle->object, mthd, data)) 639 if (chan) {
807 show &= ~0x00000010; 640 name = chan->object.client->name;
808 nvkm_handle_put(handle); 641 chid = chan->chid;
809 } 642 }
810 643
811 if (show & 0x00100000) { 644 if (show & 0x00100000) {
812 u32 ecode = nv_rd32(priv, 0x400110); 645 u32 ecode = nvkm_rd32(device, 0x400110);
813 nv_error(priv, "DATA_ERROR "); 646 en = nvkm_enum_find(nv50_data_error_names, ecode);
814 nvkm_enum_print(nv50_data_error_names, ecode); 647 nvkm_error(subdev, "DATA_ERROR %08x [%s]\n",
815 pr_cont("\n"); 648 ecode, en ? en->name : "");
816 show_bitfield &= ~0x00100000; 649 show_bitfield &= ~0x00100000;
817 } 650 }
818 651
819 if (stat & 0x00200000) { 652 if (stat & 0x00200000) {
820 if (!nv50_gr_trap_handler(priv, show, chid, (u64)inst << 12, 653 if (!nv50_gr_trap_handler(gr, show, chid, (u64)inst << 12, name))
821 engctx))
822 show &= ~0x00200000; 654 show &= ~0x00200000;
823 show_bitfield &= ~0x00200000; 655 show_bitfield &= ~0x00200000;
824 } 656 }
825 657
826 nv_wr32(priv, 0x400100, stat); 658 nvkm_wr32(device, 0x400100, stat);
827 nv_wr32(priv, 0x400500, 0x00010001); 659 nvkm_wr32(device, 0x400500, 0x00010001);
828 660
829 if (show) { 661 if (show) {
830 show &= show_bitfield; 662 show &= show_bitfield;
831 if (show) { 663 nvkm_snprintbf(msg, sizeof(msg), nv50_gr_intr_name, show);
832 nv_error(priv, "%s", ""); 664 nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] subc %d "
833 nvkm_bitfield_print(nv50_gr_intr_name, show); 665 "class %04x mthd %04x data %08x\n",
834 pr_cont("\n"); 666 stat, msg, chid, (u64)inst << 12, name,
835 } 667 subc, class, mthd, data);
836 nv_error(priv,
837 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
838 chid, (u64)inst << 12, nvkm_client_name(engctx),
839 subc, class, mthd, data);
840 } 668 }
841 669
842 if (nv_rd32(priv, 0x400824) & (1 << 31)) 670 if (nvkm_rd32(device, 0x400824) & (1 << 31))
843 nv_wr32(priv, 0x400824, nv_rd32(priv, 0x400824) & ~(1 << 31)); 671 nvkm_wr32(device, 0x400824, nvkm_rd32(device, 0x400824) & ~(1 << 31));
844 672
845 nvkm_engctx_put(engctx); 673 nvkm_fifo_chan_put(device->fifo, flags, &chan);
846} 674}
847 675
848static int 676int
849nv50_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 677nv50_gr_init(struct nvkm_gr *base)
850 struct nvkm_oclass *oclass, void *data, u32 size,
851 struct nvkm_object **pobject)
852{ 678{
853 struct nv50_gr_priv *priv; 679 struct nv50_gr *gr = nv50_gr(base);
854 int ret; 680 struct nvkm_device *device = gr->base.engine.subdev.device;
855
856 ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
857 *pobject = nv_object(priv);
858 if (ret)
859 return ret;
860
861 nv_subdev(priv)->unit = 0x00201000;
862 nv_subdev(priv)->intr = nv50_gr_intr;
863 nv_engine(priv)->cclass = &nv50_gr_cclass;
864
865 priv->base.units = nv50_gr_units;
866
867 switch (nv_device(priv)->chipset) {
868 case 0x50:
869 nv_engine(priv)->sclass = nv50_gr_sclass;
870 break;
871 case 0x84:
872 case 0x86:
873 case 0x92:
874 case 0x94:
875 case 0x96:
876 case 0x98:
877 nv_engine(priv)->sclass = g84_gr_sclass;
878 break;
879 case 0xa0:
880 case 0xaa:
881 case 0xac:
882 nv_engine(priv)->sclass = gt200_gr_sclass;
883 break;
884 case 0xa3:
885 case 0xa5:
886 case 0xa8:
887 nv_engine(priv)->sclass = gt215_gr_sclass;
888 break;
889 case 0xaf:
890 nv_engine(priv)->sclass = mcp89_gr_sclass;
891 break;
892
893 }
894
895 /* unfortunate hw bug workaround... */
896 if (nv_device(priv)->chipset != 0x50 &&
897 nv_device(priv)->chipset != 0xac)
898 nv_engine(priv)->tlb_flush = g84_gr_tlb_flush;
899
900 spin_lock_init(&priv->lock);
901 return 0;
902}
903
904static int
905nv50_gr_init(struct nvkm_object *object)
906{
907 struct nv50_gr_priv *priv = (void *)object;
908 int ret, units, i; 681 int ret, units, i;
909 682
910 ret = nvkm_gr_init(&priv->base);
911 if (ret)
912 return ret;
913
914 /* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */ 683 /* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
915 nv_wr32(priv, 0x40008c, 0x00000004); 684 nvkm_wr32(device, 0x40008c, 0x00000004);
916 685
917 /* reset/enable traps and interrupts */ 686 /* reset/enable traps and interrupts */
918 nv_wr32(priv, 0x400804, 0xc0000000); 687 nvkm_wr32(device, 0x400804, 0xc0000000);
919 nv_wr32(priv, 0x406800, 0xc0000000); 688 nvkm_wr32(device, 0x406800, 0xc0000000);
920 nv_wr32(priv, 0x400c04, 0xc0000000); 689 nvkm_wr32(device, 0x400c04, 0xc0000000);
921 nv_wr32(priv, 0x401800, 0xc0000000); 690 nvkm_wr32(device, 0x401800, 0xc0000000);
922 nv_wr32(priv, 0x405018, 0xc0000000); 691 nvkm_wr32(device, 0x405018, 0xc0000000);
923 nv_wr32(priv, 0x402000, 0xc0000000); 692 nvkm_wr32(device, 0x402000, 0xc0000000);
924 693
925 units = nv_rd32(priv, 0x001540); 694 units = nvkm_rd32(device, 0x001540);
926 for (i = 0; i < 16; i++) { 695 for (i = 0; i < 16; i++) {
927 if (!(units & (1 << i))) 696 if (!(units & (1 << i)))
928 continue; 697 continue;
929 698
930 if (nv_device(priv)->chipset < 0xa0) { 699 if (device->chipset < 0xa0) {
931 nv_wr32(priv, 0x408900 + (i << 12), 0xc0000000); 700 nvkm_wr32(device, 0x408900 + (i << 12), 0xc0000000);
932 nv_wr32(priv, 0x408e08 + (i << 12), 0xc0000000); 701 nvkm_wr32(device, 0x408e08 + (i << 12), 0xc0000000);
933 nv_wr32(priv, 0x408314 + (i << 12), 0xc0000000); 702 nvkm_wr32(device, 0x408314 + (i << 12), 0xc0000000);
934 } else { 703 } else {
935 nv_wr32(priv, 0x408600 + (i << 11), 0xc0000000); 704 nvkm_wr32(device, 0x408600 + (i << 11), 0xc0000000);
936 nv_wr32(priv, 0x408708 + (i << 11), 0xc0000000); 705 nvkm_wr32(device, 0x408708 + (i << 11), 0xc0000000);
937 nv_wr32(priv, 0x40831c + (i << 11), 0xc0000000); 706 nvkm_wr32(device, 0x40831c + (i << 11), 0xc0000000);
938 } 707 }
939 } 708 }
940 709
941 nv_wr32(priv, 0x400108, 0xffffffff); 710 nvkm_wr32(device, 0x400108, 0xffffffff);
942 nv_wr32(priv, 0x400138, 0xffffffff); 711 nvkm_wr32(device, 0x400138, 0xffffffff);
943 nv_wr32(priv, 0x400100, 0xffffffff); 712 nvkm_wr32(device, 0x400100, 0xffffffff);
944 nv_wr32(priv, 0x40013c, 0xffffffff); 713 nvkm_wr32(device, 0x40013c, 0xffffffff);
945 nv_wr32(priv, 0x400500, 0x00010001); 714 nvkm_wr32(device, 0x400500, 0x00010001);
946 715
947 /* upload context program, initialise ctxctl defaults */ 716 /* upload context program, initialise ctxctl defaults */
948 ret = nv50_grctx_init(nv_device(priv), &priv->size); 717 ret = nv50_grctx_init(device, &gr->size);
949 if (ret) 718 if (ret)
950 return ret; 719 return ret;
951 720
952 nv_wr32(priv, 0x400824, 0x00000000); 721 nvkm_wr32(device, 0x400824, 0x00000000);
953 nv_wr32(priv, 0x400828, 0x00000000); 722 nvkm_wr32(device, 0x400828, 0x00000000);
954 nv_wr32(priv, 0x40082c, 0x00000000); 723 nvkm_wr32(device, 0x40082c, 0x00000000);
955 nv_wr32(priv, 0x400830, 0x00000000); 724 nvkm_wr32(device, 0x400830, 0x00000000);
956 nv_wr32(priv, 0x40032c, 0x00000000); 725 nvkm_wr32(device, 0x40032c, 0x00000000);
957 nv_wr32(priv, 0x400330, 0x00000000); 726 nvkm_wr32(device, 0x400330, 0x00000000);
958 727
959 /* some unknown zcull magic */ 728 /* some unknown zcull magic */
960 switch (nv_device(priv)->chipset & 0xf0) { 729 switch (device->chipset & 0xf0) {
961 case 0x50: 730 case 0x50:
962 case 0x80: 731 case 0x80:
963 case 0x90: 732 case 0x90:
964 nv_wr32(priv, 0x402ca8, 0x00000800); 733 nvkm_wr32(device, 0x402ca8, 0x00000800);
965 break; 734 break;
966 case 0xa0: 735 case 0xa0:
967 default: 736 default:
968 if (nv_device(priv)->chipset == 0xa0 || 737 if (device->chipset == 0xa0 ||
969 nv_device(priv)->chipset == 0xaa || 738 device->chipset == 0xaa ||
970 nv_device(priv)->chipset == 0xac) { 739 device->chipset == 0xac) {
971 nv_wr32(priv, 0x402ca8, 0x00000802); 740 nvkm_wr32(device, 0x402ca8, 0x00000802);
972 } else { 741 } else {
973 nv_wr32(priv, 0x402cc0, 0x00000000); 742 nvkm_wr32(device, 0x402cc0, 0x00000000);
974 nv_wr32(priv, 0x402ca8, 0x00000002); 743 nvkm_wr32(device, 0x402ca8, 0x00000002);
975 } 744 }
976 745
977 break; 746 break;
@@ -979,21 +748,47 @@ nv50_gr_init(struct nvkm_object *object)
979 748
980 /* zero out zcull regions */ 749 /* zero out zcull regions */
981 for (i = 0; i < 8; i++) { 750 for (i = 0; i < 8; i++) {
982 nv_wr32(priv, 0x402c20 + (i * 0x10), 0x00000000); 751 nvkm_wr32(device, 0x402c20 + (i * 0x10), 0x00000000);
983 nv_wr32(priv, 0x402c24 + (i * 0x10), 0x00000000); 752 nvkm_wr32(device, 0x402c24 + (i * 0x10), 0x00000000);
984 nv_wr32(priv, 0x402c28 + (i * 0x10), 0x00000000); 753 nvkm_wr32(device, 0x402c28 + (i * 0x10), 0x00000000);
985 nv_wr32(priv, 0x402c2c + (i * 0x10), 0x00000000); 754 nvkm_wr32(device, 0x402c2c + (i * 0x10), 0x00000000);
986 } 755 }
756
987 return 0; 757 return 0;
988} 758}
989 759
990struct nvkm_oclass 760int
991nv50_gr_oclass = { 761nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
992 .handle = NV_ENGINE(GR, 0x50), 762 int index, struct nvkm_gr **pgr)
993 .ofuncs = &(struct nvkm_ofuncs) { 763{
994 .ctor = nv50_gr_ctor, 764 struct nv50_gr *gr;
995 .dtor = _nvkm_gr_dtor, 765
996 .init = nv50_gr_init, 766 if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
997 .fini = _nvkm_gr_fini, 767 return -ENOMEM;
998 }, 768 spin_lock_init(&gr->lock);
769 *pgr = &gr->base;
770
771 return nvkm_gr_ctor(func, device, index, 0x00201000, true, &gr->base);
772}
773
774static const struct nvkm_gr_func
775nv50_gr = {
776 .init = nv50_gr_init,
777 .intr = nv50_gr_intr,
778 .chan_new = nv50_gr_chan_new,
779 .units = nv50_gr_units,
780 .sclass = {
781 { -1, -1, 0x0030, &nv50_gr_object },
782 { -1, -1, 0x502d, &nv50_gr_object },
783 { -1, -1, 0x5039, &nv50_gr_object },
784 { -1, -1, 0x5097, &nv50_gr_object },
785 { -1, -1, 0x50c0, &nv50_gr_object },
786 {}
787 }
999}; 788};
789
790int
791nv50_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
792{
793 return nv50_gr_new_(&nv50_gr, device, index, pgr);
794}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
index bcf786f6b731..45eec83a5969 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
@@ -1,8 +1,34 @@
1#ifndef __NV50_GR_H__ 1#ifndef __NV50_GR_H__
2#define __NV50_GR_H__ 2#define __NV50_GR_H__
3#include <engine/gr.h> 3#define nv50_gr(p) container_of((p), struct nv50_gr, base)
4struct nvkm_device; 4#include "priv.h"
5struct nvkm_gpuobj; 5
6struct nv50_gr {
7 struct nvkm_gr base;
8 const struct nv50_gr_func *func;
9 spinlock_t lock;
10 u32 size;
11};
12
13int nv50_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
14 struct nvkm_gr **);
15int nv50_gr_init(struct nvkm_gr *);
16void nv50_gr_intr(struct nvkm_gr *);
17u64 nv50_gr_units(struct nvkm_gr *);
18
19int g84_gr_tlb_flush(struct nvkm_gr *);
20
21#define nv50_gr_chan(p) container_of((p), struct nv50_gr_chan, object)
22
23struct nv50_gr_chan {
24 struct nvkm_object object;
25 struct nv50_gr *gr;
26};
27
28int nv50_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
29 const struct nvkm_oclass *, struct nvkm_object **);
30
31extern const struct nvkm_object_func nv50_gr_object;
6 32
7int nv50_grctx_init(struct nvkm_device *, u32 *size); 33int nv50_grctx_init(struct nvkm_device *, u32 *size);
8void nv50_grctx_fill(struct nvkm_device *, struct nvkm_gpuobj *); 34void nv50_grctx_fill(struct nvkm_device *, struct nvkm_gpuobj *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
new file mode 100644
index 000000000000..a234590be88e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -0,0 +1,38 @@
1#ifndef __NVKM_GR_PRIV_H__
2#define __NVKM_GR_PRIV_H__
3#define nvkm_gr(p) container_of((p), struct nvkm_gr, engine)
4#include <engine/gr.h>
5#include <core/enum.h>
6struct nvkm_fb_tile;
7struct nvkm_fifo_chan;
8
9int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *,
10 int index, u32 pmc_enable, bool enable,
11 struct nvkm_gr *);
12
13bool nv04_gr_idle(struct nvkm_gr *);
14
15struct nvkm_gr_func {
16 void *(*dtor)(struct nvkm_gr *);
17 int (*oneinit)(struct nvkm_gr *);
18 int (*init)(struct nvkm_gr *);
19 void (*intr)(struct nvkm_gr *);
20 void (*tile)(struct nvkm_gr *, int region, struct nvkm_fb_tile *);
21 int (*tlb_flush)(struct nvkm_gr *);
22 int (*chan_new)(struct nvkm_gr *, struct nvkm_fifo_chan *,
23 const struct nvkm_oclass *, struct nvkm_object **);
24 int (*object_get)(struct nvkm_gr *, int, struct nvkm_sclass *);
25 /* Returns chipset-specific counts of units packed into an u64.
26 */
27 u64 (*units)(struct nvkm_gr *);
28 struct nvkm_sclass sclass[];
29};
30
31extern const struct nvkm_bitfield nv04_gr_nsource[];
32extern const struct nvkm_object_func nv04_gr_object;
33
34extern const struct nvkm_bitfield nv10_gr_intr_name[];
35extern const struct nvkm_bitfield nv10_gr_nstatus[];
36
37extern const struct nvkm_enum nv50_data_error_names[];
38#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
index 0df889fa2611..34ff0014a6c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
@@ -21,74 +21,24 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <engine/mpeg.h> 24#include "priv.h"
25 25
26struct g84_mpeg_priv { 26#include <nvif/class.h>
27 struct nvkm_mpeg base; 27
28}; 28static const struct nvkm_engine_func
29 29g84_mpeg = {
30struct g84_mpeg_chan { 30 .init = nv50_mpeg_init,
31 struct nvkm_mpeg_chan base; 31 .intr = nv50_mpeg_intr,
32}; 32 .cclass = &nv50_mpeg_cclass,
33 33 .sclass = {
34/******************************************************************************* 34 { -1, -1, G82_MPEG, &nv31_mpeg_object },
35 * MPEG object classes 35 {}
36 ******************************************************************************/ 36 }
37
38static struct nvkm_oclass
39g84_mpeg_sclass[] = {
40 { 0x8274, &nv50_mpeg_ofuncs },
41 {}
42};
43
44/*******************************************************************************
45 * PMPEG context
46 ******************************************************************************/
47
48static struct nvkm_oclass
49g84_mpeg_cclass = {
50 .handle = NV_ENGCTX(MPEG, 0x84),
51 .ofuncs = &(struct nvkm_ofuncs) {
52 .ctor = nv50_mpeg_context_ctor,
53 .dtor = _nvkm_mpeg_context_dtor,
54 .init = _nvkm_mpeg_context_init,
55 .fini = _nvkm_mpeg_context_fini,
56 .rd32 = _nvkm_mpeg_context_rd32,
57 .wr32 = _nvkm_mpeg_context_wr32,
58 },
59}; 37};
60 38
61/******************************************************************************* 39int
62 * PMPEG engine/subdev functions 40g84_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
63 ******************************************************************************/
64
65static int
66g84_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
67 struct nvkm_oclass *oclass, void *data, u32 size,
68 struct nvkm_object **pobject)
69{ 41{
70 struct g84_mpeg_priv *priv; 42 return nvkm_engine_new_(&g84_mpeg, device, index, 0x00000002,
71 int ret; 43 true, pmpeg);
72
73 ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
74 *pobject = nv_object(priv);
75 if (ret)
76 return ret;
77
78 nv_subdev(priv)->unit = 0x00000002;
79 nv_subdev(priv)->intr = nv50_mpeg_intr;
80 nv_engine(priv)->cclass = &g84_mpeg_cclass;
81 nv_engine(priv)->sclass = g84_mpeg_sclass;
82 return 0;
83} 44}
84
85struct nvkm_oclass
86g84_mpeg_oclass = {
87 .handle = NV_ENGINE(MPEG, 0x84),
88 .ofuncs = &(struct nvkm_ofuncs) {
89 .ctor = g84_mpeg_ctor,
90 .dtor = _nvkm_mpeg_dtor,
91 .init = nv50_mpeg_init,
92 .fini = _nvkm_mpeg_fini,
93 },
94};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index b5bef0718359..d4d8942b1347 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -24,281 +24,271 @@
24#include "nv31.h" 24#include "nv31.h"
25 25
26#include <core/client.h> 26#include <core/client.h>
27#include <core/handle.h> 27#include <core/gpuobj.h>
28#include <engine/fifo.h>
29#include <subdev/instmem.h>
30#include <subdev/fb.h> 28#include <subdev/fb.h>
31#include <subdev/timer.h> 29#include <subdev/timer.h>
30#include <engine/fifo.h>
31
32#include <nvif/class.h>
32 33
33/******************************************************************************* 34/*******************************************************************************
34 * MPEG object classes 35 * MPEG object classes
35 ******************************************************************************/ 36 ******************************************************************************/
36 37
37static int 38static int
38nv31_mpeg_object_ctor(struct nvkm_object *parent, 39nv31_mpeg_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
39 struct nvkm_object *engine, 40 int align, struct nvkm_gpuobj **pgpuobj)
40 struct nvkm_oclass *oclass, void *data, u32 size,
41 struct nvkm_object **pobject)
42{ 41{
43 struct nvkm_gpuobj *obj; 42 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
44 int ret; 43 false, parent, pgpuobj);
45 44 if (ret == 0) {
46 ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent, 45 nvkm_kmap(*pgpuobj);
47 20, 16, 0, &obj); 46 nvkm_wo32(*pgpuobj, 0x00, object->oclass);
48 *pobject = nv_object(obj); 47 nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
49 if (ret) 48 nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
50 return ret; 49 nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
51 50 nvkm_done(*pgpuobj);
52 nv_wo32(obj, 0x00, nv_mclass(obj)); 51 }
53 nv_wo32(obj, 0x04, 0x00000000); 52 return ret;
54 nv_wo32(obj, 0x08, 0x00000000);
55 nv_wo32(obj, 0x0c, 0x00000000);
56 return 0;
57} 53}
58 54
59static int 55const struct nvkm_object_func
60nv31_mpeg_mthd_dma(struct nvkm_object *object, u32 mthd, void *arg, u32 len) 56nv31_mpeg_object = {
57 .bind = nv31_mpeg_object_bind,
58};
59
60/*******************************************************************************
61 * PMPEG context
62 ******************************************************************************/
63
64static void *
65nv31_mpeg_chan_dtor(struct nvkm_object *object)
61{ 66{
62 struct nvkm_instmem *imem = nvkm_instmem(object); 67 struct nv31_mpeg_chan *chan = nv31_mpeg_chan(object);
63 struct nv31_mpeg_priv *priv = (void *)object->engine; 68 struct nv31_mpeg *mpeg = chan->mpeg;
64 u32 inst = *(u32 *)arg << 4; 69 unsigned long flags;
65 u32 dma0 = nv_ro32(imem, inst + 0); 70
66 u32 dma1 = nv_ro32(imem, inst + 4); 71 spin_lock_irqsave(&mpeg->engine.lock, flags);
67 u32 dma2 = nv_ro32(imem, inst + 8); 72 if (mpeg->chan == chan)
73 mpeg->chan = NULL;
74 spin_unlock_irqrestore(&mpeg->engine.lock, flags);
75 return chan;
76}
77
78static const struct nvkm_object_func
79nv31_mpeg_chan = {
80 .dtor = nv31_mpeg_chan_dtor,
81};
82
83int
84nv31_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
85 const struct nvkm_oclass *oclass,
86 struct nvkm_object **pobject)
87{
88 struct nv31_mpeg *mpeg = nv31_mpeg(oclass->engine);
89 struct nv31_mpeg_chan *chan;
90 unsigned long flags;
91 int ret = -EBUSY;
92
93 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
94 return -ENOMEM;
95 nvkm_object_ctor(&nv31_mpeg_chan, oclass, &chan->object);
96 chan->mpeg = mpeg;
97 chan->fifo = fifoch;
98 *pobject = &chan->object;
99
100 spin_lock_irqsave(&mpeg->engine.lock, flags);
101 if (!mpeg->chan) {
102 mpeg->chan = chan;
103 ret = 0;
104 }
105 spin_unlock_irqrestore(&mpeg->engine.lock, flags);
106 return ret;
107}
108
109/*******************************************************************************
110 * PMPEG engine/subdev functions
111 ******************************************************************************/
112
113void
114nv31_mpeg_tile(struct nvkm_engine *engine, int i, struct nvkm_fb_tile *tile)
115{
116 struct nv31_mpeg *mpeg = nv31_mpeg(engine);
117 struct nvkm_device *device = mpeg->engine.subdev.device;
118
119 nvkm_wr32(device, 0x00b008 + (i * 0x10), tile->pitch);
120 nvkm_wr32(device, 0x00b004 + (i * 0x10), tile->limit);
121 nvkm_wr32(device, 0x00b000 + (i * 0x10), tile->addr);
122}
123
124static bool
125nv31_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
126{
127 u32 inst = data << 4;
128 u32 dma0 = nvkm_rd32(device, 0x700000 + inst);
129 u32 dma1 = nvkm_rd32(device, 0x700004 + inst);
130 u32 dma2 = nvkm_rd32(device, 0x700008 + inst);
68 u32 base = (dma2 & 0xfffff000) | (dma0 >> 20); 131 u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
69 u32 size = dma1 + 1; 132 u32 size = dma1 + 1;
70 133
71 /* only allow linear DMA objects */ 134 /* only allow linear DMA objects */
72 if (!(dma0 & 0x00002000)) 135 if (!(dma0 & 0x00002000))
73 return -EINVAL; 136 return false;
74 137
75 if (mthd == 0x0190) { 138 if (mthd == 0x0190) {
76 /* DMA_CMD */ 139 /* DMA_CMD */
77 nv_mask(priv, 0x00b300, 0x00010000, (dma0 & 0x00030000) ? 0x00010000 : 0); 140 nvkm_mask(device, 0x00b300, 0x00010000,
78 nv_wr32(priv, 0x00b334, base); 141 (dma0 & 0x00030000) ? 0x00010000 : 0);
79 nv_wr32(priv, 0x00b324, size); 142 nvkm_wr32(device, 0x00b334, base);
143 nvkm_wr32(device, 0x00b324, size);
80 } else 144 } else
81 if (mthd == 0x01a0) { 145 if (mthd == 0x01a0) {
82 /* DMA_DATA */ 146 /* DMA_DATA */
83 nv_mask(priv, 0x00b300, 0x00020000, (dma0 & 0x00030000) ? 0x00020000 : 0); 147 nvkm_mask(device, 0x00b300, 0x00020000,
84 nv_wr32(priv, 0x00b360, base); 148 (dma0 & 0x00030000) ? 0x00020000 : 0);
85 nv_wr32(priv, 0x00b364, size); 149 nvkm_wr32(device, 0x00b360, base);
150 nvkm_wr32(device, 0x00b364, size);
86 } else { 151 } else {
87 /* DMA_IMAGE, VRAM only */ 152 /* DMA_IMAGE, VRAM only */
88 if (dma0 & 0x00030000) 153 if (dma0 & 0x00030000)
89 return -EINVAL; 154 return false;
90 155
91 nv_wr32(priv, 0x00b370, base); 156 nvkm_wr32(device, 0x00b370, base);
92 nv_wr32(priv, 0x00b374, size); 157 nvkm_wr32(device, 0x00b374, size);
93 } 158 }
94 159
95 return 0; 160 return true;
96} 161}
97 162
98struct nvkm_ofuncs 163static bool
99nv31_mpeg_ofuncs = { 164nv31_mpeg_mthd(struct nv31_mpeg *mpeg, u32 mthd, u32 data)
100 .ctor = nv31_mpeg_object_ctor,
101 .dtor = _nvkm_gpuobj_dtor,
102 .init = _nvkm_gpuobj_init,
103 .fini = _nvkm_gpuobj_fini,
104 .rd32 = _nvkm_gpuobj_rd32,
105 .wr32 = _nvkm_gpuobj_wr32,
106};
107
108static struct nvkm_omthds
109nv31_mpeg_omthds[] = {
110 { 0x0190, 0x0190, nv31_mpeg_mthd_dma },
111 { 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
112 { 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
113 {}
114};
115
116struct nvkm_oclass
117nv31_mpeg_sclass[] = {
118 { 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds },
119 {}
120};
121
122/*******************************************************************************
123 * PMPEG context
124 ******************************************************************************/
125
126static int
127nv31_mpeg_context_ctor(struct nvkm_object *parent,
128 struct nvkm_object *engine,
129 struct nvkm_oclass *oclass, void *data, u32 size,
130 struct nvkm_object **pobject)
131{ 165{
132 struct nv31_mpeg_priv *priv = (void *)engine; 166 struct nvkm_device *device = mpeg->engine.subdev.device;
133 struct nv31_mpeg_chan *chan; 167 switch (mthd) {
134 unsigned long flags; 168 case 0x190:
135 int ret; 169 case 0x1a0:
136 170 case 0x1b0:
137 ret = nvkm_object_create(parent, engine, oclass, 0, &chan); 171 return mpeg->func->mthd_dma(device, mthd, data);
138 *pobject = nv_object(chan); 172 default:
139 if (ret) 173 break;
140 return ret;
141
142 spin_lock_irqsave(&nv_engine(priv)->lock, flags);
143 if (priv->chan) {
144 spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
145 nvkm_object_destroy(&chan->base);
146 *pobject = NULL;
147 return -EBUSY;
148 } 174 }
149 priv->chan = chan; 175 return false;
150 spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
151 return 0;
152} 176}
153 177
154static void 178static void
155nv31_mpeg_context_dtor(struct nvkm_object *object) 179nv31_mpeg_intr(struct nvkm_engine *engine)
156{ 180{
157 struct nv31_mpeg_priv *priv = (void *)object->engine; 181 struct nv31_mpeg *mpeg = nv31_mpeg(engine);
158 struct nv31_mpeg_chan *chan = (void *)object; 182 struct nvkm_subdev *subdev = &mpeg->engine.subdev;
159 unsigned long flags; 183 struct nvkm_device *device = subdev->device;
160 184 u32 stat = nvkm_rd32(device, 0x00b100);
161 spin_lock_irqsave(&nv_engine(priv)->lock, flags); 185 u32 type = nvkm_rd32(device, 0x00b230);
162 priv->chan = NULL; 186 u32 mthd = nvkm_rd32(device, 0x00b234);
163 spin_unlock_irqrestore(&nv_engine(priv)->lock, flags); 187 u32 data = nvkm_rd32(device, 0x00b238);
164 nvkm_object_destroy(&chan->base);
165}
166
167struct nvkm_oclass
168nv31_mpeg_cclass = {
169 .handle = NV_ENGCTX(MPEG, 0x31),
170 .ofuncs = &(struct nvkm_ofuncs) {
171 .ctor = nv31_mpeg_context_ctor,
172 .dtor = nv31_mpeg_context_dtor,
173 .init = nvkm_object_init,
174 .fini = nvkm_object_fini,
175 },
176};
177
178/*******************************************************************************
179 * PMPEG engine/subdev functions
180 ******************************************************************************/
181
182void
183nv31_mpeg_tile_prog(struct nvkm_engine *engine, int i)
184{
185 struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
186 struct nv31_mpeg_priv *priv = (void *)engine;
187
188 nv_wr32(priv, 0x00b008 + (i * 0x10), tile->pitch);
189 nv_wr32(priv, 0x00b004 + (i * 0x10), tile->limit);
190 nv_wr32(priv, 0x00b000 + (i * 0x10), tile->addr);
191}
192
193void
194nv31_mpeg_intr(struct nvkm_subdev *subdev)
195{
196 struct nv31_mpeg_priv *priv = (void *)subdev;
197 struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
198 struct nvkm_handle *handle;
199 struct nvkm_object *engctx;
200 u32 stat = nv_rd32(priv, 0x00b100);
201 u32 type = nv_rd32(priv, 0x00b230);
202 u32 mthd = nv_rd32(priv, 0x00b234);
203 u32 data = nv_rd32(priv, 0x00b238);
204 u32 show = stat; 188 u32 show = stat;
205 unsigned long flags; 189 unsigned long flags;
206 190
207 spin_lock_irqsave(&nv_engine(priv)->lock, flags); 191 spin_lock_irqsave(&mpeg->engine.lock, flags);
208 engctx = nv_object(priv->chan);
209 192
210 if (stat & 0x01000000) { 193 if (stat & 0x01000000) {
211 /* happens on initial binding of the object */ 194 /* happens on initial binding of the object */
212 if (type == 0x00000020 && mthd == 0x0000) { 195 if (type == 0x00000020 && mthd == 0x0000) {
213 nv_mask(priv, 0x00b308, 0x00000000, 0x00000000); 196 nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000);
214 show &= ~0x01000000; 197 show &= ~0x01000000;
215 } 198 }
216 199
217 if (type == 0x00000010 && engctx) { 200 if (type == 0x00000010) {
218 handle = nvkm_handle_get_class(engctx, 0x3174); 201 if (!nv31_mpeg_mthd(mpeg, mthd, data))
219 if (handle && !nv_call(handle->object, mthd, data))
220 show &= ~0x01000000; 202 show &= ~0x01000000;
221 nvkm_handle_put(handle);
222 } 203 }
223 } 204 }
224 205
225 nv_wr32(priv, 0x00b100, stat); 206 nvkm_wr32(device, 0x00b100, stat);
226 nv_wr32(priv, 0x00b230, 0x00000001); 207 nvkm_wr32(device, 0x00b230, 0x00000001);
227 208
228 if (show) { 209 if (show) {
229 nv_error(priv, "ch %d [%s] 0x%08x 0x%08x 0x%08x 0x%08x\n", 210 nvkm_error(subdev, "ch %d [%s] %08x %08x %08x %08x\n",
230 pfifo->chid(pfifo, engctx), 211 mpeg->chan ? mpeg->chan->fifo->chid : -1,
231 nvkm_client_name(engctx), stat, type, mthd, data); 212 mpeg->chan ? mpeg->chan->object.client->name :
213 "unknown", stat, type, mthd, data);
232 } 214 }
233 215
234 spin_unlock_irqrestore(&nv_engine(priv)->lock, flags); 216 spin_unlock_irqrestore(&mpeg->engine.lock, flags);
235}
236
237static int
238nv31_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
239 struct nvkm_oclass *oclass, void *data, u32 size,
240 struct nvkm_object **pobject)
241{
242 struct nv31_mpeg_priv *priv;
243 int ret;
244
245 ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
246 *pobject = nv_object(priv);
247 if (ret)
248 return ret;
249
250 nv_subdev(priv)->unit = 0x00000002;
251 nv_subdev(priv)->intr = nv31_mpeg_intr;
252 nv_engine(priv)->cclass = &nv31_mpeg_cclass;
253 nv_engine(priv)->sclass = nv31_mpeg_sclass;
254 nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
255 return 0;
256} 217}
257 218
258int 219int
259nv31_mpeg_init(struct nvkm_object *object) 220nv31_mpeg_init(struct nvkm_engine *mpeg)
260{ 221{
261 struct nvkm_engine *engine = nv_engine(object); 222 struct nvkm_subdev *subdev = &mpeg->subdev;
262 struct nv31_mpeg_priv *priv = (void *)object; 223 struct nvkm_device *device = subdev->device;
263 struct nvkm_fb *pfb = nvkm_fb(object);
264 int ret, i;
265
266 ret = nvkm_mpeg_init(&priv->base);
267 if (ret)
268 return ret;
269 224
270 /* VPE init */ 225 /* VPE init */
271 nv_wr32(priv, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */ 226 nvkm_wr32(device, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
272 nv_wr32(priv, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */ 227 nvkm_wr32(device, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
273
274 for (i = 0; i < pfb->tile.regions; i++)
275 engine->tile_prog(engine, i);
276 228
277 /* PMPEG init */ 229 /* PMPEG init */
278 nv_wr32(priv, 0x00b32c, 0x00000000); 230 nvkm_wr32(device, 0x00b32c, 0x00000000);
279 nv_wr32(priv, 0x00b314, 0x00000100); 231 nvkm_wr32(device, 0x00b314, 0x00000100);
280 nv_wr32(priv, 0x00b220, 0x00000031); 232 nvkm_wr32(device, 0x00b220, 0x00000031);
281 nv_wr32(priv, 0x00b300, 0x02001ec1); 233 nvkm_wr32(device, 0x00b300, 0x02001ec1);
282 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001); 234 nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);
283 235
284 nv_wr32(priv, 0x00b100, 0xffffffff); 236 nvkm_wr32(device, 0x00b100, 0xffffffff);
285 nv_wr32(priv, 0x00b140, 0xffffffff); 237 nvkm_wr32(device, 0x00b140, 0xffffffff);
286 238
287 if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) { 239 if (nvkm_msec(device, 2000,
288 nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200)); 240 if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
241 break;
242 ) < 0) {
243 nvkm_error(subdev, "timeout %08x\n",
244 nvkm_rd32(device, 0x00b200));
289 return -EBUSY; 245 return -EBUSY;
290 } 246 }
291 247
292 return 0; 248 return 0;
293} 249}
294 250
295struct nvkm_oclass 251static void *
296nv31_mpeg_oclass = { 252nv31_mpeg_dtor(struct nvkm_engine *engine)
297 .handle = NV_ENGINE(MPEG, 0x31), 253{
298 .ofuncs = &(struct nvkm_ofuncs) { 254 return nv31_mpeg(engine);
299 .ctor = nv31_mpeg_ctor, 255}
300 .dtor = _nvkm_mpeg_dtor, 256
301 .init = nv31_mpeg_init, 257static const struct nvkm_engine_func
302 .fini = _nvkm_mpeg_fini, 258nv31_mpeg_ = {
303 }, 259 .dtor = nv31_mpeg_dtor,
260 .init = nv31_mpeg_init,
261 .intr = nv31_mpeg_intr,
262 .tile = nv31_mpeg_tile,
263 .fifo.cclass = nv31_mpeg_chan_new,
264 .sclass = {
265 { -1, -1, NV31_MPEG, &nv31_mpeg_object },
266 {}
267 }
304}; 268};
269
270int
271nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device,
272 int index, struct nvkm_engine **pmpeg)
273{
274 struct nv31_mpeg *mpeg;
275
276 if (!(mpeg = kzalloc(sizeof(*mpeg), GFP_KERNEL)))
277 return -ENOMEM;
278 mpeg->func = func;
279 *pmpeg = &mpeg->engine;
280
281 return nvkm_engine_ctor(&nv31_mpeg_, device, index, 0x00000002,
282 true, &mpeg->engine);
283}
284
285static const struct nv31_mpeg_func
286nv31_mpeg = {
287 .mthd_dma = nv31_mpeg_mthd_dma,
288};
289
290int
291nv31_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
292{
293 return nv31_mpeg_new_(&nv31_mpeg, device, index, pmpeg);
294}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
index 782b796d7458..d3bb34fcdebf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
@@ -1,13 +1,30 @@
1#ifndef __NV31_MPEG_H__ 1#ifndef __NV31_MPEG_H__
2#define __NV31_MPEG_H__ 2#define __NV31_MPEG_H__
3#define nv31_mpeg(p) container_of((p), struct nv31_mpeg, engine)
4#include "priv.h"
3#include <engine/mpeg.h> 5#include <engine/mpeg.h>
4 6
5struct nv31_mpeg_chan { 7struct nv31_mpeg {
6 struct nvkm_object base; 8 const struct nv31_mpeg_func *func;
9 struct nvkm_engine engine;
10 struct nv31_mpeg_chan *chan;
7}; 11};
8 12
9struct nv31_mpeg_priv { 13int nv31_mpeg_new_(const struct nv31_mpeg_func *, struct nvkm_device *,
10 struct nvkm_mpeg base; 14 int index, struct nvkm_engine **);
11 struct nv31_mpeg_chan *chan; 15
16struct nv31_mpeg_func {
17 bool (*mthd_dma)(struct nvkm_device *, u32 mthd, u32 data);
12}; 18};
19
20#define nv31_mpeg_chan(p) container_of((p), struct nv31_mpeg_chan, object)
21
22struct nv31_mpeg_chan {
23 struct nvkm_object object;
24 struct nv31_mpeg *mpeg;
25 struct nvkm_fifo_chan *fifo;
26};
27
28int nv31_mpeg_chan_new(struct nvkm_fifo_chan *, const struct nvkm_oclass *,
29 struct nvkm_object **);
13#endif 30#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
index 9508bf9e140f..16de5bd94b14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
@@ -25,110 +25,53 @@
25 25
26#include <subdev/instmem.h> 26#include <subdev/instmem.h>
27 27
28/******************************************************************************* 28#include <nvif/class.h>
29 * MPEG object classes
30 ******************************************************************************/
31 29
32static int 30bool
33nv40_mpeg_mthd_dma(struct nvkm_object *object, u32 mthd, void *arg, u32 len) 31nv40_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
34{ 32{
35 struct nvkm_instmem *imem = nvkm_instmem(object); 33 struct nvkm_instmem *imem = device->imem;
36 struct nv31_mpeg_priv *priv = (void *)object->engine; 34 u32 inst = data << 4;
37 u32 inst = *(u32 *)arg << 4; 35 u32 dma0 = nvkm_instmem_rd32(imem, inst + 0);
38 u32 dma0 = nv_ro32(imem, inst + 0); 36 u32 dma1 = nvkm_instmem_rd32(imem, inst + 4);
39 u32 dma1 = nv_ro32(imem, inst + 4); 37 u32 dma2 = nvkm_instmem_rd32(imem, inst + 8);
40 u32 dma2 = nv_ro32(imem, inst + 8);
41 u32 base = (dma2 & 0xfffff000) | (dma0 >> 20); 38 u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
42 u32 size = dma1 + 1; 39 u32 size = dma1 + 1;
43 40
44 /* only allow linear DMA objects */ 41 /* only allow linear DMA objects */
45 if (!(dma0 & 0x00002000)) 42 if (!(dma0 & 0x00002000))
46 return -EINVAL; 43 return false;
47 44
48 if (mthd == 0x0190) { 45 if (mthd == 0x0190) {
49 /* DMA_CMD */ 46 /* DMA_CMD */
50 nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000)); 47 nvkm_mask(device, 0x00b300, 0x00030000, (dma0 & 0x00030000));
51 nv_wr32(priv, 0x00b334, base); 48 nvkm_wr32(device, 0x00b334, base);
52 nv_wr32(priv, 0x00b324, size); 49 nvkm_wr32(device, 0x00b324, size);
53 } else 50 } else
54 if (mthd == 0x01a0) { 51 if (mthd == 0x01a0) {
55 /* DMA_DATA */ 52 /* DMA_DATA */
56 nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2); 53 nvkm_mask(device, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
57 nv_wr32(priv, 0x00b360, base); 54 nvkm_wr32(device, 0x00b360, base);
58 nv_wr32(priv, 0x00b364, size); 55 nvkm_wr32(device, 0x00b364, size);
59 } else { 56 } else {
60 /* DMA_IMAGE, VRAM only */ 57 /* DMA_IMAGE, VRAM only */
61 if (dma0 & 0x00030000) 58 if (dma0 & 0x00030000)
62 return -EINVAL; 59 return false;
63 60
64 nv_wr32(priv, 0x00b370, base); 61 nvkm_wr32(device, 0x00b370, base);
65 nv_wr32(priv, 0x00b374, size); 62 nvkm_wr32(device, 0x00b374, size);
66 } 63 }
67 64
68 return 0; 65 return true;
69} 66}
70 67
71static struct nvkm_omthds 68static const struct nv31_mpeg_func
72nv40_mpeg_omthds[] = { 69nv40_mpeg = {
73 { 0x0190, 0x0190, nv40_mpeg_mthd_dma }, 70 .mthd_dma = nv40_mpeg_mthd_dma,
74 { 0x01a0, 0x01a0, nv40_mpeg_mthd_dma },
75 { 0x01b0, 0x01b0, nv40_mpeg_mthd_dma },
76 {}
77}; 71};
78 72
79struct nvkm_oclass 73int
80nv40_mpeg_sclass[] = { 74nv40_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
81 { 0x3174, &nv31_mpeg_ofuncs, nv40_mpeg_omthds },
82 {}
83};
84
85/*******************************************************************************
86 * PMPEG engine/subdev functions
87 ******************************************************************************/
88
89static void
90nv40_mpeg_intr(struct nvkm_subdev *subdev)
91{ 75{
92 struct nv31_mpeg_priv *priv = (void *)subdev; 76 return nv31_mpeg_new_(&nv40_mpeg, device, index, pmpeg);
93 u32 stat;
94
95 if ((stat = nv_rd32(priv, 0x00b100)))
96 nv31_mpeg_intr(subdev);
97
98 if ((stat = nv_rd32(priv, 0x00b800))) {
99 nv_error(priv, "PMSRCH 0x%08x\n", stat);
100 nv_wr32(priv, 0x00b800, stat);
101 }
102} 77}
103
104static int
105nv40_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
106 struct nvkm_oclass *oclass, void *data, u32 size,
107 struct nvkm_object **pobject)
108{
109 struct nv31_mpeg_priv *priv;
110 int ret;
111
112 ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
113 *pobject = nv_object(priv);
114 if (ret)
115 return ret;
116
117 nv_subdev(priv)->unit = 0x00000002;
118 nv_subdev(priv)->intr = nv40_mpeg_intr;
119 nv_engine(priv)->cclass = &nv31_mpeg_cclass;
120 nv_engine(priv)->sclass = nv40_mpeg_sclass;
121 nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
122 return 0;
123}
124
125struct nvkm_oclass
126nv40_mpeg_oclass = {
127 .handle = NV_ENGINE(MPEG, 0x40),
128 .ofuncs = &(struct nvkm_ofuncs) {
129 .ctor = nv40_mpeg_ctor,
130 .dtor = _nvkm_mpeg_dtor,
131 .init = nv31_mpeg_init,
132 .fini = _nvkm_mpeg_fini,
133 },
134};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index 4720ac884468..d433cfa4a8ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -21,165 +21,197 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <engine/mpeg.h> 24#define nv44_mpeg(p) container_of((p), struct nv44_mpeg, engine)
25#include "priv.h"
25 26
26#include <core/client.h> 27#include <core/client.h>
27#include <core/handle.h> 28#include <core/gpuobj.h>
28#include <engine/fifo.h> 29#include <engine/fifo.h>
29 30
30struct nv44_mpeg_priv { 31#include <nvif/class.h>
31 struct nvkm_mpeg base;
32};
33 32
34struct nv44_mpeg_chan { 33struct nv44_mpeg {
35 struct nvkm_mpeg_chan base; 34 struct nvkm_engine engine;
35 struct list_head chan;
36}; 36};
37 37
38/******************************************************************************* 38/*******************************************************************************
39 * PMPEG context 39 * PMPEG context
40 ******************************************************************************/ 40 ******************************************************************************/
41#define nv44_mpeg_chan(p) container_of((p), struct nv44_mpeg_chan, object)
42
43struct nv44_mpeg_chan {
44 struct nvkm_object object;
45 struct nv44_mpeg *mpeg;
46 struct nvkm_fifo_chan *fifo;
47 struct list_head head;
48 u32 inst;
49};
41 50
42static int 51static int
43nv44_mpeg_context_ctor(struct nvkm_object *parent, 52nv44_mpeg_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
44 struct nvkm_object *engine, 53 int align, struct nvkm_gpuobj **pgpuobj)
45 struct nvkm_oclass *oclass, void *data, u32 size,
46 struct nvkm_object **pobject)
47{ 54{
48 struct nv44_mpeg_chan *chan; 55 struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
49 int ret; 56 int ret = nvkm_gpuobj_new(chan->object.engine->subdev.device, 264 * 4,
50 57 align, true, parent, pgpuobj);
51 ret = nvkm_mpeg_context_create(parent, engine, oclass, NULL, 264 * 4, 58 if (ret == 0) {
52 16, NVOBJ_FLAG_ZERO_ALLOC, &chan); 59 chan->inst = (*pgpuobj)->addr;
53 *pobject = nv_object(chan); 60 nvkm_kmap(*pgpuobj);
54 if (ret) 61 nvkm_wo32(*pgpuobj, 0x78, 0x02001ec1);
55 return ret; 62 nvkm_done(*pgpuobj);
56 63 }
57 nv_wo32(&chan->base.base, 0x78, 0x02001ec1); 64 return ret;
58 return 0;
59} 65}
60 66
61static int 67static int
62nv44_mpeg_context_fini(struct nvkm_object *object, bool suspend) 68nv44_mpeg_chan_fini(struct nvkm_object *object, bool suspend)
63{ 69{
64 70
65 struct nv44_mpeg_priv *priv = (void *)object->engine; 71 struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
66 struct nv44_mpeg_chan *chan = (void *)object; 72 struct nv44_mpeg *mpeg = chan->mpeg;
67 u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4; 73 struct nvkm_device *device = mpeg->engine.subdev.device;
74 u32 inst = 0x80000000 | (chan->inst >> 4);
68 75
69 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000); 76 nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000000);
70 if (nv_rd32(priv, 0x00b318) == inst) 77 if (nvkm_rd32(device, 0x00b318) == inst)
71 nv_mask(priv, 0x00b318, 0x80000000, 0x00000000); 78 nvkm_mask(device, 0x00b318, 0x80000000, 0x00000000);
72 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001); 79 nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);
73 return 0; 80 return 0;
74} 81}
75 82
76static struct nvkm_oclass 83static void *
77nv44_mpeg_cclass = { 84nv44_mpeg_chan_dtor(struct nvkm_object *object)
78 .handle = NV_ENGCTX(MPEG, 0x44), 85{
79 .ofuncs = &(struct nvkm_ofuncs) { 86 struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
80 .ctor = nv44_mpeg_context_ctor, 87 struct nv44_mpeg *mpeg = chan->mpeg;
81 .dtor = _nvkm_mpeg_context_dtor, 88 unsigned long flags;
82 .init = _nvkm_mpeg_context_init, 89 spin_lock_irqsave(&mpeg->engine.lock, flags);
83 .fini = nv44_mpeg_context_fini, 90 list_del(&chan->head);
84 .rd32 = _nvkm_mpeg_context_rd32, 91 spin_unlock_irqrestore(&mpeg->engine.lock, flags);
85 .wr32 = _nvkm_mpeg_context_wr32, 92 return chan;
86 }, 93}
94
95static const struct nvkm_object_func
96nv44_mpeg_chan = {
97 .dtor = nv44_mpeg_chan_dtor,
98 .fini = nv44_mpeg_chan_fini,
99 .bind = nv44_mpeg_chan_bind,
87}; 100};
88 101
102static int
103nv44_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
104 const struct nvkm_oclass *oclass,
105 struct nvkm_object **pobject)
106{
107 struct nv44_mpeg *mpeg = nv44_mpeg(oclass->engine);
108 struct nv44_mpeg_chan *chan;
109 unsigned long flags;
110
111 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
112 return -ENOMEM;
113 nvkm_object_ctor(&nv44_mpeg_chan, oclass, &chan->object);
114 chan->mpeg = mpeg;
115 chan->fifo = fifoch;
116 *pobject = &chan->object;
117
118 spin_lock_irqsave(&mpeg->engine.lock, flags);
119 list_add(&chan->head, &mpeg->chan);
120 spin_unlock_irqrestore(&mpeg->engine.lock, flags);
121 return 0;
122}
123
89/******************************************************************************* 124/*******************************************************************************
90 * PMPEG engine/subdev functions 125 * PMPEG engine/subdev functions
91 ******************************************************************************/ 126 ******************************************************************************/
92 127
128static bool
129nv44_mpeg_mthd(struct nvkm_device *device, u32 mthd, u32 data)
130{
131 switch (mthd) {
132 case 0x190:
133 case 0x1a0:
134 case 0x1b0:
135 return nv40_mpeg_mthd_dma(device, mthd, data);
136 default:
137 break;
138 }
139 return false;
140}
141
93static void 142static void
94nv44_mpeg_intr(struct nvkm_subdev *subdev) 143nv44_mpeg_intr(struct nvkm_engine *engine)
95{ 144{
96 struct nvkm_fifo *pfifo = nvkm_fifo(subdev); 145 struct nv44_mpeg *mpeg = nv44_mpeg(engine);
97 struct nvkm_engine *engine = nv_engine(subdev); 146 struct nvkm_subdev *subdev = &mpeg->engine.subdev;
98 struct nvkm_object *engctx; 147 struct nvkm_device *device = subdev->device;
99 struct nvkm_handle *handle; 148 struct nv44_mpeg_chan *temp, *chan = NULL;
100 struct nv44_mpeg_priv *priv = (void *)subdev; 149 unsigned long flags;
101 u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff; 150 u32 inst = nvkm_rd32(device, 0x00b318) & 0x000fffff;
102 u32 stat = nv_rd32(priv, 0x00b100); 151 u32 stat = nvkm_rd32(device, 0x00b100);
103 u32 type = nv_rd32(priv, 0x00b230); 152 u32 type = nvkm_rd32(device, 0x00b230);
104 u32 mthd = nv_rd32(priv, 0x00b234); 153 u32 mthd = nvkm_rd32(device, 0x00b234);
105 u32 data = nv_rd32(priv, 0x00b238); 154 u32 data = nvkm_rd32(device, 0x00b238);
106 u32 show = stat; 155 u32 show = stat;
107 int chid;
108 156
109 engctx = nvkm_engctx_get(engine, inst); 157 spin_lock_irqsave(&mpeg->engine.lock, flags);
110 chid = pfifo->chid(pfifo, engctx); 158 list_for_each_entry(temp, &mpeg->chan, head) {
159 if (temp->inst >> 4 == inst) {
160 chan = temp;
161 list_del(&chan->head);
162 list_add(&chan->head, &mpeg->chan);
163 break;
164 }
165 }
111 166
112 if (stat & 0x01000000) { 167 if (stat & 0x01000000) {
113 /* happens on initial binding of the object */ 168 /* happens on initial binding of the object */
114 if (type == 0x00000020 && mthd == 0x0000) { 169 if (type == 0x00000020 && mthd == 0x0000) {
115 nv_mask(priv, 0x00b308, 0x00000000, 0x00000000); 170 nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000);
116 show &= ~0x01000000; 171 show &= ~0x01000000;
117 } 172 }
118 173
119 if (type == 0x00000010) { 174 if (type == 0x00000010) {
120 handle = nvkm_handle_get_class(engctx, 0x3174); 175 if (!nv44_mpeg_mthd(subdev->device, mthd, data))
121 if (handle && !nv_call(handle->object, mthd, data))
122 show &= ~0x01000000; 176 show &= ~0x01000000;
123 nvkm_handle_put(handle);
124 } 177 }
125 } 178 }
126 179
127 nv_wr32(priv, 0x00b100, stat); 180 nvkm_wr32(device, 0x00b100, stat);
128 nv_wr32(priv, 0x00b230, 0x00000001); 181 nvkm_wr32(device, 0x00b230, 0x00000001);
129 182
130 if (show) { 183 if (show) {
131 nv_error(priv, 184 nvkm_error(subdev, "ch %d [%08x %s] %08x %08x %08x %08x\n",
132 "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n", 185 chan ? chan->fifo->chid : -1, inst << 4,
133 chid, inst << 4, nvkm_client_name(engctx), stat, 186 chan ? chan->object.client->name : "unknown",
134 type, mthd, data); 187 stat, type, mthd, data);
135 } 188 }
136 189
137 nvkm_engctx_put(engctx); 190 spin_unlock_irqrestore(&mpeg->engine.lock, flags);
138} 191}
139 192
140static void 193static const struct nvkm_engine_func
141nv44_mpeg_me_intr(struct nvkm_subdev *subdev) 194nv44_mpeg = {
142{ 195 .init = nv31_mpeg_init,
143 struct nv44_mpeg_priv *priv = (void *)subdev; 196 .intr = nv44_mpeg_intr,
144 u32 stat; 197 .tile = nv31_mpeg_tile,
145 198 .fifo.cclass = nv44_mpeg_chan_new,
146 if ((stat = nv_rd32(priv, 0x00b100))) 199 .sclass = {
147 nv44_mpeg_intr(subdev); 200 { -1, -1, NV31_MPEG, &nv31_mpeg_object },
148 201 {}
149 if ((stat = nv_rd32(priv, 0x00b800))) {
150 nv_error(priv, "PMSRCH 0x%08x\n", stat);
151 nv_wr32(priv, 0x00b800, stat);
152 } 202 }
153} 203};
154 204
155static int 205int
156nv44_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 206nv44_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
157 struct nvkm_oclass *oclass, void *data, u32 size,
158 struct nvkm_object **pobject)
159{ 207{
160 struct nv44_mpeg_priv *priv; 208 struct nv44_mpeg *mpeg;
161 int ret;
162
163 ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
164 *pobject = nv_object(priv);
165 if (ret)
166 return ret;
167
168 nv_subdev(priv)->unit = 0x00000002;
169 nv_subdev(priv)->intr = nv44_mpeg_me_intr;
170 nv_engine(priv)->cclass = &nv44_mpeg_cclass;
171 nv_engine(priv)->sclass = nv40_mpeg_sclass;
172 nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
173 return 0;
174}
175 209
176struct nvkm_oclass 210 if (!(mpeg = kzalloc(sizeof(*mpeg), GFP_KERNEL)))
177nv44_mpeg_oclass = { 211 return -ENOMEM;
178 .handle = NV_ENGINE(MPEG, 0x44), 212 INIT_LIST_HEAD(&mpeg->chan);
179 .ofuncs = &(struct nvkm_ofuncs) { 213 *pmpeg = &mpeg->engine;
180 .ctor = nv44_mpeg_ctor, 214
181 .dtor = _nvkm_mpeg_dtor, 215 return nvkm_engine_ctor(&nv44_mpeg, device, index, 0x00000002,
182 .init = nv31_mpeg_init, 216 true, &mpeg->engine);
183 .fini = _nvkm_mpeg_fini, 217}
184 },
185};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
index b3463f3739ce..c3a85dffc782 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
@@ -21,98 +21,35 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <engine/mpeg.h> 24#include "priv.h"
25 25
26#include <subdev/bar.h> 26#include <core/gpuobj.h>
27#include <subdev/timer.h> 27#include <subdev/timer.h>
28 28
29struct nv50_mpeg_priv { 29#include <nvif/class.h>
30 struct nvkm_mpeg base;
31};
32
33struct nv50_mpeg_chan {
34 struct nvkm_mpeg_chan base;
35};
36
37/*******************************************************************************
38 * MPEG object classes
39 ******************************************************************************/
40
41static int
42nv50_mpeg_object_ctor(struct nvkm_object *parent,
43 struct nvkm_object *engine,
44 struct nvkm_oclass *oclass, void *data, u32 size,
45 struct nvkm_object **pobject)
46{
47 struct nvkm_gpuobj *obj;
48 int ret;
49
50 ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
51 16, 16, 0, &obj);
52 *pobject = nv_object(obj);
53 if (ret)
54 return ret;
55
56 nv_wo32(obj, 0x00, nv_mclass(obj));
57 nv_wo32(obj, 0x04, 0x00000000);
58 nv_wo32(obj, 0x08, 0x00000000);
59 nv_wo32(obj, 0x0c, 0x00000000);
60 return 0;
61}
62
63struct nvkm_ofuncs
64nv50_mpeg_ofuncs = {
65 .ctor = nv50_mpeg_object_ctor,
66 .dtor = _nvkm_gpuobj_dtor,
67 .init = _nvkm_gpuobj_init,
68 .fini = _nvkm_gpuobj_fini,
69 .rd32 = _nvkm_gpuobj_rd32,
70 .wr32 = _nvkm_gpuobj_wr32,
71};
72
73static struct nvkm_oclass
74nv50_mpeg_sclass[] = {
75 { 0x3174, &nv50_mpeg_ofuncs },
76 {}
77};
78 30
79/******************************************************************************* 31/*******************************************************************************
80 * PMPEG context 32 * PMPEG context
81 ******************************************************************************/ 33 ******************************************************************************/
82 34
83int 35static int
84nv50_mpeg_context_ctor(struct nvkm_object *parent, 36nv50_mpeg_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
85 struct nvkm_object *engine, 37 int align, struct nvkm_gpuobj **pgpuobj)
86 struct nvkm_oclass *oclass, void *data, u32 size,
87 struct nvkm_object **pobject)
88{ 38{
89 struct nvkm_bar *bar = nvkm_bar(parent); 39 int ret = nvkm_gpuobj_new(object->engine->subdev.device, 128 * 4,
90 struct nv50_mpeg_chan *chan; 40 align, true, parent, pgpuobj);
91 int ret; 41 if (ret == 0) {
92 42 nvkm_kmap(*pgpuobj);
93 ret = nvkm_mpeg_context_create(parent, engine, oclass, NULL, 128 * 4, 43 nvkm_wo32(*pgpuobj, 0x70, 0x00801ec1);
94 0, NVOBJ_FLAG_ZERO_ALLOC, &chan); 44 nvkm_wo32(*pgpuobj, 0x7c, 0x0000037c);
95 *pobject = nv_object(chan); 45 nvkm_done(*pgpuobj);
96 if (ret) 46 }
97 return ret; 47 return ret;
98
99 nv_wo32(chan, 0x0070, 0x00801ec1);
100 nv_wo32(chan, 0x007c, 0x0000037c);
101 bar->flush(bar);
102 return 0;
103} 48}
104 49
105static struct nvkm_oclass 50const struct nvkm_object_func
106nv50_mpeg_cclass = { 51nv50_mpeg_cclass = {
107 .handle = NV_ENGCTX(MPEG, 0x50), 52 .bind = nv50_mpeg_cclass_bind,
108 .ofuncs = &(struct nvkm_ofuncs) {
109 .ctor = nv50_mpeg_context_ctor,
110 .dtor = _nvkm_mpeg_context_dtor,
111 .init = _nvkm_mpeg_context_init,
112 .fini = _nvkm_mpeg_context_fini,
113 .rd32 = _nvkm_mpeg_context_rd32,
114 .wr32 = _nvkm_mpeg_context_wr32,
115 },
116}; 53};
117 54
118/******************************************************************************* 55/*******************************************************************************
@@ -120,106 +57,79 @@ nv50_mpeg_cclass = {
120 ******************************************************************************/ 57 ******************************************************************************/
121 58
122void 59void
123nv50_mpeg_intr(struct nvkm_subdev *subdev) 60nv50_mpeg_intr(struct nvkm_engine *mpeg)
124{ 61{
125 struct nv50_mpeg_priv *priv = (void *)subdev; 62 struct nvkm_subdev *subdev = &mpeg->subdev;
126 u32 stat = nv_rd32(priv, 0x00b100); 63 struct nvkm_device *device = subdev->device;
127 u32 type = nv_rd32(priv, 0x00b230); 64 u32 stat = nvkm_rd32(device, 0x00b100);
128 u32 mthd = nv_rd32(priv, 0x00b234); 65 u32 type = nvkm_rd32(device, 0x00b230);
129 u32 data = nv_rd32(priv, 0x00b238); 66 u32 mthd = nvkm_rd32(device, 0x00b234);
67 u32 data = nvkm_rd32(device, 0x00b238);
130 u32 show = stat; 68 u32 show = stat;
131 69
132 if (stat & 0x01000000) { 70 if (stat & 0x01000000) {
133 /* happens on initial binding of the object */ 71 /* happens on initial binding of the object */
134 if (type == 0x00000020 && mthd == 0x0000) { 72 if (type == 0x00000020 && mthd == 0x0000) {
135 nv_wr32(priv, 0x00b308, 0x00000100); 73 nvkm_wr32(device, 0x00b308, 0x00000100);
136 show &= ~0x01000000; 74 show &= ~0x01000000;
137 } 75 }
138 } 76 }
139 77
140 if (show) { 78 if (show) {
141 nv_info(priv, "0x%08x 0x%08x 0x%08x 0x%08x\n", 79 nvkm_info(subdev, "%08x %08x %08x %08x\n",
142 stat, type, mthd, data); 80 stat, type, mthd, data);
143 }
144
145 nv_wr32(priv, 0x00b100, stat);
146 nv_wr32(priv, 0x00b230, 0x00000001);
147}
148
149static void
150nv50_vpe_intr(struct nvkm_subdev *subdev)
151{
152 struct nv50_mpeg_priv *priv = (void *)subdev;
153
154 if (nv_rd32(priv, 0x00b100))
155 nv50_mpeg_intr(subdev);
156
157 if (nv_rd32(priv, 0x00b800)) {
158 u32 stat = nv_rd32(priv, 0x00b800);
159 nv_info(priv, "PMSRCH: 0x%08x\n", stat);
160 nv_wr32(priv, 0xb800, stat);
161 } 81 }
162}
163 82
164static int 83 nvkm_wr32(device, 0x00b100, stat);
165nv50_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 84 nvkm_wr32(device, 0x00b230, 0x00000001);
166 struct nvkm_oclass *oclass, void *data, u32 size,
167 struct nvkm_object **pobject)
168{
169 struct nv50_mpeg_priv *priv;
170 int ret;
171
172 ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
173 *pobject = nv_object(priv);
174 if (ret)
175 return ret;
176
177 nv_subdev(priv)->unit = 0x00400002;
178 nv_subdev(priv)->intr = nv50_vpe_intr;
179 nv_engine(priv)->cclass = &nv50_mpeg_cclass;
180 nv_engine(priv)->sclass = nv50_mpeg_sclass;
181 return 0;
182} 85}
183 86
184int 87int
185nv50_mpeg_init(struct nvkm_object *object) 88nv50_mpeg_init(struct nvkm_engine *mpeg)
186{ 89{
187 struct nv50_mpeg_priv *priv = (void *)object; 90 struct nvkm_subdev *subdev = &mpeg->subdev;
188 int ret; 91 struct nvkm_device *device = subdev->device;
189 92
190 ret = nvkm_mpeg_init(&priv->base); 93 nvkm_wr32(device, 0x00b32c, 0x00000000);
191 if (ret) 94 nvkm_wr32(device, 0x00b314, 0x00000100);
192 return ret; 95 nvkm_wr32(device, 0x00b0e0, 0x0000001a);
193 96
194 nv_wr32(priv, 0x00b32c, 0x00000000); 97 nvkm_wr32(device, 0x00b220, 0x00000044);
195 nv_wr32(priv, 0x00b314, 0x00000100); 98 nvkm_wr32(device, 0x00b300, 0x00801ec1);
196 nv_wr32(priv, 0x00b0e0, 0x0000001a); 99 nvkm_wr32(device, 0x00b390, 0x00000000);
197 100 nvkm_wr32(device, 0x00b394, 0x00000000);
198 nv_wr32(priv, 0x00b220, 0x00000044); 101 nvkm_wr32(device, 0x00b398, 0x00000000);
199 nv_wr32(priv, 0x00b300, 0x00801ec1); 102 nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);
200 nv_wr32(priv, 0x00b390, 0x00000000); 103
201 nv_wr32(priv, 0x00b394, 0x00000000); 104 nvkm_wr32(device, 0x00b100, 0xffffffff);
202 nv_wr32(priv, 0x00b398, 0x00000000); 105 nvkm_wr32(device, 0x00b140, 0xffffffff);
203 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001); 106
204 107 if (nvkm_msec(device, 2000,
205 nv_wr32(priv, 0x00b100, 0xffffffff); 108 if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
206 nv_wr32(priv, 0x00b140, 0xffffffff); 109 break;
207 110 ) < 0) {
208 if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) { 111 nvkm_error(subdev, "timeout %08x\n",
209 nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200)); 112 nvkm_rd32(device, 0x00b200));
210 return -EBUSY; 113 return -EBUSY;
211 } 114 }
212 115
213 return 0; 116 return 0;
214} 117}
215 118
216struct nvkm_oclass 119static const struct nvkm_engine_func
217nv50_mpeg_oclass = { 120nv50_mpeg = {
218 .handle = NV_ENGINE(MPEG, 0x50), 121 .init = nv50_mpeg_init,
219 .ofuncs = &(struct nvkm_ofuncs) { 122 .intr = nv50_mpeg_intr,
220 .ctor = nv50_mpeg_ctor, 123 .cclass = &nv50_mpeg_cclass,
221 .dtor = _nvkm_mpeg_dtor, 124 .sclass = {
222 .init = nv50_mpeg_init, 125 { -1, -1, NV31_MPEG, &nv31_mpeg_object },
223 .fini = _nvkm_mpeg_fini, 126 {}
224 }, 127 }
225}; 128};
129
130int
131nv50_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
132{
133 return nvkm_engine_new_(&nv50_mpeg, device, index, 0x00400002,
134 true, pmpeg);
135}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
new file mode 100644
index 000000000000..d5753103ff63
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
@@ -0,0 +1,16 @@
1#ifndef __NVKM_MPEG_PRIV_H__
2#define __NVKM_MPEG_PRIV_H__
3#include <engine/mpeg.h>
4struct nvkm_fifo_chan;
5
6int nv31_mpeg_init(struct nvkm_engine *);
7void nv31_mpeg_tile(struct nvkm_engine *, int, struct nvkm_fb_tile *);
8extern const struct nvkm_object_func nv31_mpeg_object;
9
10bool nv40_mpeg_mthd_dma(struct nvkm_device *, u32, u32);
11
12int nv50_mpeg_init(struct nvkm_engine *);
13void nv50_mpeg_intr(struct nvkm_engine *);
14
15extern const struct nvkm_object_func nv50_mpeg_cclass;
16#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
index c59c83a67315..1a7151146e9d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
@@ -1,3 +1,5 @@
1nvkm-y += nvkm/engine/mspdec/base.o
1nvkm-y += nvkm/engine/mspdec/g98.o 2nvkm-y += nvkm/engine/mspdec/g98.o
3nvkm-y += nvkm/engine/mspdec/gt215.o
2nvkm-y += nvkm/engine/mspdec/gf100.o 4nvkm-y += nvkm/engine/mspdec/gf100.o
3nvkm-y += nvkm/engine/mspdec/gk104.o 5nvkm-y += nvkm/engine/mspdec/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
new file mode 100644
index 000000000000..80211f76093b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25
26int
27nvkm_mspdec_new_(const struct nvkm_falcon_func *func,
28 struct nvkm_device *device, int index,
29 struct nvkm_engine **pengine)
30{
31 return nvkm_falcon_new_(func, device, index, true, 0x085000, pengine);
32}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
index 2174577793a4..1f1a99e927b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
@@ -21,89 +21,31 @@
21 * 21 *
22 * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin 22 * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
23 */ 23 */
24#include <engine/mspdec.h> 24#include "priv.h"
25#include <engine/falcon.h>
26 25
27struct g98_mspdec_priv { 26#include <nvif/class.h>
28 struct nvkm_falcon base;
29};
30
31/*******************************************************************************
32 * MSPDEC object classes
33 ******************************************************************************/
34
35static struct nvkm_oclass
36g98_mspdec_sclass[] = {
37 { 0x88b2, &nvkm_object_ofuncs },
38 { 0x85b2, &nvkm_object_ofuncs },
39 {},
40};
41
42/*******************************************************************************
43 * PMSPDEC context
44 ******************************************************************************/
45
46static struct nvkm_oclass
47g98_mspdec_cclass = {
48 .handle = NV_ENGCTX(MSPDEC, 0x98),
49 .ofuncs = &(struct nvkm_ofuncs) {
50 .ctor = _nvkm_falcon_context_ctor,
51 .dtor = _nvkm_falcon_context_dtor,
52 .init = _nvkm_falcon_context_init,
53 .fini = _nvkm_falcon_context_fini,
54 .rd32 = _nvkm_falcon_context_rd32,
55 .wr32 = _nvkm_falcon_context_wr32,
56 },
57};
58
59/*******************************************************************************
60 * PMSPDEC engine/subdev functions
61 ******************************************************************************/
62 27
63static int 28void
64g98_mspdec_init(struct nvkm_object *object) 29g98_mspdec_init(struct nvkm_falcon *mspdec)
65{ 30{
66 struct g98_mspdec_priv *priv = (void *)object; 31 struct nvkm_device *device = mspdec->engine.subdev.device;
67 int ret; 32 nvkm_wr32(device, 0x085010, 0x0000ffd2);
68 33 nvkm_wr32(device, 0x08501c, 0x0000fff2);
69 ret = nvkm_falcon_init(&priv->base);
70 if (ret)
71 return ret;
72
73 nv_wr32(priv, 0x085010, 0x0000ffd2);
74 nv_wr32(priv, 0x08501c, 0x0000fff2);
75 return 0;
76} 34}
77 35
78static int 36static const struct nvkm_falcon_func
79g98_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 37g98_mspdec = {
80 struct nvkm_oclass *oclass, void *data, u32 size, 38 .pmc_enable = 0x01020000,
81 struct nvkm_object **pobject) 39 .init = g98_mspdec_init,
82{ 40 .sclass = {
83 struct g98_mspdec_priv *priv; 41 { -1, -1, G98_MSPDEC },
84 int ret; 42 {}
85 43 }
86 ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true, 44};
87 "PMSPDEC", "mspdec", &priv);
88 *pobject = nv_object(priv);
89 if (ret)
90 return ret;
91 45
92 nv_subdev(priv)->unit = 0x01020000; 46int
93 nv_engine(priv)->cclass = &g98_mspdec_cclass; 47g98_mspdec_new(struct nvkm_device *device, int index,
94 nv_engine(priv)->sclass = g98_mspdec_sclass; 48 struct nvkm_engine **pengine)
95 return 0; 49{
50 return nvkm_mspdec_new_(&g98_mspdec, device, index, pengine);
96} 51}
97
98struct nvkm_oclass
99g98_mspdec_oclass = {
100 .handle = NV_ENGINE(MSPDEC, 0x98),
101 .ofuncs = &(struct nvkm_ofuncs) {
102 .ctor = g98_mspdec_ctor,
103 .dtor = _nvkm_falcon_dtor,
104 .init = g98_mspdec_init,
105 .fini = _nvkm_falcon_fini,
106 .rd32 = _nvkm_falcon_rd32,
107 .wr32 = _nvkm_falcon_wr32,
108 },
109};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
index c814a5f65eb0..371fd6c3c663 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
@@ -21,89 +21,31 @@
21 * 21 *
22 * Authors: Maarten Lankhorst 22 * Authors: Maarten Lankhorst
23 */ 23 */
24#include <engine/mspdec.h> 24#include "priv.h"
25#include <engine/falcon.h>
26 25
27struct gf100_mspdec_priv { 26#include <nvif/class.h>
28 struct nvkm_falcon base;
29};
30
31/*******************************************************************************
32 * MSPDEC object classes
33 ******************************************************************************/
34
35static struct nvkm_oclass
36gf100_mspdec_sclass[] = {
37 { 0x90b2, &nvkm_object_ofuncs },
38 {},
39};
40
41/*******************************************************************************
42 * PMSPDEC context
43 ******************************************************************************/
44
45static struct nvkm_oclass
46gf100_mspdec_cclass = {
47 .handle = NV_ENGCTX(MSPDEC, 0xc0),
48 .ofuncs = &(struct nvkm_ofuncs) {
49 .ctor = _nvkm_falcon_context_ctor,
50 .dtor = _nvkm_falcon_context_dtor,
51 .init = _nvkm_falcon_context_init,
52 .fini = _nvkm_falcon_context_fini,
53 .rd32 = _nvkm_falcon_context_rd32,
54 .wr32 = _nvkm_falcon_context_wr32,
55 },
56};
57
58/*******************************************************************************
59 * PMSPDEC engine/subdev functions
60 ******************************************************************************/
61 27
62static int 28void
63gf100_mspdec_init(struct nvkm_object *object) 29gf100_mspdec_init(struct nvkm_falcon *mspdec)
64{ 30{
65 struct gf100_mspdec_priv *priv = (void *)object; 31 struct nvkm_device *device = mspdec->engine.subdev.device;
66 int ret; 32 nvkm_wr32(device, 0x085010, 0x0000fff2);
67 33 nvkm_wr32(device, 0x08501c, 0x0000fff2);
68 ret = nvkm_falcon_init(&priv->base);
69 if (ret)
70 return ret;
71
72 nv_wr32(priv, 0x085010, 0x0000fff2);
73 nv_wr32(priv, 0x08501c, 0x0000fff2);
74 return 0;
75} 34}
76 35
77static int 36static const struct nvkm_falcon_func
78gf100_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 37gf100_mspdec = {
79 struct nvkm_oclass *oclass, void *data, u32 size, 38 .pmc_enable = 0x00020000,
80 struct nvkm_object **pobject) 39 .init = gf100_mspdec_init,
81{ 40 .sclass = {
82 struct gf100_mspdec_priv *priv; 41 { -1, -1, GF100_MSPDEC },
83 int ret; 42 {}
84 43 }
85 ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true, 44};
86 "PMSPDEC", "mspdec", &priv);
87 *pobject = nv_object(priv);
88 if (ret)
89 return ret;
90 45
91 nv_subdev(priv)->unit = 0x00020000; 46int
92 nv_subdev(priv)->intr = nvkm_falcon_intr; 47gf100_mspdec_new(struct nvkm_device *device, int index,
93 nv_engine(priv)->cclass = &gf100_mspdec_cclass; 48 struct nvkm_engine **pengine)
94 nv_engine(priv)->sclass = gf100_mspdec_sclass; 49{
95 return 0; 50 return nvkm_mspdec_new_(&gf100_mspdec, device, index, pengine);
96} 51}
97
98struct nvkm_oclass
99gf100_mspdec_oclass = {
100 .handle = NV_ENGINE(MSPDEC, 0xc0),
101 .ofuncs = &(struct nvkm_ofuncs) {
102 .ctor = gf100_mspdec_ctor,
103 .dtor = _nvkm_falcon_dtor,
104 .init = gf100_mspdec_init,
105 .fini = _nvkm_falcon_fini,
106 .rd32 = _nvkm_falcon_rd32,
107 .wr32 = _nvkm_falcon_wr32,
108 },
109};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
index 979920650dbd..de804a15bfd4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
@@ -21,89 +21,23 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <engine/mspdec.h> 24#include "priv.h"
25#include <engine/falcon.h> 25
26 26#include <nvif/class.h>
27struct gk104_mspdec_priv { 27
28 struct nvkm_falcon base; 28static const struct nvkm_falcon_func
29gk104_mspdec = {
30 .pmc_enable = 0x00020000,
31 .init = gf100_mspdec_init,
32 .sclass = {
33 { -1, -1, GK104_MSPDEC },
34 {}
35 }
29}; 36};
30 37
31/******************************************************************************* 38int
32 * MSPDEC object classes 39gk104_mspdec_new(struct nvkm_device *device, int index,
33 ******************************************************************************/ 40 struct nvkm_engine **pengine)
34
35static struct nvkm_oclass
36gk104_mspdec_sclass[] = {
37 { 0x95b2, &nvkm_object_ofuncs },
38 {},
39};
40
41/*******************************************************************************
42 * PMSPDEC context
43 ******************************************************************************/
44
45static struct nvkm_oclass
46gk104_mspdec_cclass = {
47 .handle = NV_ENGCTX(MSPDEC, 0xe0),
48 .ofuncs = &(struct nvkm_ofuncs) {
49 .ctor = _nvkm_falcon_context_ctor,
50 .dtor = _nvkm_falcon_context_dtor,
51 .init = _nvkm_falcon_context_init,
52 .fini = _nvkm_falcon_context_fini,
53 .rd32 = _nvkm_falcon_context_rd32,
54 .wr32 = _nvkm_falcon_context_wr32,
55 },
56};
57
58/*******************************************************************************
59 * PMSPDEC engine/subdev functions
60 ******************************************************************************/
61
62static int
63gk104_mspdec_init(struct nvkm_object *object)
64{ 41{
65 struct gk104_mspdec_priv *priv = (void *)object; 42 return nvkm_mspdec_new_(&gk104_mspdec, device, index, pengine);
66 int ret;
67
68 ret = nvkm_falcon_init(&priv->base);
69 if (ret)
70 return ret;
71
72 nv_wr32(priv, 0x085010, 0x0000fff2);
73 nv_wr32(priv, 0x08501c, 0x0000fff2);
74 return 0;
75}
76
77static int
78gk104_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
79 struct nvkm_oclass *oclass, void *data, u32 size,
80 struct nvkm_object **pobject)
81{
82 struct gk104_mspdec_priv *priv;
83 int ret;
84
85 ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true,
86 "PMSPDEC", "mspdec", &priv);
87 *pobject = nv_object(priv);
88 if (ret)
89 return ret;
90
91 nv_subdev(priv)->unit = 0x00020000;
92 nv_subdev(priv)->intr = nvkm_falcon_intr;
93 nv_engine(priv)->cclass = &gk104_mspdec_cclass;
94 nv_engine(priv)->sclass = gk104_mspdec_sclass;
95 return 0;
96} 43}
97
98struct nvkm_oclass
99gk104_mspdec_oclass = {
100 .handle = NV_ENGINE(MSPDEC, 0xe0),
101 .ofuncs = &(struct nvkm_ofuncs) {
102 .ctor = gk104_mspdec_ctor,
103 .dtor = _nvkm_falcon_dtor,
104 .init = gk104_mspdec_init,
105 .fini = _nvkm_falcon_fini,
106 .rd32 = _nvkm_falcon_rd32,
107 .wr32 = _nvkm_falcon_wr32,
108 },
109};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
new file mode 100644
index 000000000000..835631713c95
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
@@ -0,0 +1,43 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
23 */
24#include "priv.h"
25
26#include <nvif/class.h>
27
28static const struct nvkm_falcon_func
29gt215_mspdec = {
30 .pmc_enable = 0x01020000,
31 .init = g98_mspdec_init,
32 .sclass = {
33 { -1, -1, GT212_MSPDEC },
34 {}
35 }
36};
37
38int
39gt215_mspdec_new(struct nvkm_device *device, int index,
40 struct nvkm_engine **pengine)
41{
42 return nvkm_mspdec_new_(&gt215_mspdec, device, index, pengine);
43}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
new file mode 100644
index 000000000000..d518af4bc9de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
@@ -0,0 +1,11 @@
1#ifndef __NVKM_MSPDEC_PRIV_H__
2#define __NVKM_MSPDEC_PRIV_H__
3#include <engine/mspdec.h>
4
5int nvkm_mspdec_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
6 int index, struct nvkm_engine **);
7
8void g98_mspdec_init(struct nvkm_falcon *);
9
10void gf100_mspdec_init(struct nvkm_falcon *);
11#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
index 4576a9eee39d..3ea7eafb408f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
@@ -1,2 +1,4 @@
1nvkm-y += nvkm/engine/msppp/base.o
1nvkm-y += nvkm/engine/msppp/g98.o 2nvkm-y += nvkm/engine/msppp/g98.o
3nvkm-y += nvkm/engine/msppp/gt215.o
2nvkm-y += nvkm/engine/msppp/gf100.o 4nvkm-y += nvkm/engine/msppp/gf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
new file mode 100644
index 000000000000..bfae5e60e925
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
@@ -0,0 +1,31 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25
26int
27nvkm_msppp_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
28 int index, struct nvkm_engine **pengine)
29{
30 return nvkm_falcon_new_(func, device, index, true, 0x086000, pengine);
31}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
index 7a602a2dec94..73f633ae2ee7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
@@ -21,89 +21,31 @@
21 * 21 *
22 * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin 22 * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
23 */ 23 */
24#include <engine/msppp.h> 24#include "priv.h"
25#include <engine/falcon.h>
26 25
27struct g98_msppp_priv { 26#include <nvif/class.h>
28 struct nvkm_falcon base;
29};
30
31/*******************************************************************************
32 * MSPPP object classes
33 ******************************************************************************/
34
35static struct nvkm_oclass
36g98_msppp_sclass[] = {
37 { 0x88b3, &nvkm_object_ofuncs },
38 { 0x85b3, &nvkm_object_ofuncs },
39 {},
40};
41
42/*******************************************************************************
43 * PMSPPP context
44 ******************************************************************************/
45
46static struct nvkm_oclass
47g98_msppp_cclass = {
48 .handle = NV_ENGCTX(MSPPP, 0x98),
49 .ofuncs = &(struct nvkm_ofuncs) {
50 .ctor = _nvkm_falcon_context_ctor,
51 .dtor = _nvkm_falcon_context_dtor,
52 .init = _nvkm_falcon_context_init,
53 .fini = _nvkm_falcon_context_fini,
54 .rd32 = _nvkm_falcon_context_rd32,
55 .wr32 = _nvkm_falcon_context_wr32,
56 },
57};
58
59/*******************************************************************************
60 * PMSPPP engine/subdev functions
61 ******************************************************************************/
62 27
63static int 28void
64g98_msppp_init(struct nvkm_object *object) 29g98_msppp_init(struct nvkm_falcon *msppp)
65{ 30{
66 struct g98_msppp_priv *priv = (void *)object; 31 struct nvkm_device *device = msppp->engine.subdev.device;
67 int ret; 32 nvkm_wr32(device, 0x086010, 0x0000ffd2);
68 33 nvkm_wr32(device, 0x08601c, 0x0000fff2);
69 ret = nvkm_falcon_init(&priv->base);
70 if (ret)
71 return ret;
72
73 nv_wr32(priv, 0x086010, 0x0000ffd2);
74 nv_wr32(priv, 0x08601c, 0x0000fff2);
75 return 0;
76} 34}
77 35
78static int 36static const struct nvkm_falcon_func
79g98_msppp_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 37g98_msppp = {
80 struct nvkm_oclass *oclass, void *data, u32 size, 38 .pmc_enable = 0x00400002,
81 struct nvkm_object **pobject) 39 .init = g98_msppp_init,
82{ 40 .sclass = {
83 struct g98_msppp_priv *priv; 41 { -1, -1, G98_MSPPP },
84 int ret; 42 {}
85 43 }
86 ret = nvkm_falcon_create(parent, engine, oclass, 0x086000, true, 44};
87 "PMSPPP", "msppp", &priv);
88 *pobject = nv_object(priv);
89 if (ret)
90 return ret;
91 45
92 nv_subdev(priv)->unit = 0x00400002; 46int
93 nv_engine(priv)->cclass = &g98_msppp_cclass; 47g98_msppp_new(struct nvkm_device *device, int index,
94 nv_engine(priv)->sclass = g98_msppp_sclass; 48 struct nvkm_engine **pengine)
95 return 0; 49{
50 return nvkm_msppp_new_(&g98_msppp, device, index, pengine);
96} 51}
97
98struct nvkm_oclass
99g98_msppp_oclass = {
100 .handle = NV_ENGINE(MSPPP, 0x98),
101 .ofuncs = &(struct nvkm_ofuncs) {
102 .ctor = g98_msppp_ctor,
103 .dtor = _nvkm_falcon_dtor,
104 .init = g98_msppp_init,
105 .fini = _nvkm_falcon_fini,
106 .rd32 = _nvkm_falcon_rd32,
107 .wr32 = _nvkm_falcon_wr32,
108 },
109};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
index 6047baee1f75..c42c0c07e2db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
@@ -21,89 +21,31 @@
21 * 21 *
22 * Authors: Maarten Lankhorst 22 * Authors: Maarten Lankhorst
23 */ 23 */
24#include <engine/msppp.h> 24#include "priv.h"
25#include <engine/falcon.h>
26 25
27struct gf100_msppp_priv { 26#include <nvif/class.h>
28 struct nvkm_falcon base;
29};
30
31/*******************************************************************************
32 * MSPPP object classes
33 ******************************************************************************/
34
35static struct nvkm_oclass
36gf100_msppp_sclass[] = {
37 { 0x90b3, &nvkm_object_ofuncs },
38 {},
39};
40
41/*******************************************************************************
42 * PMSPPP context
43 ******************************************************************************/
44
45static struct nvkm_oclass
46gf100_msppp_cclass = {
47 .handle = NV_ENGCTX(MSPPP, 0xc0),
48 .ofuncs = &(struct nvkm_ofuncs) {
49 .ctor = _nvkm_falcon_context_ctor,
50 .dtor = _nvkm_falcon_context_dtor,
51 .init = _nvkm_falcon_context_init,
52 .fini = _nvkm_falcon_context_fini,
53 .rd32 = _nvkm_falcon_context_rd32,
54 .wr32 = _nvkm_falcon_context_wr32,
55 },
56};
57
58/*******************************************************************************
59 * PMSPPP engine/subdev functions
60 ******************************************************************************/
61 27
62static int 28static void
63gf100_msppp_init(struct nvkm_object *object) 29gf100_msppp_init(struct nvkm_falcon *msppp)
64{ 30{
65 struct gf100_msppp_priv *priv = (void *)object; 31 struct nvkm_device *device = msppp->engine.subdev.device;
66 int ret; 32 nvkm_wr32(device, 0x086010, 0x0000fff2);
67 33 nvkm_wr32(device, 0x08601c, 0x0000fff2);
68 ret = nvkm_falcon_init(&priv->base);
69 if (ret)
70 return ret;
71
72 nv_wr32(priv, 0x086010, 0x0000fff2);
73 nv_wr32(priv, 0x08601c, 0x0000fff2);
74 return 0;
75} 34}
76 35
77static int 36static const struct nvkm_falcon_func
78gf100_msppp_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 37gf100_msppp = {
79 struct nvkm_oclass *oclass, void *data, u32 size, 38 .pmc_enable = 0x00000002,
80 struct nvkm_object **pobject) 39 .init = gf100_msppp_init,
81{ 40 .sclass = {
82 struct gf100_msppp_priv *priv; 41 { -1, -1, GF100_MSPPP },
83 int ret; 42 {}
84 43 }
85 ret = nvkm_falcon_create(parent, engine, oclass, 0x086000, true, 44};
86 "PMSPPP", "msppp", &priv);
87 *pobject = nv_object(priv);
88 if (ret)
89 return ret;
90 45
91 nv_subdev(priv)->unit = 0x00000002; 46int
92 nv_subdev(priv)->intr = nvkm_falcon_intr; 47gf100_msppp_new(struct nvkm_device *device, int index,
93 nv_engine(priv)->cclass = &gf100_msppp_cclass; 48 struct nvkm_engine **pengine)
94 nv_engine(priv)->sclass = gf100_msppp_sclass; 49{
95 return 0; 50 return nvkm_msppp_new_(&gf100_msppp, device, index, pengine);
96} 51}
97
98struct nvkm_oclass
99gf100_msppp_oclass = {
100 .handle = NV_ENGINE(MSPPP, 0xc0),
101 .ofuncs = &(struct nvkm_ofuncs) {
102 .ctor = gf100_msppp_ctor,
103 .dtor = _nvkm_falcon_dtor,
104 .init = gf100_msppp_init,
105 .fini = _nvkm_falcon_fini,
106 .rd32 = _nvkm_falcon_rd32,
107 .wr32 = _nvkm_falcon_wr32,
108 },
109};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
new file mode 100644
index 000000000000..00e7795f1d51
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
@@ -0,0 +1,43 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
23 */
24#include "priv.h"
25
26#include <nvif/class.h>
27
28static const struct nvkm_falcon_func
29gt215_msppp = {
30 .pmc_enable = 0x00400002,
31 .init = g98_msppp_init,
32 .sclass = {
33 { -1, -1, GT212_MSPPP },
34 {}
35 }
36};
37
38int
39gt215_msppp_new(struct nvkm_device *device, int index,
40 struct nvkm_engine **pengine)
41{
42 return nvkm_msppp_new_(&gt215_msppp, device, index, pengine);
43}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
new file mode 100644
index 000000000000..37a91f9d9181
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
@@ -0,0 +1,9 @@
1#ifndef __NVKM_MSPPP_PRIV_H__
2#define __NVKM_MSPPP_PRIV_H__
3#include <engine/msppp.h>
4
5int nvkm_msppp_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
6 int index, struct nvkm_engine **);
7
8void g98_msppp_init(struct nvkm_falcon *);
9#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
index 0c9811009e28..28c8ecd27b6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
@@ -1,3 +1,6 @@
1nvkm-y += nvkm/engine/msvld/base.o
1nvkm-y += nvkm/engine/msvld/g98.o 2nvkm-y += nvkm/engine/msvld/g98.o
3nvkm-y += nvkm/engine/msvld/gt215.o
4nvkm-y += nvkm/engine/msvld/mcp89.o
2nvkm-y += nvkm/engine/msvld/gf100.o 5nvkm-y += nvkm/engine/msvld/gf100.o
3nvkm-y += nvkm/engine/msvld/gk104.o 6nvkm-y += nvkm/engine/msvld/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
new file mode 100644
index 000000000000..745bbb653dc0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
@@ -0,0 +1,31 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25
26int
27nvkm_msvld_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
28 int index, struct nvkm_engine **pengine)
29{
30 return nvkm_falcon_new_(func, device, index, true, 0x084000, pengine);
31}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
index c8a6b4ef52a1..47e2929bfaf0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
@@ -21,90 +21,31 @@
21 * 21 *
22 * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin 22 * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
23 */ 23 */
24#include <engine/msvld.h> 24#include "priv.h"
25#include <engine/falcon.h>
26 25
27struct g98_msvld_priv { 26#include <nvif/class.h>
28 struct nvkm_falcon base;
29};
30
31/*******************************************************************************
32 * MSVLD object classes
33 ******************************************************************************/
34
35static struct nvkm_oclass
36g98_msvld_sclass[] = {
37 { 0x88b1, &nvkm_object_ofuncs },
38 { 0x85b1, &nvkm_object_ofuncs },
39 { 0x86b1, &nvkm_object_ofuncs },
40 {},
41};
42
43/*******************************************************************************
44 * PMSVLD context
45 ******************************************************************************/
46
47static struct nvkm_oclass
48g98_msvld_cclass = {
49 .handle = NV_ENGCTX(MSVLD, 0x98),
50 .ofuncs = &(struct nvkm_ofuncs) {
51 .ctor = _nvkm_falcon_context_ctor,
52 .dtor = _nvkm_falcon_context_dtor,
53 .init = _nvkm_falcon_context_init,
54 .fini = _nvkm_falcon_context_fini,
55 .rd32 = _nvkm_falcon_context_rd32,
56 .wr32 = _nvkm_falcon_context_wr32,
57 },
58};
59
60/*******************************************************************************
61 * PMSVLD engine/subdev functions
62 ******************************************************************************/
63 27
64static int 28void
65g98_msvld_init(struct nvkm_object *object) 29g98_msvld_init(struct nvkm_falcon *msvld)
66{ 30{
67 struct g98_msvld_priv *priv = (void *)object; 31 struct nvkm_device *device = msvld->engine.subdev.device;
68 int ret; 32 nvkm_wr32(device, 0x084010, 0x0000ffd2);
69 33 nvkm_wr32(device, 0x08401c, 0x0000fff2);
70 ret = nvkm_falcon_init(&priv->base);
71 if (ret)
72 return ret;
73
74 nv_wr32(priv, 0x084010, 0x0000ffd2);
75 nv_wr32(priv, 0x08401c, 0x0000fff2);
76 return 0;
77} 34}
78 35
79static int 36static const struct nvkm_falcon_func
80g98_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 37g98_msvld = {
81 struct nvkm_oclass *oclass, void *data, u32 size, 38 .pmc_enable = 0x04008000,
82 struct nvkm_object **pobject) 39 .init = g98_msvld_init,
83{ 40 .sclass = {
84 struct g98_msvld_priv *priv; 41 { -1, -1, G98_MSVLD },
85 int ret; 42 {}
86 43 }
87 ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true, 44};
88 "PMSVLD", "msvld", &priv);
89 *pobject = nv_object(priv);
90 if (ret)
91 return ret;
92 45
93 nv_subdev(priv)->unit = 0x04008000; 46int
94 nv_engine(priv)->cclass = &g98_msvld_cclass; 47g98_msvld_new(struct nvkm_device *device, int index,
95 nv_engine(priv)->sclass = g98_msvld_sclass; 48 struct nvkm_engine **pengine)
96 return 0; 49{
50 return nvkm_msvld_new_(&g98_msvld, device, index, pengine);
97} 51}
98
99struct nvkm_oclass
100g98_msvld_oclass = {
101 .handle = NV_ENGINE(MSVLD, 0x98),
102 .ofuncs = &(struct nvkm_ofuncs) {
103 .ctor = g98_msvld_ctor,
104 .dtor = _nvkm_falcon_dtor,
105 .init = g98_msvld_init,
106 .fini = _nvkm_falcon_fini,
107 .rd32 = _nvkm_falcon_rd32,
108 .wr32 = _nvkm_falcon_wr32,
109 },
110};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
index b8d1e0f521ef..1ac581ba9f96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
@@ -21,89 +21,31 @@
21 * 21 *
22 * Authors: Maarten Lankhorst 22 * Authors: Maarten Lankhorst
23 */ 23 */
24#include <engine/msvld.h> 24#include "priv.h"
25#include <engine/falcon.h>
26 25
27struct gf100_msvld_priv { 26#include <nvif/class.h>
28 struct nvkm_falcon base;
29};
30
31/*******************************************************************************
32 * MSVLD object classes
33 ******************************************************************************/
34
35static struct nvkm_oclass
36gf100_msvld_sclass[] = {
37 { 0x90b1, &nvkm_object_ofuncs },
38 {},
39};
40
41/*******************************************************************************
42 * PMSVLD context
43 ******************************************************************************/
44
45static struct nvkm_oclass
46gf100_msvld_cclass = {
47 .handle = NV_ENGCTX(MSVLD, 0xc0),
48 .ofuncs = &(struct nvkm_ofuncs) {
49 .ctor = _nvkm_falcon_context_ctor,
50 .dtor = _nvkm_falcon_context_dtor,
51 .init = _nvkm_falcon_context_init,
52 .fini = _nvkm_falcon_context_fini,
53 .rd32 = _nvkm_falcon_context_rd32,
54 .wr32 = _nvkm_falcon_context_wr32,
55 },
56};
57
58/*******************************************************************************
59 * PMSVLD engine/subdev functions
60 ******************************************************************************/
61 27
62static int 28void
63gf100_msvld_init(struct nvkm_object *object) 29gf100_msvld_init(struct nvkm_falcon *msvld)
64{ 30{
65 struct gf100_msvld_priv *priv = (void *)object; 31 struct nvkm_device *device = msvld->engine.subdev.device;
66 int ret; 32 nvkm_wr32(device, 0x084010, 0x0000fff2);
67 33 nvkm_wr32(device, 0x08401c, 0x0000fff2);
68 ret = nvkm_falcon_init(&priv->base);
69 if (ret)
70 return ret;
71
72 nv_wr32(priv, 0x084010, 0x0000fff2);
73 nv_wr32(priv, 0x08401c, 0x0000fff2);
74 return 0;
75} 34}
76 35
77static int 36static const struct nvkm_falcon_func
78gf100_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 37gf100_msvld = {
79 struct nvkm_oclass *oclass, void *data, u32 size, 38 .pmc_enable = 0x00008000,
80 struct nvkm_object **pobject) 39 .init = gf100_msvld_init,
81{ 40 .sclass = {
82 struct gf100_msvld_priv *priv; 41 { -1, -1, GF100_MSVLD },
83 int ret; 42 {}
84 43 }
85 ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true, 44};
86 "PMSVLD", "msvld", &priv);
87 *pobject = nv_object(priv);
88 if (ret)
89 return ret;
90 45
91 nv_subdev(priv)->unit = 0x00008000; 46int
92 nv_subdev(priv)->intr = nvkm_falcon_intr; 47gf100_msvld_new(struct nvkm_device *device, int index,
93 nv_engine(priv)->cclass = &gf100_msvld_cclass; 48 struct nvkm_engine **pengine)
94 nv_engine(priv)->sclass = gf100_msvld_sclass; 49{
95 return 0; 50 return nvkm_msvld_new_(&gf100_msvld, device, index, pengine);
96} 51}
97
98struct nvkm_oclass
99gf100_msvld_oclass = {
100 .handle = NV_ENGINE(MSVLD, 0xc0),
101 .ofuncs = &(struct nvkm_ofuncs) {
102 .ctor = gf100_msvld_ctor,
103 .dtor = _nvkm_falcon_dtor,
104 .init = gf100_msvld_init,
105 .fini = _nvkm_falcon_fini,
106 .rd32 = _nvkm_falcon_rd32,
107 .wr32 = _nvkm_falcon_wr32,
108 },
109};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
index a0b0927834df..4bba16e0f560 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
@@ -21,89 +21,23 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <engine/msvld.h> 24#include "priv.h"
25#include <engine/falcon.h> 25
26 26#include <nvif/class.h>
27struct gk104_msvld_priv { 27
28 struct nvkm_falcon base; 28static const struct nvkm_falcon_func
29gk104_msvld = {
30 .pmc_enable = 0x00008000,
31 .init = gf100_msvld_init,
32 .sclass = {
33 { -1, -1, GK104_MSVLD },
34 {}
35 }
29}; 36};
30 37
31/******************************************************************************* 38int
32 * MSVLD object classes 39gk104_msvld_new(struct nvkm_device *device, int index,
33 ******************************************************************************/ 40 struct nvkm_engine **pengine)
34
35static struct nvkm_oclass
36gk104_msvld_sclass[] = {
37 { 0x95b1, &nvkm_object_ofuncs },
38 {},
39};
40
41/*******************************************************************************
42 * PMSVLD context
43 ******************************************************************************/
44
45static struct nvkm_oclass
46gk104_msvld_cclass = {
47 .handle = NV_ENGCTX(MSVLD, 0xe0),
48 .ofuncs = &(struct nvkm_ofuncs) {
49 .ctor = _nvkm_falcon_context_ctor,
50 .dtor = _nvkm_falcon_context_dtor,
51 .init = _nvkm_falcon_context_init,
52 .fini = _nvkm_falcon_context_fini,
53 .rd32 = _nvkm_falcon_context_rd32,
54 .wr32 = _nvkm_falcon_context_wr32,
55 },
56};
57
58/*******************************************************************************
59 * PMSVLD engine/subdev functions
60 ******************************************************************************/
61
62static int
63gk104_msvld_init(struct nvkm_object *object)
64{ 41{
65 struct gk104_msvld_priv *priv = (void *)object; 42 return nvkm_msvld_new_(&gk104_msvld, device, index, pengine);
66 int ret;
67
68 ret = nvkm_falcon_init(&priv->base);
69 if (ret)
70 return ret;
71
72 nv_wr32(priv, 0x084010, 0x0000fff2);
73 nv_wr32(priv, 0x08401c, 0x0000fff2);
74 return 0;
75}
76
77static int
78gk104_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
79 struct nvkm_oclass *oclass, void *data, u32 size,
80 struct nvkm_object **pobject)
81{
82 struct gk104_msvld_priv *priv;
83 int ret;
84
85 ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true,
86 "PMSVLD", "msvld", &priv);
87 *pobject = nv_object(priv);
88 if (ret)
89 return ret;
90
91 nv_subdev(priv)->unit = 0x00008000;
92 nv_subdev(priv)->intr = nvkm_falcon_intr;
93 nv_engine(priv)->cclass = &gk104_msvld_cclass;
94 nv_engine(priv)->sclass = gk104_msvld_sclass;
95 return 0;
96} 43}
97
98struct nvkm_oclass
99gk104_msvld_oclass = {
100 .handle = NV_ENGINE(MSVLD, 0xe0),
101 .ofuncs = &(struct nvkm_ofuncs) {
102 .ctor = gk104_msvld_ctor,
103 .dtor = _nvkm_falcon_dtor,
104 .init = gk104_msvld_init,
105 .fini = _nvkm_falcon_fini,
106 .rd32 = _nvkm_falcon_rd32,
107 .wr32 = _nvkm_falcon_wr32,
108 },
109};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
new file mode 100644
index 000000000000..e17cb5605b2d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
@@ -0,0 +1,43 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
23 */
24#include "priv.h"
25
26#include <nvif/class.h>
27
28static const struct nvkm_falcon_func
29gt215_msvld = {
30 .pmc_enable = 0x04008000,
31 .init = g98_msvld_init,
32 .sclass = {
33 { -1, -1, GT212_MSVLD },
34 {}
35 }
36};
37
38int
39gt215_msvld_new(struct nvkm_device *device, int index,
40 struct nvkm_engine **pengine)
41{
42 return nvkm_msvld_new_(&gt215_msvld, device, index, pengine);
43}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
new file mode 100644
index 000000000000..511800f6a43b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
@@ -0,0 +1,43 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
23 */
24#include "priv.h"
25
26#include <nvif/class.h>
27
28static const struct nvkm_falcon_func
29mcp89_msvld = {
30 .pmc_enable = 0x04008000,
31 .init = g98_msvld_init,
32 .sclass = {
33 { -1, -1, IGT21A_MSVLD },
34 {}
35 }
36};
37
38int
39mcp89_msvld_new(struct nvkm_device *device, int index,
40 struct nvkm_engine **pengine)
41{
42 return nvkm_msvld_new_(&mcp89_msvld, device, index, pengine);
43}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
new file mode 100644
index 000000000000..9dc1da67d929
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
@@ -0,0 +1,11 @@
1#ifndef __NVKM_MSVLD_PRIV_H__
2#define __NVKM_MSVLD_PRIV_H__
3#include <engine/msvld.h>
4
5int nvkm_msvld_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
6 int index, struct nvkm_engine **);
7
8void g98_msvld_init(struct nvkm_falcon *);
9
10void gf100_msvld_init(struct nvkm_falcon *);
11#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
index 413b6091e256..1614d385fb0c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
@@ -1,9 +1,10 @@
1nvkm-y += nvkm/engine/pm/base.o 1nvkm-y += nvkm/engine/pm/base.o
2nvkm-y += nvkm/engine/pm/daemon.o
3nvkm-y += nvkm/engine/pm/nv40.o 2nvkm-y += nvkm/engine/pm/nv40.o
4nvkm-y += nvkm/engine/pm/nv50.o 3nvkm-y += nvkm/engine/pm/nv50.o
5nvkm-y += nvkm/engine/pm/g84.o 4nvkm-y += nvkm/engine/pm/g84.o
5nvkm-y += nvkm/engine/pm/gt200.o
6nvkm-y += nvkm/engine/pm/gt215.o 6nvkm-y += nvkm/engine/pm/gt215.o
7nvkm-y += nvkm/engine/pm/gf100.o 7nvkm-y += nvkm/engine/pm/gf100.o
8nvkm-y += nvkm/engine/pm/gf108.o
9nvkm-y += nvkm/engine/pm/gf117.o
8nvkm-y += nvkm/engine/pm/gk104.o 10nvkm-y += nvkm/engine/pm/gk104.o
9nvkm-y += nvkm/engine/pm/gk110.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 4cf36a3aa814..0db9be202c42 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -24,370 +24,751 @@
24#include "priv.h" 24#include "priv.h"
25 25
26#include <core/client.h> 26#include <core/client.h>
27#include <core/device.h>
28#include <core/option.h> 27#include <core/option.h>
29 28
30#include <nvif/class.h> 29#include <nvif/class.h>
31#include <nvif/ioctl.h> 30#include <nvif/ioctl.h>
32#include <nvif/unpack.h> 31#include <nvif/unpack.h>
33 32
34#define QUAD_MASK 0x0f 33static u8
35#define QUAD_FREE 0x01 34nvkm_pm_count_perfdom(struct nvkm_pm *pm)
35{
36 struct nvkm_perfdom *dom;
37 u8 domain_nr = 0;
36 38
37static struct nvkm_perfsig * 39 list_for_each_entry(dom, &pm->domains, head)
38nvkm_perfsig_find_(struct nvkm_perfdom *dom, const char *name, u32 size) 40 domain_nr++;
41 return domain_nr;
42}
43
44static u16
45nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
39{ 46{
40 char path[64]; 47 u16 signal_nr = 0;
41 int i; 48 int i;
42 49
43 if (name[0] != '/') { 50 if (dom) {
44 for (i = 0; i < dom->signal_nr; i++) { 51 for (i = 0; i < dom->signal_nr; i++) {
45 if ( dom->signal[i].name && 52 if (dom->signal[i].name)
46 !strncmp(name, dom->signal[i].name, size)) 53 signal_nr++;
47 return &dom->signal[i];
48 }
49 } else {
50 for (i = 0; i < dom->signal_nr; i++) {
51 snprintf(path, sizeof(path), "/%s/%02x", dom->name, i);
52 if (!strncmp(name, path, size))
53 return &dom->signal[i];
54 } 54 }
55 } 55 }
56 return signal_nr;
57}
56 58
59static struct nvkm_perfdom *
60nvkm_perfdom_find(struct nvkm_pm *pm, int di)
61{
62 struct nvkm_perfdom *dom;
63 int tmp = 0;
64
65 list_for_each_entry(dom, &pm->domains, head) {
66 if (tmp++ == di)
67 return dom;
68 }
57 return NULL; 69 return NULL;
58} 70}
59 71
60struct nvkm_perfsig * 72struct nvkm_perfsig *
61nvkm_perfsig_find(struct nvkm_pm *ppm, const char *name, u32 size, 73nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
62 struct nvkm_perfdom **pdom)
63{ 74{
64 struct nvkm_perfdom *dom = *pdom; 75 struct nvkm_perfdom *dom = *pdom;
65 struct nvkm_perfsig *sig;
66 76
67 if (dom == NULL) { 77 if (dom == NULL) {
68 list_for_each_entry(dom, &ppm->domains, head) { 78 dom = nvkm_perfdom_find(pm, di);
69 sig = nvkm_perfsig_find_(dom, name, size); 79 if (dom == NULL)
70 if (sig) { 80 return NULL;
71 *pdom = dom; 81 *pdom = dom;
72 return sig; 82 }
73 }
74 }
75 83
84 if (!dom->signal[si].name)
76 return NULL; 85 return NULL;
77 } 86 return &dom->signal[si];
87}
78 88
79 return nvkm_perfsig_find_(dom, name, size); 89static u8
90nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
91{
92 u8 source_nr = 0, i;
93
94 for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
95 if (sig->source[i])
96 source_nr++;
97 }
98 return source_nr;
80} 99}
81 100
82struct nvkm_perfctr * 101static struct nvkm_perfsrc *
83nvkm_perfsig_wrap(struct nvkm_pm *ppm, const char *name, 102nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
84 struct nvkm_perfdom **pdom)
85{ 103{
86 struct nvkm_perfsig *sig; 104 struct nvkm_perfsrc *src;
87 struct nvkm_perfctr *ctr; 105 bool found = false;
106 int tmp = 1; /* Sources ID start from 1 */
107 u8 i;
108
109 for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
110 if (sig->source[i] == si) {
111 found = true;
112 break;
113 }
114 }
88 115
89 sig = nvkm_perfsig_find(ppm, name, strlen(name), pdom); 116 if (found) {
90 if (!sig) 117 list_for_each_entry(src, &pm->sources, head) {
91 return NULL; 118 if (tmp++ == si)
119 return src;
120 }
121 }
92 122
93 ctr = kzalloc(sizeof(*ctr), GFP_KERNEL); 123 return NULL;
94 if (ctr) { 124}
95 ctr->signal[0] = sig; 125
96 ctr->logic_op = 0xaaaa; 126static int
127nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
128{
129 struct nvkm_subdev *subdev = &pm->engine.subdev;
130 struct nvkm_device *device = subdev->device;
131 struct nvkm_perfdom *dom = NULL;
132 struct nvkm_perfsig *sig;
133 struct nvkm_perfsrc *src;
134 u32 mask, value;
135 int i, j;
136
137 for (i = 0; i < 4; i++) {
138 for (j = 0; j < 8 && ctr->source[i][j]; j++) {
139 sig = nvkm_perfsig_find(pm, ctr->domain,
140 ctr->signal[i], &dom);
141 if (!sig)
142 return -EINVAL;
143
144 src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
145 if (!src)
146 return -EINVAL;
147
148 /* set enable bit if needed */
149 mask = value = 0x00000000;
150 if (src->enable)
151 mask = value = 0x80000000;
152 mask |= (src->mask << src->shift);
153 value |= ((ctr->source[i][j] >> 32) << src->shift);
154
155 /* enable the source */
156 nvkm_mask(device, src->addr, mask, value);
157 nvkm_debug(subdev,
158 "enabled source %08x %08x %08x\n",
159 src->addr, mask, value);
160 }
97 } 161 }
162 return 0;
163}
98 164
99 return ctr; 165static int
166nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
167{
168 struct nvkm_subdev *subdev = &pm->engine.subdev;
169 struct nvkm_device *device = subdev->device;
170 struct nvkm_perfdom *dom = NULL;
171 struct nvkm_perfsig *sig;
172 struct nvkm_perfsrc *src;
173 u32 mask;
174 int i, j;
175
176 for (i = 0; i < 4; i++) {
177 for (j = 0; j < 8 && ctr->source[i][j]; j++) {
178 sig = nvkm_perfsig_find(pm, ctr->domain,
179 ctr->signal[i], &dom);
180 if (!sig)
181 return -EINVAL;
182
183 src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
184 if (!src)
185 return -EINVAL;
186
187 /* unset enable bit if needed */
188 mask = 0x00000000;
189 if (src->enable)
190 mask = 0x80000000;
191 mask |= (src->mask << src->shift);
192
193 /* disable the source */
194 nvkm_mask(device, src->addr, mask, 0);
195 nvkm_debug(subdev, "disabled source %08x %08x\n",
196 src->addr, mask);
197 }
198 }
199 return 0;
100} 200}
101 201
102/******************************************************************************* 202/*******************************************************************************
103 * Perfmon object classes 203 * Perfdom object classes
104 ******************************************************************************/ 204 ******************************************************************************/
105static int 205static int
106nvkm_perfctr_query(struct nvkm_object *object, void *data, u32 size) 206nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
107{ 207{
108 union { 208 union {
109 struct nvif_perfctr_query_v0 v0; 209 struct nvif_perfdom_init none;
110 } *args = data; 210 } *args = data;
111 struct nvkm_device *device = nv_device(object); 211 struct nvkm_object *object = &dom->object;
112 struct nvkm_pm *ppm = (void *)object->engine; 212 struct nvkm_pm *pm = dom->perfmon->pm;
113 struct nvkm_perfdom *dom = NULL, *chk; 213 int ret, i;
114 const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false); 214
115 const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all); 215 nvif_ioctl(object, "perfdom init size %d\n", size);
116 const char *name; 216 if (nvif_unvers(args->none)) {
117 int tmp = 0, di, si; 217 nvif_ioctl(object, "perfdom init\n");
218 } else
219 return ret;
220
221 for (i = 0; i < 4; i++) {
222 if (dom->ctr[i]) {
223 dom->func->init(pm, dom, dom->ctr[i]);
224
225 /* enable sources */
226 nvkm_perfsrc_enable(pm, dom->ctr[i]);
227 }
228 }
229
230 /* start next batch of counters for sampling */
231 dom->func->next(pm, dom);
232 return 0;
233}
234
235static int
236nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
237{
238 union {
239 struct nvif_perfdom_sample none;
240 } *args = data;
241 struct nvkm_object *object = &dom->object;
242 struct nvkm_pm *pm = dom->perfmon->pm;
118 int ret; 243 int ret;
119 244
120 nv_ioctl(object, "perfctr query size %d\n", size); 245 nvif_ioctl(object, "perfdom sample size %d\n", size);
246 if (nvif_unvers(args->none)) {
247 nvif_ioctl(object, "perfdom sample\n");
248 } else
249 return ret;
250 pm->sequence++;
251
252 /* sample previous batch of counters */
253 list_for_each_entry(dom, &pm->domains, head)
254 dom->func->next(pm, dom);
255
256 return 0;
257}
258
259static int
260nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
261{
262 union {
263 struct nvif_perfdom_read_v0 v0;
264 } *args = data;
265 struct nvkm_object *object = &dom->object;
266 struct nvkm_pm *pm = dom->perfmon->pm;
267 int ret, i;
268
269 nvif_ioctl(object, "perfdom read size %d\n", size);
121 if (nvif_unpack(args->v0, 0, 0, false)) { 270 if (nvif_unpack(args->v0, 0, 0, false)) {
122 nv_ioctl(object, "perfctr query vers %d iter %08x\n", 271 nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
123 args->v0.version, args->v0.iter);
124 di = (args->v0.iter & 0xff000000) >> 24;
125 si = (args->v0.iter & 0x00ffffff) - 1;
126 } else 272 } else
127 return ret; 273 return ret;
128 274
129 list_for_each_entry(chk, &ppm->domains, head) { 275 for (i = 0; i < 4; i++) {
130 if (tmp++ == di) { 276 if (dom->ctr[i])
131 dom = chk; 277 dom->func->read(pm, dom, dom->ctr[i]);
132 break;
133 }
134 } 278 }
135 279
136 if (dom == NULL || si >= (int)dom->signal_nr) 280 if (!dom->clk)
137 return -EINVAL; 281 return -EAGAIN;
138 282
139 if (si >= 0) { 283 for (i = 0; i < 4; i++)
140 if (raw || !(name = dom->signal[si].name)) { 284 if (dom->ctr[i])
141 snprintf(args->v0.name, sizeof(args->v0.name), 285 args->v0.ctr[i] = dom->ctr[i]->ctr;
142 "/%s/%02x", dom->name, si); 286 args->v0.clk = dom->clk;
143 } else { 287 return 0;
144 strncpy(args->v0.name, name, sizeof(args->v0.name)); 288}
289
290static int
291nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
292{
293 struct nvkm_perfdom *dom = nvkm_perfdom(object);
294 switch (mthd) {
295 case NVIF_PERFDOM_V0_INIT:
296 return nvkm_perfdom_init(dom, data, size);
297 case NVIF_PERFDOM_V0_SAMPLE:
298 return nvkm_perfdom_sample(dom, data, size);
299 case NVIF_PERFDOM_V0_READ:
300 return nvkm_perfdom_read(dom, data, size);
301 default:
302 break;
303 }
304 return -EINVAL;
305}
306
307static void *
308nvkm_perfdom_dtor(struct nvkm_object *object)
309{
310 struct nvkm_perfdom *dom = nvkm_perfdom(object);
311 struct nvkm_pm *pm = dom->perfmon->pm;
312 int i;
313
314 for (i = 0; i < 4; i++) {
315 struct nvkm_perfctr *ctr = dom->ctr[i];
316 if (ctr) {
317 nvkm_perfsrc_disable(pm, ctr);
318 if (ctr->head.next)
319 list_del(&ctr->head);
145 } 320 }
321 kfree(ctr);
146 } 322 }
147 323
148 do { 324 return dom;
149 while (++si < dom->signal_nr) { 325}
150 if (all || dom->signal[si].name) { 326
151 args->v0.iter = (di << 24) | ++si; 327static int
152 return 0; 328nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
153 } 329 struct nvkm_perfsig *signal[4], u64 source[4][8],
330 u16 logic_op, struct nvkm_perfctr **pctr)
331{
332 struct nvkm_perfctr *ctr;
333 int i, j;
334
335 if (!dom)
336 return -EINVAL;
337
338 ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
339 if (!ctr)
340 return -ENOMEM;
341
342 ctr->domain = domain;
343 ctr->logic_op = logic_op;
344 ctr->slot = slot;
345 for (i = 0; i < 4; i++) {
346 if (signal[i]) {
347 ctr->signal[i] = signal[i] - dom->signal;
348 for (j = 0; j < 8; j++)
349 ctr->source[i][j] = source[i][j];
154 } 350 }
155 si = -1; 351 }
156 di = di + 1; 352 list_add_tail(&ctr->head, &dom->list);
157 dom = list_entry(dom->head.next, typeof(*dom), head);
158 } while (&dom->head != &ppm->domains);
159 353
160 args->v0.iter = 0xffffffff;
161 return 0; 354 return 0;
162} 355}
163 356
357static const struct nvkm_object_func
358nvkm_perfdom = {
359 .dtor = nvkm_perfdom_dtor,
360 .mthd = nvkm_perfdom_mthd,
361};
362
164static int 363static int
165nvkm_perfctr_sample(struct nvkm_object *object, void *data, u32 size) 364nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
365 const struct nvkm_oclass *oclass, void *data, u32 size,
366 struct nvkm_object **pobject)
166{ 367{
167 union { 368 union {
168 struct nvif_perfctr_sample none; 369 struct nvif_perfdom_v0 v0;
169 } *args = data; 370 } *args = data;
170 struct nvkm_pm *ppm = (void *)object->engine; 371 struct nvkm_pm *pm = perfmon->pm;
171 struct nvkm_perfctr *ctr, *tmp; 372 struct nvkm_object *parent = oclass->parent;
373 struct nvkm_perfdom *sdom = NULL;
374 struct nvkm_perfctr *ctr[4] = {};
172 struct nvkm_perfdom *dom; 375 struct nvkm_perfdom *dom;
376 int c, s, m;
173 int ret; 377 int ret;
174 378
175 nv_ioctl(object, "perfctr sample size %d\n", size); 379 nvif_ioctl(parent, "create perfdom size %d\n", size);
176 if (nvif_unvers(args->none)) { 380 if (nvif_unpack(args->v0, 0, 0, false)) {
177 nv_ioctl(object, "perfctr sample\n"); 381 nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
382 args->v0.version, args->v0.domain, args->v0.mode);
178 } else 383 } else
179 return ret; 384 return ret;
180 ppm->sequence++;
181
182 list_for_each_entry(dom, &ppm->domains, head) {
183 /* sample previous batch of counters */
184 if (dom->quad != QUAD_MASK) {
185 dom->func->next(ppm, dom);
186 tmp = NULL;
187 while (!list_empty(&dom->list)) {
188 ctr = list_first_entry(&dom->list,
189 typeof(*ctr), head);
190 if (ctr->slot < 0) break;
191 if ( tmp && tmp == ctr) break;
192 if (!tmp) tmp = ctr;
193 dom->func->read(ppm, dom, ctr);
194 ctr->slot = -1;
195 list_move_tail(&ctr->head, &dom->list);
196 }
197 }
198 385
199 dom->quad = QUAD_MASK; 386 for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
200 387 struct nvkm_perfsig *sig[4] = {};
201 /* setup next batch of counters for sampling */ 388 u64 src[4][8] = {};
202 list_for_each_entry(ctr, &dom->list, head) { 389
203 ctr->slot = ffs(dom->quad) - 1; 390 for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
204 if (ctr->slot < 0) 391 sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
205 break; 392 args->v0.ctr[c].signal[s],
206 dom->quad &= ~(QUAD_FREE << ctr->slot); 393 &sdom);
207 dom->func->init(ppm, dom, ctr); 394 if (args->v0.ctr[c].signal[s] && !sig[s])
395 return -EINVAL;
396
397 for (m = 0; m < 8; m++) {
398 src[s][m] = args->v0.ctr[c].source[s][m];
399 if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
400 src[s][m]))
401 return -EINVAL;
402 }
208 } 403 }
209 404
210 if (dom->quad != QUAD_MASK) 405 ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
211 dom->func->next(ppm, dom); 406 args->v0.ctr[c].logic_op, &ctr[c]);
407 if (ret)
408 return ret;
212 } 409 }
213 410
411 if (!sdom)
412 return -EINVAL;
413
414 if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
415 return -ENOMEM;
416 nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
417 dom->perfmon = perfmon;
418 *pobject = &dom->object;
419
420 dom->func = sdom->func;
421 dom->addr = sdom->addr;
422 dom->mode = args->v0.mode;
423 for (c = 0; c < ARRAY_SIZE(ctr); c++)
424 dom->ctr[c] = ctr[c];
214 return 0; 425 return 0;
215} 426}
216 427
428/*******************************************************************************
429 * Perfmon object classes
430 ******************************************************************************/
217static int 431static int
218nvkm_perfctr_read(struct nvkm_object *object, void *data, u32 size) 432nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
433 void *data, u32 size)
219{ 434{
220 union { 435 union {
221 struct nvif_perfctr_read_v0 v0; 436 struct nvif_perfmon_query_domain_v0 v0;
222 } *args = data; 437 } *args = data;
223 struct nvkm_perfctr *ctr = (void *)object; 438 struct nvkm_object *object = &perfmon->object;
224 int ret; 439 struct nvkm_pm *pm = perfmon->pm;
440 struct nvkm_perfdom *dom;
441 u8 domain_nr;
442 int di, ret;
225 443
226 nv_ioctl(object, "perfctr read size %d\n", size); 444 nvif_ioctl(object, "perfmon query domain size %d\n", size);
227 if (nvif_unpack(args->v0, 0, 0, false)) { 445 if (nvif_unpack(args->v0, 0, 0, false)) {
228 nv_ioctl(object, "perfctr read vers %d\n", args->v0.version); 446 nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
447 args->v0.version, args->v0.iter);
448 di = (args->v0.iter & 0xff) - 1;
229 } else 449 } else
230 return ret; 450 return ret;
231 451
232 if (!ctr->clk) 452 domain_nr = nvkm_pm_count_perfdom(pm);
233 return -EAGAIN; 453 if (di >= (int)domain_nr)
454 return -EINVAL;
455
456 if (di >= 0) {
457 dom = nvkm_perfdom_find(pm, di);
458 if (dom == NULL)
459 return -EINVAL;
460
461 args->v0.id = di;
462 args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
463 strncpy(args->v0.name, dom->name, sizeof(args->v0.name));
464
465 /* Currently only global counters (PCOUNTER) are implemented
466 * but this will be different for local counters (MP). */
467 args->v0.counter_nr = 4;
468 }
234 469
235 args->v0.clk = ctr->clk; 470 if (++di < domain_nr) {
236 args->v0.ctr = ctr->ctr; 471 args->v0.iter = ++di;
472 return 0;
473 }
474
475 args->v0.iter = 0xff;
237 return 0; 476 return 0;
238} 477}
239 478
240static int 479static int
241nvkm_perfctr_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) 480nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
481 void *data, u32 size)
242{ 482{
243 switch (mthd) { 483 union {
244 case NVIF_PERFCTR_V0_QUERY: 484 struct nvif_perfmon_query_signal_v0 v0;
245 return nvkm_perfctr_query(object, data, size); 485 } *args = data;
246 case NVIF_PERFCTR_V0_SAMPLE: 486 struct nvkm_object *object = &perfmon->object;
247 return nvkm_perfctr_sample(object, data, size); 487 struct nvkm_pm *pm = perfmon->pm;
248 case NVIF_PERFCTR_V0_READ: 488 struct nvkm_device *device = pm->engine.subdev.device;
249 return nvkm_perfctr_read(object, data, size); 489 struct nvkm_perfdom *dom;
250 default: 490 struct nvkm_perfsig *sig;
251 break; 491 const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
492 const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
493 int ret, si;
494
495 nvif_ioctl(object, "perfmon query signal size %d\n", size);
496 if (nvif_unpack(args->v0, 0, 0, false)) {
497 nvif_ioctl(object,
498 "perfmon query signal vers %d dom %d iter %04x\n",
499 args->v0.version, args->v0.domain, args->v0.iter);
500 si = (args->v0.iter & 0xffff) - 1;
501 } else
502 return ret;
503
504 dom = nvkm_perfdom_find(pm, args->v0.domain);
505 if (dom == NULL || si >= (int)dom->signal_nr)
506 return -EINVAL;
507
508 if (si >= 0) {
509 sig = &dom->signal[si];
510 if (raw || !sig->name) {
511 snprintf(args->v0.name, sizeof(args->v0.name),
512 "/%s/%02x", dom->name, si);
513 } else {
514 strncpy(args->v0.name, sig->name,
515 sizeof(args->v0.name));
516 }
517
518 args->v0.signal = si;
519 args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
252 } 520 }
253 return -EINVAL;
254}
255 521
256static void 522 while (++si < dom->signal_nr) {
257nvkm_perfctr_dtor(struct nvkm_object *object) 523 if (all || dom->signal[si].name) {
258{ 524 args->v0.iter = ++si;
259 struct nvkm_perfctr *ctr = (void *)object; 525 return 0;
260 if (ctr->head.next) 526 }
261 list_del(&ctr->head); 527 }
262 nvkm_object_destroy(&ctr->base); 528
529 args->v0.iter = 0xffff;
530 return 0;
263} 531}
264 532
265static int 533static int
266nvkm_perfctr_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 534nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
267 struct nvkm_oclass *oclass, void *data, u32 size, 535 void *data, u32 size)
268 struct nvkm_object **pobject)
269{ 536{
270 union { 537 union {
271 struct nvif_perfctr_v0 v0; 538 struct nvif_perfmon_query_source_v0 v0;
272 } *args = data; 539 } *args = data;
273 struct nvkm_pm *ppm = (void *)engine; 540 struct nvkm_object *object = &perfmon->object;
541 struct nvkm_pm *pm = perfmon->pm;
274 struct nvkm_perfdom *dom = NULL; 542 struct nvkm_perfdom *dom = NULL;
275 struct nvkm_perfsig *sig[4] = {}; 543 struct nvkm_perfsig *sig;
276 struct nvkm_perfctr *ctr; 544 struct nvkm_perfsrc *src;
277 int ret, i; 545 u8 source_nr = 0;
546 int si, ret;
278 547
279 nv_ioctl(parent, "create perfctr size %d\n", size); 548 nvif_ioctl(object, "perfmon query source size %d\n", size);
280 if (nvif_unpack(args->v0, 0, 0, false)) { 549 if (nvif_unpack(args->v0, 0, 0, false)) {
281 nv_ioctl(parent, "create perfctr vers %d logic_op %04x\n", 550 nvif_ioctl(object,
282 args->v0.version, args->v0.logic_op); 551 "perfmon source vers %d dom %d sig %02x iter %02x\n",
552 args->v0.version, args->v0.domain, args->v0.signal,
553 args->v0.iter);
554 si = (args->v0.iter & 0xff) - 1;
283 } else 555 } else
284 return ret; 556 return ret;
285 557
286 for (i = 0; i < ARRAY_SIZE(args->v0.name) && args->v0.name[i][0]; i++) { 558 sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
287 sig[i] = nvkm_perfsig_find(ppm, args->v0.name[i], 559 if (!sig)
288 strnlen(args->v0.name[i], 560 return -EINVAL;
289 sizeof(args->v0.name[i])), 561
290 &dom); 562 source_nr = nvkm_perfsig_count_perfsrc(sig);
291 if (!sig[i]) 563 if (si >= (int)source_nr)
564 return -EINVAL;
565
566 if (si >= 0) {
567 src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
568 if (!src)
292 return -EINVAL; 569 return -EINVAL;
570
571 args->v0.source = sig->source[si];
572 args->v0.mask = src->mask;
573 strncpy(args->v0.name, src->name, sizeof(args->v0.name));
293 } 574 }
294 575
295 ret = nvkm_object_create(parent, engine, oclass, 0, &ctr); 576 if (++si < source_nr) {
296 *pobject = nv_object(ctr); 577 args->v0.iter = ++si;
297 if (ret) 578 return 0;
298 return ret; 579 }
299 580
300 ctr->slot = -1; 581 args->v0.iter = 0xff;
301 ctr->logic_op = args->v0.logic_op;
302 ctr->signal[0] = sig[0];
303 ctr->signal[1] = sig[1];
304 ctr->signal[2] = sig[2];
305 ctr->signal[3] = sig[3];
306 if (dom)
307 list_add_tail(&ctr->head, &dom->list);
308 return 0; 582 return 0;
309} 583}
310 584
311static struct nvkm_ofuncs 585static int
312nvkm_perfctr_ofuncs = { 586nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
313 .ctor = nvkm_perfctr_ctor, 587{
314 .dtor = nvkm_perfctr_dtor, 588 struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
315 .init = nvkm_object_init, 589 switch (mthd) {
316 .fini = nvkm_object_fini, 590 case NVIF_PERFMON_V0_QUERY_DOMAIN:
317 .mthd = nvkm_perfctr_mthd, 591 return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
318}; 592 case NVIF_PERFMON_V0_QUERY_SIGNAL:
593 return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
594 case NVIF_PERFMON_V0_QUERY_SOURCE:
595 return nvkm_perfmon_mthd_query_source(perfmon, data, size);
596 default:
597 break;
598 }
599 return -EINVAL;
600}
601
602static int
603nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
604 struct nvkm_object **pobject)
605{
606 struct nvkm_perfmon *perfmon = nvkm_perfmon(oclass->parent);
607 return nvkm_perfdom_new_(perfmon, oclass, data, size, pobject);
608}
609
610static int
611nvkm_perfmon_child_get(struct nvkm_object *object, int index,
612 struct nvkm_oclass *oclass)
613{
614 if (index == 0) {
615 oclass->base.oclass = NVIF_IOCTL_NEW_V0_PERFDOM;
616 oclass->base.minver = 0;
617 oclass->base.maxver = 0;
618 oclass->ctor = nvkm_perfmon_child_new;
619 return 0;
620 }
621 return -EINVAL;
622}
623
624static void *
625nvkm_perfmon_dtor(struct nvkm_object *object)
626{
627 struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
628 struct nvkm_pm *pm = perfmon->pm;
629 mutex_lock(&pm->engine.subdev.mutex);
630 if (pm->perfmon == &perfmon->object)
631 pm->perfmon = NULL;
632 mutex_unlock(&pm->engine.subdev.mutex);
633 return perfmon;
634}
319 635
320struct nvkm_oclass 636static struct nvkm_object_func
321nvkm_pm_sclass[] = { 637nvkm_perfmon = {
322 { .handle = NVIF_IOCTL_NEW_V0_PERFCTR, 638 .dtor = nvkm_perfmon_dtor,
323 .ofuncs = &nvkm_perfctr_ofuncs, 639 .mthd = nvkm_perfmon_mthd,
324 }, 640 .sclass = nvkm_perfmon_child_get,
325 {},
326}; 641};
327 642
328/******************************************************************************* 643static int
329 * PPM context 644nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
330 ******************************************************************************/ 645 void *data, u32 size, struct nvkm_object **pobject)
331static void
332nvkm_perfctx_dtor(struct nvkm_object *object)
333{ 646{
334 struct nvkm_pm *ppm = (void *)object->engine; 647 struct nvkm_perfmon *perfmon;
335 struct nvkm_perfctx *ctx = (void *)object; 648
336 649 if (!(perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL)))
337 mutex_lock(&nv_subdev(ppm)->mutex); 650 return -ENOMEM;
338 nvkm_engctx_destroy(&ctx->base); 651 nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
339 if (ppm->context == ctx) 652 perfmon->pm = pm;
340 ppm->context = NULL; 653 *pobject = &perfmon->object;
341 mutex_unlock(&nv_subdev(ppm)->mutex); 654 return 0;
342} 655}
343 656
657/*******************************************************************************
658 * PPM engine/subdev functions
659 ******************************************************************************/
660
344static int 661static int
345nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 662nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
346 struct nvkm_oclass *oclass, void *data, u32 size, 663 void *data, u32 size, struct nvkm_object **pobject)
347 struct nvkm_object **pobject)
348{ 664{
349 struct nvkm_pm *ppm = (void *)engine; 665 struct nvkm_pm *pm = nvkm_pm(oclass->engine);
350 struct nvkm_perfctx *ctx;
351 int ret; 666 int ret;
352 667
353 ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0, 0, 0, &ctx); 668 ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
354 *pobject = nv_object(ctx);
355 if (ret) 669 if (ret)
356 return ret; 670 return ret;
357 671
358 mutex_lock(&nv_subdev(ppm)->mutex); 672 mutex_lock(&pm->engine.subdev.mutex);
359 if (ppm->context == NULL) 673 if (pm->perfmon == NULL)
360 ppm->context = ctx; 674 pm->perfmon = *pobject;
361 if (ctx != ppm->context) 675 ret = (pm->perfmon == *pobject) ? 0 : -EBUSY;
362 ret = -EBUSY; 676 mutex_unlock(&pm->engine.subdev.mutex);
363 mutex_unlock(&nv_subdev(ppm)->mutex);
364
365 return ret; 677 return ret;
366} 678}
367 679
368struct nvkm_oclass 680static const struct nvkm_device_oclass
369nvkm_pm_cclass = { 681nvkm_pm_oclass = {
370 .handle = NV_ENGCTX(PM, 0x00), 682 .base.oclass = NVIF_IOCTL_NEW_V0_PERFMON,
371 .ofuncs = &(struct nvkm_ofuncs) { 683 .base.minver = -1,
372 .ctor = nvkm_perfctx_ctor, 684 .base.maxver = -1,
373 .dtor = nvkm_perfctx_dtor, 685 .ctor = nvkm_pm_oclass_new,
374 .init = _nvkm_engctx_init,
375 .fini = _nvkm_engctx_fini,
376 },
377}; 686};
378 687
379/******************************************************************************* 688static int
380 * PPM engine/subdev functions 689nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
381 ******************************************************************************/ 690 const struct nvkm_device_oclass **class)
691{
692 if (index == 0) {
693 oclass->base = nvkm_pm_oclass.base;
694 *class = &nvkm_pm_oclass;
695 return index;
696 }
697 return 1;
698}
699
700int
701nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
702 const struct nvkm_specsrc *spec)
703{
704 const struct nvkm_specsrc *ssrc;
705 const struct nvkm_specmux *smux;
706 struct nvkm_perfsrc *src;
707 u8 source_nr = 0;
708
709 if (!spec) {
710 /* No sources are defined for this signal. */
711 return 0;
712 }
713
714 ssrc = spec;
715 while (ssrc->name) {
716 smux = ssrc->mux;
717 while (smux->name) {
718 bool found = false;
719 u8 source_id = 0;
720 u32 len;
721
722 list_for_each_entry(src, &pm->sources, head) {
723 if (src->addr == ssrc->addr &&
724 src->shift == smux->shift) {
725 found = true;
726 break;
727 }
728 source_id++;
729 }
730
731 if (!found) {
732 src = kzalloc(sizeof(*src), GFP_KERNEL);
733 if (!src)
734 return -ENOMEM;
735
736 src->addr = ssrc->addr;
737 src->mask = smux->mask;
738 src->shift = smux->shift;
739 src->enable = smux->enable;
740
741 len = strlen(ssrc->name) +
742 strlen(smux->name) + 2;
743 src->name = kzalloc(len, GFP_KERNEL);
744 if (!src->name) {
745 kfree(src);
746 return -ENOMEM;
747 }
748 snprintf(src->name, len, "%s_%s", ssrc->name,
749 smux->name);
750
751 list_add_tail(&src->head, &pm->sources);
752 }
753
754 sig->source[source_nr++] = source_id + 1;
755 smux++;
756 }
757 ssrc++;
758 }
759
760 return 0;
761}
762
382int 763int
383nvkm_perfdom_new(struct nvkm_pm *ppm, const char *name, u32 mask, 764nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
384 u32 base, u32 size_unit, u32 size_domain, 765 u32 base, u32 size_unit, u32 size_domain,
385 const struct nvkm_specdom *spec) 766 const struct nvkm_specdom *spec)
386{ 767{
387 const struct nvkm_specdom *sdom; 768 const struct nvkm_specdom *sdom;
388 const struct nvkm_specsig *ssig; 769 const struct nvkm_specsig *ssig;
389 struct nvkm_perfdom *dom; 770 struct nvkm_perfdom *dom;
390 int i; 771 int ret, i;
391 772
392 for (i = 0; i == 0 || mask; i++) { 773 for (i = 0; i == 0 || mask; i++) {
393 u32 addr = base + (i * size_unit); 774 u32 addr = base + (i * size_unit);
@@ -410,16 +791,20 @@ nvkm_perfdom_new(struct nvkm_pm *ppm, const char *name, u32 mask,
410 "%s/%02x", name, (int)(sdom - spec)); 791 "%s/%02x", name, (int)(sdom - spec));
411 } 792 }
412 793
413 list_add_tail(&dom->head, &ppm->domains); 794 list_add_tail(&dom->head, &pm->domains);
414 INIT_LIST_HEAD(&dom->list); 795 INIT_LIST_HEAD(&dom->list);
415 dom->func = sdom->func; 796 dom->func = sdom->func;
416 dom->addr = addr; 797 dom->addr = addr;
417 dom->quad = QUAD_MASK;
418 dom->signal_nr = sdom->signal_nr; 798 dom->signal_nr = sdom->signal_nr;
419 799
420 ssig = (sdom++)->signal; 800 ssig = (sdom++)->signal;
421 while (ssig->name) { 801 while (ssig->name) {
422 dom->signal[ssig->signal].name = ssig->name; 802 struct nvkm_perfsig *sig =
803 &dom->signal[ssig->signal];
804 sig->name = ssig->name;
805 ret = nvkm_perfsrc_new(pm, sig, ssig->source);
806 if (ret)
807 return ret;
423 ssig++; 808 ssig++;
424 } 809 }
425 810
@@ -432,47 +817,49 @@ nvkm_perfdom_new(struct nvkm_pm *ppm, const char *name, u32 mask,
432 return 0; 817 return 0;
433} 818}
434 819
435int 820static int
436_nvkm_pm_fini(struct nvkm_object *object, bool suspend) 821nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
437{
438 struct nvkm_pm *ppm = (void *)object;
439 return nvkm_engine_fini(&ppm->base, suspend);
440}
441
442int
443_nvkm_pm_init(struct nvkm_object *object)
444{ 822{
445 struct nvkm_pm *ppm = (void *)object; 823 struct nvkm_pm *pm = nvkm_pm(engine);
446 return nvkm_engine_init(&ppm->base); 824 if (pm->func->fini)
825 pm->func->fini(pm);
826 return 0;
447} 827}
448 828
449void 829static void *
450_nvkm_pm_dtor(struct nvkm_object *object) 830nvkm_pm_dtor(struct nvkm_engine *engine)
451{ 831{
452 struct nvkm_pm *ppm = (void *)object; 832 struct nvkm_pm *pm = nvkm_pm(engine);
453 struct nvkm_perfdom *dom, *tmp; 833 struct nvkm_perfdom *dom, *next_dom;
834 struct nvkm_perfsrc *src, *next_src;
454 835
455 list_for_each_entry_safe(dom, tmp, &ppm->domains, head) { 836 list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
456 list_del(&dom->head); 837 list_del(&dom->head);
457 kfree(dom); 838 kfree(dom);
458 } 839 }
459 840
460 nvkm_engine_destroy(&ppm->base); 841 list_for_each_entry_safe(src, next_src, &pm->sources, head) {
842 list_del(&src->head);
843 kfree(src->name);
844 kfree(src);
845 }
846
847 return pm;
461} 848}
462 849
850static const struct nvkm_engine_func
851nvkm_pm = {
852 .dtor = nvkm_pm_dtor,
853 .fini = nvkm_pm_fini,
854 .base.sclass = nvkm_pm_oclass_get,
855};
856
463int 857int
464nvkm_pm_create_(struct nvkm_object *parent, struct nvkm_object *engine, 858nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
465 struct nvkm_oclass *oclass, int length, void **pobject) 859 int index, struct nvkm_pm *pm)
466{ 860{
467 struct nvkm_pm *ppm; 861 pm->func = func;
468 int ret; 862 INIT_LIST_HEAD(&pm->domains);
469 863 INIT_LIST_HEAD(&pm->sources);
470 ret = nvkm_engine_create_(parent, engine, oclass, true, "PPM", 864 return nvkm_engine_ctor(&nvkm_pm, device, index, 0, true, &pm->engine);
471 "pm", length, pobject);
472 ppm = *pobject;
473 if (ret)
474 return ret;
475
476 INIT_LIST_HEAD(&ppm->domains);
477 return 0;
478} 865}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c
deleted file mode 100644
index a7a5f3a3c91b..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c
+++ /dev/null
@@ -1,108 +0,0 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26static void
27pwr_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
28 struct nvkm_perfctr *ctr)
29{
30 u32 mask = 0x00000000;
31 u32 ctrl = 0x00000001;
32 int i;
33
34 for (i = 0; i < ARRAY_SIZE(ctr->signal) && ctr->signal[i]; i++)
35 mask |= 1 << (ctr->signal[i] - dom->signal);
36
37 nv_wr32(ppm, 0x10a504 + (ctr->slot * 0x10), mask);
38 nv_wr32(ppm, 0x10a50c + (ctr->slot * 0x10), ctrl);
39 nv_wr32(ppm, 0x10a50c + (ppm->last * 0x10), 0x00000003);
40}
41
42static void
43pwr_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
44 struct nvkm_perfctr *ctr)
45{
46 ctr->ctr = ppm->pwr[ctr->slot];
47 ctr->clk = ppm->pwr[ppm->last];
48}
49
50static void
51pwr_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom)
52{
53 int i;
54
55 for (i = 0; i <= ppm->last; i++) {
56 ppm->pwr[i] = nv_rd32(ppm, 0x10a508 + (i * 0x10));
57 nv_wr32(ppm, 0x10a508 + (i * 0x10), 0x80000000);
58 }
59}
60
61static const struct nvkm_funcdom
62pwr_perfctr_func = {
63 .init = pwr_perfctr_init,
64 .read = pwr_perfctr_read,
65 .next = pwr_perfctr_next,
66};
67
68const struct nvkm_specdom
69gt215_pm_pwr[] = {
70 { 0x20, (const struct nvkm_specsig[]) {
71 { 0x00, "pwr_gr_idle" },
72 { 0x04, "pwr_bsp_idle" },
73 { 0x05, "pwr_vp_idle" },
74 { 0x06, "pwr_ppp_idle" },
75 { 0x13, "pwr_ce0_idle" },
76 {}
77 }, &pwr_perfctr_func },
78 {}
79};
80
81const struct nvkm_specdom
82gf100_pm_pwr[] = {
83 { 0x20, (const struct nvkm_specsig[]) {
84 { 0x00, "pwr_gr_idle" },
85 { 0x04, "pwr_bsp_idle" },
86 { 0x05, "pwr_vp_idle" },
87 { 0x06, "pwr_ppp_idle" },
88 { 0x13, "pwr_ce0_idle" },
89 { 0x14, "pwr_ce1_idle" },
90 {}
91 }, &pwr_perfctr_func },
92 {}
93};
94
95const struct nvkm_specdom
96gk104_pm_pwr[] = {
97 { 0x20, (const struct nvkm_specsig[]) {
98 { 0x00, "pwr_gr_idle" },
99 { 0x04, "pwr_bsp_idle" },
100 { 0x05, "pwr_vp_idle" },
101 { 0x06, "pwr_ppp_idle" },
102 { 0x13, "pwr_ce0_idle" },
103 { 0x14, "pwr_ce1_idle" },
104 { 0x15, "pwr_ce2_idle" },
105 {}
106 }, &pwr_perfctr_func },
107 {}
108};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
index d54c6705ba17..6e441ddafd86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
@@ -23,15 +23,121 @@
23 */ 23 */
24#include "nv40.h" 24#include "nv40.h"
25 25
26const struct nvkm_specsrc
27g84_vfetch_sources[] = {
28 { 0x400c0c, (const struct nvkm_specmux[]) {
29 { 0x3, 0, "unk0" },
30 {}
31 }, "pgraph_vfetch_unk0c" },
32 {}
33};
34
35static const struct nvkm_specsrc
36g84_prop_sources[] = {
37 { 0x408e50, (const struct nvkm_specmux[]) {
38 { 0x1f, 0, "sel", true },
39 {}
40 }, "pgraph_tpc0_prop_pm_mux" },
41 {}
42};
43
44static const struct nvkm_specsrc
45g84_crop_sources[] = {
46 { 0x407008, (const struct nvkm_specmux[]) {
47 { 0xf, 0, "sel0", true },
48 { 0x7, 16, "sel1", true },
49 {}
50 }, "pgraph_rop0_crop_pm_mux" },
51 {}
52};
53
54static const struct nvkm_specsrc
55g84_tex_sources[] = {
56 { 0x408808, (const struct nvkm_specmux[]) {
57 { 0xfffff, 0, "unk0" },
58 {}
59 }, "pgraph_tpc0_tex_unk08" },
60 {}
61};
62
26static const struct nvkm_specdom 63static const struct nvkm_specdom
27g84_pm[] = { 64g84_pm[] = {
28 { 0x20, (const struct nvkm_specsig[]) { 65 { 0x20, (const struct nvkm_specsig[]) {
29 {} 66 {}
30 }, &nv40_perfctr_func }, 67 }, &nv40_perfctr_func },
31 { 0x20, (const struct nvkm_specsig[]) { 68 { 0xf0, (const struct nvkm_specsig[]) {
69 { 0xbd, "pc01_gr_idle" },
70 { 0x5e, "pc01_strmout_00" },
71 { 0x5f, "pc01_strmout_01" },
72 { 0xd2, "pc01_trast_00" },
73 { 0xd3, "pc01_trast_01" },
74 { 0xd4, "pc01_trast_02" },
75 { 0xd5, "pc01_trast_03" },
76 { 0xd8, "pc01_trast_04" },
77 { 0xd9, "pc01_trast_05" },
78 { 0x5c, "pc01_vattr_00" },
79 { 0x5d, "pc01_vattr_01" },
80 { 0x66, "pc01_vfetch_00", g84_vfetch_sources },
81 { 0x67, "pc01_vfetch_01", g84_vfetch_sources },
82 { 0x68, "pc01_vfetch_02", g84_vfetch_sources },
83 { 0x69, "pc01_vfetch_03", g84_vfetch_sources },
84 { 0x6a, "pc01_vfetch_04", g84_vfetch_sources },
85 { 0x6b, "pc01_vfetch_05", g84_vfetch_sources },
86 { 0x6c, "pc01_vfetch_06", g84_vfetch_sources },
87 { 0x6d, "pc01_vfetch_07", g84_vfetch_sources },
88 { 0x6e, "pc01_vfetch_08", g84_vfetch_sources },
89 { 0x6f, "pc01_vfetch_09", g84_vfetch_sources },
90 { 0x70, "pc01_vfetch_0a", g84_vfetch_sources },
91 { 0x71, "pc01_vfetch_0b", g84_vfetch_sources },
92 { 0x72, "pc01_vfetch_0c", g84_vfetch_sources },
93 { 0x73, "pc01_vfetch_0d", g84_vfetch_sources },
94 { 0x74, "pc01_vfetch_0e", g84_vfetch_sources },
95 { 0x75, "pc01_vfetch_0f", g84_vfetch_sources },
96 { 0x76, "pc01_vfetch_10", g84_vfetch_sources },
97 { 0x77, "pc01_vfetch_11", g84_vfetch_sources },
98 { 0x78, "pc01_vfetch_12", g84_vfetch_sources },
99 { 0x79, "pc01_vfetch_13", g84_vfetch_sources },
100 { 0x7a, "pc01_vfetch_14", g84_vfetch_sources },
101 { 0x7b, "pc01_vfetch_15", g84_vfetch_sources },
102 { 0x7c, "pc01_vfetch_16", g84_vfetch_sources },
103 { 0x7d, "pc01_vfetch_17", g84_vfetch_sources },
104 { 0x7e, "pc01_vfetch_18", g84_vfetch_sources },
105 { 0x7f, "pc01_vfetch_19", g84_vfetch_sources },
106 { 0x07, "pc01_zcull_00", nv50_zcull_sources },
107 { 0x08, "pc01_zcull_01", nv50_zcull_sources },
108 { 0x09, "pc01_zcull_02", nv50_zcull_sources },
109 { 0x0a, "pc01_zcull_03", nv50_zcull_sources },
110 { 0x0b, "pc01_zcull_04", nv50_zcull_sources },
111 { 0x0c, "pc01_zcull_05", nv50_zcull_sources },
112 { 0xa4, "pc01_unk00" },
113 { 0xec, "pc01_trailer" },
32 {} 114 {}
33 }, &nv40_perfctr_func }, 115 }, &nv40_perfctr_func },
34 { 0x20, (const struct nvkm_specsig[]) { 116 { 0xa0, (const struct nvkm_specsig[]) {
117 { 0x30, "pc02_crop_00", g84_crop_sources },
118 { 0x31, "pc02_crop_01", g84_crop_sources },
119 { 0x32, "pc02_crop_02", g84_crop_sources },
120 { 0x33, "pc02_crop_03", g84_crop_sources },
121 { 0x00, "pc02_prop_00", g84_prop_sources },
122 { 0x01, "pc02_prop_01", g84_prop_sources },
123 { 0x02, "pc02_prop_02", g84_prop_sources },
124 { 0x03, "pc02_prop_03", g84_prop_sources },
125 { 0x04, "pc02_prop_04", g84_prop_sources },
126 { 0x05, "pc02_prop_05", g84_prop_sources },
127 { 0x06, "pc02_prop_06", g84_prop_sources },
128 { 0x07, "pc02_prop_07", g84_prop_sources },
129 { 0x48, "pc02_tex_00", g84_tex_sources },
130 { 0x49, "pc02_tex_01", g84_tex_sources },
131 { 0x4a, "pc02_tex_02", g84_tex_sources },
132 { 0x4b, "pc02_tex_03", g84_tex_sources },
133 { 0x1a, "pc02_tex_04", g84_tex_sources },
134 { 0x1b, "pc02_tex_05", g84_tex_sources },
135 { 0x1c, "pc02_tex_06", g84_tex_sources },
136 { 0x44, "pc02_zrop_00", nv50_zrop_sources },
137 { 0x45, "pc02_zrop_01", nv50_zrop_sources },
138 { 0x46, "pc02_zrop_02", nv50_zrop_sources },
139 { 0x47, "pc02_zrop_03", nv50_zrop_sources },
140 { 0x8c, "pc02_trailer" },
35 {} 141 {}
36 }, &nv40_perfctr_func }, 142 }, &nv40_perfctr_func },
37 { 0x20, (const struct nvkm_specsig[]) { 143 { 0x20, (const struct nvkm_specsig[]) {
@@ -52,14 +158,8 @@ g84_pm[] = {
52 {} 158 {}
53}; 159};
54 160
55struct nvkm_oclass * 161int
56g84_pm_oclass = &(struct nv40_pm_oclass) { 162g84_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
57 .base.handle = NV_ENGINE(PM, 0x84), 163{
58 .base.ofuncs = &(struct nvkm_ofuncs) { 164 return nv40_pm_new_(g84_pm, device, index, ppm);
59 .ctor = nv40_pm_ctor, 165}
60 .dtor = _nvkm_pm_dtor,
61 .init = _nvkm_pm_init,
62 .fini = _nvkm_pm_fini,
63 },
64 .doms = g84_pm,
65}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
index 008fed73dd82..d2901e9a7808 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
@@ -23,62 +23,146 @@
23 */ 23 */
24#include "gf100.h" 24#include "gf100.h"
25 25
26const struct nvkm_specsrc
27gf100_pbfb_sources[] = {
28 { 0x10f100, (const struct nvkm_specmux[]) {
29 { 0x1, 0, "unk0" },
30 { 0x3f, 4, "unk4" },
31 {}
32 }, "pbfb_broadcast_pm_unk100" },
33 {}
34};
35
36const struct nvkm_specsrc
37gf100_pmfb_sources[] = {
38 { 0x140028, (const struct nvkm_specmux[]) {
39 { 0x3fff, 0, "unk0" },
40 { 0x7, 16, "unk16" },
41 { 0x3, 24, "unk24" },
42 { 0x2, 29, "unk29" },
43 {}
44 }, "pmfb0_pm_unk28" },
45 {}
46};
47
48static const struct nvkm_specsrc
49gf100_l1_sources[] = {
50 { 0x5044a8, (const struct nvkm_specmux[]) {
51 { 0x3f, 0, "sel", true },
52 {}
53 }, "pgraph_gpc0_tpc0_l1_pm_mux" },
54 {}
55};
56
57static const struct nvkm_specsrc
58gf100_tex_sources[] = {
59 { 0x5042c0, (const struct nvkm_specmux[]) {
60 { 0xf, 0, "sel0", true },
61 { 0x7, 8, "sel1", true },
62 {}
63 }, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
64 {}
65};
66
67static const struct nvkm_specsrc
68gf100_unk400_sources[] = {
69 { 0x50440c, (const struct nvkm_specmux[]) {
70 { 0x3f, 0, "sel", true },
71 {}
72 }, "pgraph_gpc0_tpc0_unk400_pm_mux" },
73 {}
74};
75
26static const struct nvkm_specdom 76static const struct nvkm_specdom
27gf100_pm_hub[] = { 77gf100_pm_hub[] = {
28 {} 78 {}
29}; 79};
30 80
31static const struct nvkm_specdom 81const struct nvkm_specdom
32gf100_pm_gpc[] = { 82gf100_pm_gpc[] = {
83 { 0xe0, (const struct nvkm_specsig[]) {
84 { 0x00, "gpc00_l1_00", gf100_l1_sources },
85 { 0x01, "gpc00_l1_01", gf100_l1_sources },
86 { 0x02, "gpc00_l1_02", gf100_l1_sources },
87 { 0x03, "gpc00_l1_03", gf100_l1_sources },
88 { 0x05, "gpc00_l1_04", gf100_l1_sources },
89 { 0x06, "gpc00_l1_05", gf100_l1_sources },
90 { 0x0a, "gpc00_tex_00", gf100_tex_sources },
91 { 0x0b, "gpc00_tex_01", gf100_tex_sources },
92 { 0x0c, "gpc00_tex_02", gf100_tex_sources },
93 { 0x0d, "gpc00_tex_03", gf100_tex_sources },
94 { 0x0e, "gpc00_tex_04", gf100_tex_sources },
95 { 0x0f, "gpc00_tex_05", gf100_tex_sources },
96 { 0x10, "gpc00_tex_06", gf100_tex_sources },
97 { 0x11, "gpc00_tex_07", gf100_tex_sources },
98 { 0x12, "gpc00_tex_08", gf100_tex_sources },
99 { 0x26, "gpc00_unk400_00", gf100_unk400_sources },
100 {}
101 }, &gf100_perfctr_func },
33 {} 102 {}
34}; 103};
35 104
36static const struct nvkm_specdom 105const struct nvkm_specdom
37gf100_pm_part[] = { 106gf100_pm_part[] = {
107 { 0xe0, (const struct nvkm_specsig[]) {
108 { 0x0f, "part00_pbfb_00", gf100_pbfb_sources },
109 { 0x10, "part00_pbfb_01", gf100_pbfb_sources },
110 { 0x21, "part00_pmfb_00", gf100_pmfb_sources },
111 { 0x04, "part00_pmfb_01", gf100_pmfb_sources },
112 { 0x00, "part00_pmfb_02", gf100_pmfb_sources },
113 { 0x02, "part00_pmfb_03", gf100_pmfb_sources },
114 { 0x01, "part00_pmfb_04", gf100_pmfb_sources },
115 { 0x2e, "part00_pmfb_05", gf100_pmfb_sources },
116 { 0x2f, "part00_pmfb_06", gf100_pmfb_sources },
117 { 0x1b, "part00_pmfb_07", gf100_pmfb_sources },
118 { 0x1c, "part00_pmfb_08", gf100_pmfb_sources },
119 { 0x1d, "part00_pmfb_09", gf100_pmfb_sources },
120 { 0x1e, "part00_pmfb_0a", gf100_pmfb_sources },
121 { 0x1f, "part00_pmfb_0b", gf100_pmfb_sources },
122 {}
123 }, &gf100_perfctr_func },
38 {} 124 {}
39}; 125};
40 126
41static void 127static void
42gf100_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom, 128gf100_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
43 struct nvkm_perfctr *ctr) 129 struct nvkm_perfctr *ctr)
44{ 130{
45 struct gf100_pm_priv *priv = (void *)ppm; 131 struct nvkm_device *device = pm->engine.subdev.device;
46 struct gf100_pm_cntr *cntr = (void *)ctr;
47 u32 log = ctr->logic_op; 132 u32 log = ctr->logic_op;
48 u32 src = 0x00000000; 133 u32 src = 0x00000000;
49 int i; 134 int i;
50 135
51 for (i = 0; i < 4 && ctr->signal[i]; i++) 136 for (i = 0; i < 4; i++)
52 src |= (ctr->signal[i] - dom->signal) << (i * 8); 137 src |= ctr->signal[i] << (i * 8);
53 138
54 nv_wr32(priv, dom->addr + 0x09c, 0x00040002); 139 nvkm_wr32(device, dom->addr + 0x09c, 0x00040002 | (dom->mode << 3));
55 nv_wr32(priv, dom->addr + 0x100, 0x00000000); 140 nvkm_wr32(device, dom->addr + 0x100, 0x00000000);
56 nv_wr32(priv, dom->addr + 0x040 + (cntr->base.slot * 0x08), src); 141 nvkm_wr32(device, dom->addr + 0x040 + (ctr->slot * 0x08), src);
57 nv_wr32(priv, dom->addr + 0x044 + (cntr->base.slot * 0x08), log); 142 nvkm_wr32(device, dom->addr + 0x044 + (ctr->slot * 0x08), log);
58} 143}
59 144
60static void 145static void
61gf100_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom, 146gf100_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
62 struct nvkm_perfctr *ctr) 147 struct nvkm_perfctr *ctr)
63{ 148{
64 struct gf100_pm_priv *priv = (void *)ppm; 149 struct nvkm_device *device = pm->engine.subdev.device;
65 struct gf100_pm_cntr *cntr = (void *)ctr; 150
66 151 switch (ctr->slot) {
67 switch (cntr->base.slot) { 152 case 0: ctr->ctr = nvkm_rd32(device, dom->addr + 0x08c); break;
68 case 0: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x08c); break; 153 case 1: ctr->ctr = nvkm_rd32(device, dom->addr + 0x088); break;
69 case 1: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x088); break; 154 case 2: ctr->ctr = nvkm_rd32(device, dom->addr + 0x080); break;
70 case 2: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x080); break; 155 case 3: ctr->ctr = nvkm_rd32(device, dom->addr + 0x090); break;
71 case 3: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x090); break;
72 } 156 }
73 cntr->base.clk = nv_rd32(priv, dom->addr + 0x070); 157 dom->clk = nvkm_rd32(device, dom->addr + 0x070);
74} 158}
75 159
76static void 160static void
77gf100_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom) 161gf100_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
78{ 162{
79 struct gf100_pm_priv *priv = (void *)ppm; 163 struct nvkm_device *device = pm->engine.subdev.device;
80 nv_wr32(priv, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27); 164 nvkm_wr32(device, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
81 nv_wr32(priv, dom->addr + 0x0ec, 0x00000011); 165 nvkm_wr32(device, dom->addr + 0x0ec, 0x00000011);
82} 166}
83 167
84const struct nvkm_funcdom 168const struct nvkm_funcdom
@@ -88,72 +172,72 @@ gf100_perfctr_func = {
88 .next = gf100_perfctr_next, 172 .next = gf100_perfctr_next,
89}; 173};
90 174
91int 175static void
92gf100_pm_fini(struct nvkm_object *object, bool suspend) 176gf100_pm_fini(struct nvkm_pm *pm)
93{ 177{
94 struct gf100_pm_priv *priv = (void *)object; 178 struct nvkm_device *device = pm->engine.subdev.device;
95 nv_mask(priv, 0x000200, 0x10000000, 0x00000000); 179 nvkm_mask(device, 0x000200, 0x10000000, 0x00000000);
96 nv_mask(priv, 0x000200, 0x10000000, 0x10000000); 180 nvkm_mask(device, 0x000200, 0x10000000, 0x10000000);
97 return nvkm_pm_fini(&priv->base, suspend);
98} 181}
99 182
100static int 183static const struct nvkm_pm_func
101gf100_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 184gf100_pm_ = {
102 struct nvkm_oclass *oclass, void *data, u32 size, 185 .fini = gf100_pm_fini,
103 struct nvkm_object **pobject) 186};
187
188int
189gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device,
190 int index, struct nvkm_pm **ppm)
104{ 191{
105 struct gf100_pm_priv *priv; 192 struct nvkm_pm *pm;
106 u32 mask; 193 u32 mask;
107 int ret; 194 int ret;
108 195
109 ret = nvkm_pm_create(parent, engine, oclass, &priv); 196 if (!(pm = *ppm = kzalloc(sizeof(*pm), GFP_KERNEL)))
110 *pobject = nv_object(priv); 197 return -ENOMEM;
111 if (ret)
112 return ret;
113 198
114 ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gf100_pm_pwr); 199 ret = nvkm_pm_ctor(&gf100_pm_, device, index, pm);
115 if (ret) 200 if (ret)
116 return ret; 201 return ret;
117 202
118 /* HUB */ 203 /* HUB */
119 ret = nvkm_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200, 204 ret = nvkm_perfdom_new(pm, "hub", 0, 0x1b0000, 0, 0x200,
120 gf100_pm_hub); 205 func->doms_hub);
121 if (ret) 206 if (ret)
122 return ret; 207 return ret;
123 208
124 /* GPC */ 209 /* GPC */
125 mask = (1 << nv_rd32(priv, 0x022430)) - 1; 210 mask = (1 << nvkm_rd32(device, 0x022430)) - 1;
126 mask &= ~nv_rd32(priv, 0x022504); 211 mask &= ~nvkm_rd32(device, 0x022504);
127 mask &= ~nv_rd32(priv, 0x022584); 212 mask &= ~nvkm_rd32(device, 0x022584);
128 213
129 ret = nvkm_perfdom_new(&priv->base, "gpc", mask, 0x180000, 214 ret = nvkm_perfdom_new(pm, "gpc", mask, 0x180000,
130 0x1000, 0x200, gf100_pm_gpc); 215 0x1000, 0x200, func->doms_gpc);
131 if (ret) 216 if (ret)
132 return ret; 217 return ret;
133 218
134 /* PART */ 219 /* PART */
135 mask = (1 << nv_rd32(priv, 0x022438)) - 1; 220 mask = (1 << nvkm_rd32(device, 0x022438)) - 1;
136 mask &= ~nv_rd32(priv, 0x022548); 221 mask &= ~nvkm_rd32(device, 0x022548);
137 mask &= ~nv_rd32(priv, 0x0225c8); 222 mask &= ~nvkm_rd32(device, 0x0225c8);
138 223
139 ret = nvkm_perfdom_new(&priv->base, "part", mask, 0x1a0000, 224 ret = nvkm_perfdom_new(pm, "part", mask, 0x1a0000,
140 0x1000, 0x200, gf100_pm_part); 225 0x1000, 0x200, func->doms_part);
141 if (ret) 226 if (ret)
142 return ret; 227 return ret;
143 228
144 nv_engine(priv)->cclass = &nvkm_pm_cclass;
145 nv_engine(priv)->sclass = nvkm_pm_sclass;
146 priv->base.last = 7;
147 return 0; 229 return 0;
148} 230}
149 231
150struct nvkm_oclass 232static const struct gf100_pm_func
151gf100_pm_oclass = { 233gf100_pm = {
152 .handle = NV_ENGINE(PM, 0xc0), 234 .doms_gpc = gf100_pm_gpc,
153 .ofuncs = &(struct nvkm_ofuncs) { 235 .doms_hub = gf100_pm_hub,
154 .ctor = gf100_pm_ctor, 236 .doms_part = gf100_pm_part,
155 .dtor = _nvkm_pm_dtor,
156 .init = _nvkm_pm_init,
157 .fini = gf100_pm_fini,
158 },
159}; 237};
238
239int
240gf100_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
241{
242 return gf100_pm_new_(&gf100_pm, device, index, ppm);
243}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
index 6a01fc7fec6f..56d0344853ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
@@ -2,14 +2,18 @@
2#define __NVKM_PM_NVC0_H__ 2#define __NVKM_PM_NVC0_H__
3#include "priv.h" 3#include "priv.h"
4 4
5struct gf100_pm_priv { 5struct gf100_pm_func {
6 struct nvkm_pm base; 6 const struct nvkm_specdom *doms_hub;
7 const struct nvkm_specdom *doms_gpc;
8 const struct nvkm_specdom *doms_part;
7}; 9};
8 10
9struct gf100_pm_cntr { 11int gf100_pm_new_(const struct gf100_pm_func *, struct nvkm_device *,
10 struct nvkm_perfctr base; 12 int index, struct nvkm_pm **);
11};
12 13
13extern const struct nvkm_funcdom gf100_perfctr_func; 14extern const struct nvkm_funcdom gf100_perfctr_func;
14int gf100_pm_fini(struct nvkm_object *, bool); 15extern const struct nvkm_specdom gf100_pm_gpc[];
16
17extern const struct nvkm_specsrc gf100_pbfb_sources[];
18extern const struct nvkm_specsrc gf100_pmfb_sources[];
15#endif 19#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
new file mode 100644
index 000000000000..49b24c98a7f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
@@ -0,0 +1,66 @@
1/*
2 * Copyright 2015 Samuel Pitoiset
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Samuel Pitoiset
23 */
24#include "gf100.h"
25
26static const struct nvkm_specdom
27gf108_pm_hub[] = {
28 {}
29};
30
31static const struct nvkm_specdom
32gf108_pm_part[] = {
33 { 0xe0, (const struct nvkm_specsig[]) {
34 { 0x14, "part00_pbfb_00", gf100_pbfb_sources },
35 { 0x15, "part00_pbfb_01", gf100_pbfb_sources },
36 { 0x20, "part00_pbfb_02", gf100_pbfb_sources },
37 { 0x21, "part00_pbfb_03", gf100_pbfb_sources },
38 { 0x01, "part00_pmfb_00", gf100_pmfb_sources },
39 { 0x04, "part00_pmfb_01", gf100_pmfb_sources },
40 { 0x05, "part00_pmfb_02", gf100_pmfb_sources},
41 { 0x07, "part00_pmfb_03", gf100_pmfb_sources },
42 { 0x0d, "part00_pmfb_04", gf100_pmfb_sources },
43 { 0x12, "part00_pmfb_05", gf100_pmfb_sources },
44 { 0x13, "part00_pmfb_06", gf100_pmfb_sources },
45 { 0x2c, "part00_pmfb_07", gf100_pmfb_sources },
46 { 0x2d, "part00_pmfb_08", gf100_pmfb_sources },
47 { 0x2e, "part00_pmfb_09", gf100_pmfb_sources },
48 { 0x2f, "part00_pmfb_0a", gf100_pmfb_sources },
49 { 0x30, "part00_pmfb_0b", gf100_pmfb_sources },
50 {}
51 }, &gf100_perfctr_func },
52 {}
53};
54
55static const struct gf100_pm_func
56gf108_pm = {
57 .doms_gpc = gf100_pm_gpc,
58 .doms_hub = gf108_pm_hub,
59 .doms_part = gf108_pm_part,
60};
61
62int
63gf108_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
64{
65 return gf100_pm_new_(&gf108_pm, device, index, ppm);
66}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
new file mode 100644
index 000000000000..9170025fc988
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
@@ -0,0 +1,80 @@
1/*
2 * Copyright 2015 Samuel Pitoiset
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Samuel Pitoiset
23 */
24#include "gf100.h"
25
26static const struct nvkm_specsrc
27gf117_pmfb_sources[] = {
28 { 0x140028, (const struct nvkm_specmux[]) {
29 { 0x3fff, 0, "unk0" },
30 { 0x7, 16, "unk16" },
31 { 0x3, 24, "unk24" },
32 { 0x2, 28, "unk28" },
33 {}
34 }, "pmfb0_pm_unk28" },
35 { 0x14125c, (const struct nvkm_specmux[]) {
36 { 0x3fff, 0, "unk0" },
37 {}
38 }, "pmfb0_subp0_pm_unk25c" },
39 {}
40};
41
42static const struct nvkm_specdom
43gf117_pm_hub[] = {
44 {}
45};
46
47static const struct nvkm_specdom
48gf117_pm_part[] = {
49 { 0xe0, (const struct nvkm_specsig[]) {
50 { 0x00, "part00_pbfb_00", gf100_pbfb_sources },
51 { 0x01, "part00_pbfb_01", gf100_pbfb_sources },
52 { 0x12, "part00_pmfb_00", gf117_pmfb_sources },
53 { 0x15, "part00_pmfb_01", gf117_pmfb_sources },
54 { 0x16, "part00_pmfb_02", gf117_pmfb_sources },
55 { 0x18, "part00_pmfb_03", gf117_pmfb_sources },
56 { 0x1e, "part00_pmfb_04", gf117_pmfb_sources },
57 { 0x23, "part00_pmfb_05", gf117_pmfb_sources },
58 { 0x24, "part00_pmfb_06", gf117_pmfb_sources },
59 { 0x0c, "part00_pmfb_07", gf117_pmfb_sources },
60 { 0x0d, "part00_pmfb_08", gf117_pmfb_sources },
61 { 0x0e, "part00_pmfb_09", gf117_pmfb_sources },
62 { 0x0f, "part00_pmfb_0a", gf117_pmfb_sources },
63 { 0x10, "part00_pmfb_0b", gf117_pmfb_sources },
64 {}
65 }, &gf100_perfctr_func },
66 {}
67};
68
69static const struct gf100_pm_func
70gf117_pm = {
71 .doms_gpc = gf100_pm_gpc,
72 .doms_hub = gf117_pm_hub,
73 .doms_part = gf117_pm_part,
74};
75
76int
77gf117_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
78{
79 return gf100_pm_new_(&gf117_pm, device, index, ppm);
80}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
index 75b9ff3d1a2c..07f946d26ac6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
@@ -23,6 +23,52 @@
23 */ 23 */
24#include "gf100.h" 24#include "gf100.h"
25 25
26static const struct nvkm_specsrc
27gk104_pmfb_sources[] = {
28 { 0x140028, (const struct nvkm_specmux[]) {
29 { 0x3fff, 0, "unk0" },
30 { 0x7, 16, "unk16" },
31 { 0x3, 24, "unk24" },
32 { 0x2, 28, "unk28" },
33 {}
34 }, "pmfb0_pm_unk28" },
35 { 0x14125c, (const struct nvkm_specmux[]) {
36 { 0x3fff, 0, "unk0" },
37 {}
38 }, "pmfb0_subp0_pm_unk25c" },
39 { 0x14165c, (const struct nvkm_specmux[]) {
40 { 0x3fff, 0, "unk0" },
41 {}
42 }, "pmfb0_subp1_pm_unk25c" },
43 { 0x141a5c, (const struct nvkm_specmux[]) {
44 { 0x3fff, 0, "unk0" },
45 {}
46 }, "pmfb0_subp2_pm_unk25c" },
47 { 0x141e5c, (const struct nvkm_specmux[]) {
48 { 0x3fff, 0, "unk0" },
49 {}
50 }, "pmfb0_subp3_pm_unk25c" },
51 {}
52};
53
54static const struct nvkm_specsrc
55gk104_tex_sources[] = {
56 { 0x5042c0, (const struct nvkm_specmux[]) {
57 { 0xf, 0, "sel0", true },
58 { 0x7, 8, "sel1", true },
59 {}
60 }, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
61 { 0x5042c8, (const struct nvkm_specmux[]) {
62 { 0x1f, 0, "sel", true },
63 {}
64 }, "pgraph_gpc0_tpc0_tex_pm_unkc8" },
65 { 0x5042b8, (const struct nvkm_specmux[]) {
66 { 0xff, 0, "sel", true },
67 {}
68 }, "pgraph_gpc0_tpc0_tex_pm_unkb8" },
69 {}
70};
71
26static const struct nvkm_specdom 72static const struct nvkm_specdom
27gk104_pm_hub[] = { 73gk104_pm_hub[] = {
28 { 0x60, (const struct nvkm_specsig[]) { 74 { 0x60, (const struct nvkm_specsig[]) {
@@ -69,12 +115,51 @@ gk104_pm_gpc[] = {
69 { 0xc7, "gpc00_user_0" }, 115 { 0xc7, "gpc00_user_0" },
70 {} 116 {}
71 }, &gf100_perfctr_func }, 117 }, &gf100_perfctr_func },
118 { 0x20, (const struct nvkm_specsig[]) {
119 {}
120 }, &gf100_perfctr_func },
121 { 0x20, (const struct nvkm_specsig[]) {
122 { 0x00, "gpc02_tex_00", gk104_tex_sources },
123 { 0x01, "gpc02_tex_01", gk104_tex_sources },
124 { 0x02, "gpc02_tex_02", gk104_tex_sources },
125 { 0x03, "gpc02_tex_03", gk104_tex_sources },
126 { 0x04, "gpc02_tex_04", gk104_tex_sources },
127 { 0x05, "gpc02_tex_05", gk104_tex_sources },
128 { 0x06, "gpc02_tex_06", gk104_tex_sources },
129 { 0x07, "gpc02_tex_07", gk104_tex_sources },
130 { 0x08, "gpc02_tex_08", gk104_tex_sources },
131 { 0x0a, "gpc02_tex_0a", gk104_tex_sources },
132 { 0x0b, "gpc02_tex_0b", gk104_tex_sources },
133 { 0x0d, "gpc02_tex_0c", gk104_tex_sources },
134 { 0x0c, "gpc02_tex_0d", gk104_tex_sources },
135 { 0x0e, "gpc02_tex_0e", gk104_tex_sources },
136 { 0x0f, "gpc02_tex_0f", gk104_tex_sources },
137 { 0x10, "gpc02_tex_10", gk104_tex_sources },
138 { 0x11, "gpc02_tex_11", gk104_tex_sources },
139 { 0x12, "gpc02_tex_12", gk104_tex_sources },
140 {}
141 }, &gf100_perfctr_func },
72 {} 142 {}
73}; 143};
74 144
75static const struct nvkm_specdom 145static const struct nvkm_specdom
76gk104_pm_part[] = { 146gk104_pm_part[] = {
77 { 0x60, (const struct nvkm_specsig[]) { 147 { 0x60, (const struct nvkm_specsig[]) {
148 { 0x00, "part00_pbfb_00", gf100_pbfb_sources },
149 { 0x01, "part00_pbfb_01", gf100_pbfb_sources },
150 { 0x0c, "part00_pmfb_00", gk104_pmfb_sources },
151 { 0x0d, "part00_pmfb_01", gk104_pmfb_sources },
152 { 0x0e, "part00_pmfb_02", gk104_pmfb_sources },
153 { 0x0f, "part00_pmfb_03", gk104_pmfb_sources },
154 { 0x10, "part00_pmfb_04", gk104_pmfb_sources },
155 { 0x12, "part00_pmfb_05", gk104_pmfb_sources },
156 { 0x15, "part00_pmfb_06", gk104_pmfb_sources },
157 { 0x16, "part00_pmfb_07", gk104_pmfb_sources },
158 { 0x18, "part00_pmfb_08", gk104_pmfb_sources },
159 { 0x21, "part00_pmfb_09", gk104_pmfb_sources },
160 { 0x25, "part00_pmfb_0a", gk104_pmfb_sources },
161 { 0x26, "part00_pmfb_0b", gk104_pmfb_sources },
162 { 0x27, "part00_pmfb_0c", gk104_pmfb_sources },
78 { 0x47, "part00_user_0" }, 163 { 0x47, "part00_user_0" },
79 {} 164 {}
80 }, &gf100_perfctr_func }, 165 }, &gf100_perfctr_func },
@@ -85,64 +170,15 @@ gk104_pm_part[] = {
85 {} 170 {}
86}; 171};
87 172
88static int 173static const struct gf100_pm_func
89gk104_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 174gk104_pm = {
90 struct nvkm_oclass *oclass, void *data, u32 size, 175 .doms_gpc = gk104_pm_gpc,
91 struct nvkm_object **pobject) 176 .doms_hub = gk104_pm_hub,
92{ 177 .doms_part = gk104_pm_part,
93 struct gf100_pm_priv *priv; 178};
94 u32 mask;
95 int ret;
96
97 ret = nvkm_pm_create(parent, engine, oclass, &priv);
98 *pobject = nv_object(priv);
99 if (ret)
100 return ret;
101
102 /* PDAEMON */
103 ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gk104_pm_pwr);
104 if (ret)
105 return ret;
106
107 /* HUB */
108 ret = nvkm_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
109 gk104_pm_hub);
110 if (ret)
111 return ret;
112
113 /* GPC */
114 mask = (1 << nv_rd32(priv, 0x022430)) - 1;
115 mask &= ~nv_rd32(priv, 0x022504);
116 mask &= ~nv_rd32(priv, 0x022584);
117
118 ret = nvkm_perfdom_new(&priv->base, "gpc", mask, 0x180000,
119 0x1000, 0x200, gk104_pm_gpc);
120 if (ret)
121 return ret;
122
123 /* PART */
124 mask = (1 << nv_rd32(priv, 0x022438)) - 1;
125 mask &= ~nv_rd32(priv, 0x022548);
126 mask &= ~nv_rd32(priv, 0x0225c8);
127
128 ret = nvkm_perfdom_new(&priv->base, "part", mask, 0x1a0000,
129 0x1000, 0x200, gk104_pm_part);
130 if (ret)
131 return ret;
132 179
133 nv_engine(priv)->cclass = &nvkm_pm_cclass; 180int
134 nv_engine(priv)->sclass = nvkm_pm_sclass; 181gk104_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
135 priv->base.last = 7; 182{
136 return 0; 183 return gf100_pm_new_(&gk104_pm, device, index, ppm);
137} 184}
138
139struct nvkm_oclass
140gk104_pm_oclass = {
141 .handle = NV_ENGINE(PM, 0xe0),
142 .ofuncs = &(struct nvkm_ofuncs) {
143 .ctor = gk104_pm_ctor,
144 .dtor = _nvkm_pm_dtor,
145 .init = _nvkm_pm_init,
146 .fini = gf100_pm_fini,
147 },
148};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
new file mode 100644
index 000000000000..5cf5dd536fd0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
@@ -0,0 +1,157 @@
1/*
2 * Copyright 2015 Nouveau project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Samuel Pitoiset
23 */
24#include "nv40.h"
25
26const struct nvkm_specsrc
27gt200_crop_sources[] = {
28 { 0x407008, (const struct nvkm_specmux[]) {
29 { 0xf, 0, "sel0", true },
30 { 0x1f, 16, "sel1", true },
31 {}
32 }, "pgraph_rop0_crop_pm_mux" },
33 {}
34};
35
36const struct nvkm_specsrc
37gt200_prop_sources[] = {
38 { 0x408750, (const struct nvkm_specmux[]) {
39 { 0x3f, 0, "sel", true },
40 {}
41 }, "pgraph_tpc0_prop_pm_mux" },
42 {}
43};
44
45const struct nvkm_specsrc
46gt200_tex_sources[] = {
47 { 0x408508, (const struct nvkm_specmux[]) {
48 { 0xfffff, 0, "unk0" },
49 {}
50 }, "pgraph_tpc0_tex_unk08" },
51 {}
52};
53
54static const struct nvkm_specdom
55gt200_pm[] = {
56 { 0x20, (const struct nvkm_specsig[]) {
57 {}
58 }, &nv40_perfctr_func },
59 { 0xf0, (const struct nvkm_specsig[]) {
60 { 0xc9, "pc01_gr_idle" },
61 { 0x84, "pc01_strmout_00" },
62 { 0x85, "pc01_strmout_01" },
63 { 0xde, "pc01_trast_00" },
64 { 0xdf, "pc01_trast_01" },
65 { 0xe0, "pc01_trast_02" },
66 { 0xe1, "pc01_trast_03" },
67 { 0xe4, "pc01_trast_04" },
68 { 0xe5, "pc01_trast_05" },
69 { 0x82, "pc01_vattr_00" },
70 { 0x83, "pc01_vattr_01" },
71 { 0x46, "pc01_vfetch_00", g84_vfetch_sources },
72 { 0x47, "pc01_vfetch_01", g84_vfetch_sources },
73 { 0x48, "pc01_vfetch_02", g84_vfetch_sources },
74 { 0x49, "pc01_vfetch_03", g84_vfetch_sources },
75 { 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
76 { 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
77 { 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
78 { 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
79 { 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
80 { 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
81 { 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
82 { 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
83 { 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
84 { 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
85 { 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
86 { 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
87 { 0x56, "pc01_vfetch_10", g84_vfetch_sources },
88 { 0x57, "pc01_vfetch_11", g84_vfetch_sources },
89 { 0x58, "pc01_vfetch_12", g84_vfetch_sources },
90 { 0x59, "pc01_vfetch_13", g84_vfetch_sources },
91 { 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
92 { 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
93 { 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
94 { 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
95 { 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
96 { 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
97 { 0x07, "pc01_zcull_00", nv50_zcull_sources },
98 { 0x08, "pc01_zcull_01", nv50_zcull_sources },
99 { 0x09, "pc01_zcull_02", nv50_zcull_sources },
100 { 0x0a, "pc01_zcull_03", nv50_zcull_sources },
101 { 0x0b, "pc01_zcull_04", nv50_zcull_sources },
102 { 0x0c, "pc01_zcull_05", nv50_zcull_sources },
103
104 { 0xb0, "pc01_unk00" },
105 { 0xec, "pc01_trailer" },
106 {}
107 }, &nv40_perfctr_func },
108 { 0xf0, (const struct nvkm_specsig[]) {
109 { 0x55, "pc02_crop_00", gt200_crop_sources },
110 { 0x56, "pc02_crop_01", gt200_crop_sources },
111 { 0x57, "pc02_crop_02", gt200_crop_sources },
112 { 0x58, "pc02_crop_03", gt200_crop_sources },
113 { 0x00, "pc02_prop_00", gt200_prop_sources },
114 { 0x01, "pc02_prop_01", gt200_prop_sources },
115 { 0x02, "pc02_prop_02", gt200_prop_sources },
116 { 0x03, "pc02_prop_03", gt200_prop_sources },
117 { 0x04, "pc02_prop_04", gt200_prop_sources },
118 { 0x05, "pc02_prop_05", gt200_prop_sources },
119 { 0x06, "pc02_prop_06", gt200_prop_sources },
120 { 0x07, "pc02_prop_07", gt200_prop_sources },
121 { 0x78, "pc02_tex_00", gt200_tex_sources },
122 { 0x79, "pc02_tex_01", gt200_tex_sources },
123 { 0x7a, "pc02_tex_02", gt200_tex_sources },
124 { 0x7b, "pc02_tex_03", gt200_tex_sources },
125 { 0x32, "pc02_tex_04", gt200_tex_sources },
126 { 0x33, "pc02_tex_05", gt200_tex_sources },
127 { 0x34, "pc02_tex_06", gt200_tex_sources },
128 { 0x74, "pc02_zrop_00", nv50_zrop_sources },
129 { 0x75, "pc02_zrop_01", nv50_zrop_sources },
130 { 0x76, "pc02_zrop_02", nv50_zrop_sources },
131 { 0x77, "pc02_zrop_03", nv50_zrop_sources },
132 { 0xec, "pc02_trailer" },
133 {}
134 }, &nv40_perfctr_func },
135 { 0x20, (const struct nvkm_specsig[]) {
136 {}
137 }, &nv40_perfctr_func },
138 { 0x20, (const struct nvkm_specsig[]) {
139 {}
140 }, &nv40_perfctr_func },
141 { 0x20, (const struct nvkm_specsig[]) {
142 {}
143 }, &nv40_perfctr_func },
144 { 0x20, (const struct nvkm_specsig[]) {
145 {}
146 }, &nv40_perfctr_func },
147 { 0x20, (const struct nvkm_specsig[]) {
148 {}
149 }, &nv40_perfctr_func },
150 {}
151};
152
153int
154gt200_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
155{
156 return nv40_pm_new_(gt200_pm, device, index, ppm);
157}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
index d065bfc59bbf..c9227ad41b04 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
@@ -23,15 +23,94 @@
23 */ 23 */
24#include "nv40.h" 24#include "nv40.h"
25 25
26static const struct nvkm_specsrc
27gt215_zcull_sources[] = {
28 { 0x402ca4, (const struct nvkm_specmux[]) {
29 { 0x7fff, 0, "unk0" },
30 { 0xff, 24, "unk24" },
31 {}
32 }, "pgraph_zcull_pm_unka4" },
33 {}
34};
35
26static const struct nvkm_specdom 36static const struct nvkm_specdom
27gt215_pm[] = { 37gt215_pm[] = {
28 { 0x20, (const struct nvkm_specsig[]) { 38 { 0x20, (const struct nvkm_specsig[]) {
29 {} 39 {}
30 }, &nv40_perfctr_func }, 40 }, &nv40_perfctr_func },
31 { 0x20, (const struct nvkm_specsig[]) { 41 { 0xf0, (const struct nvkm_specsig[]) {
42 { 0xcb, "pc01_gr_idle" },
43 { 0x86, "pc01_strmout_00" },
44 { 0x87, "pc01_strmout_01" },
45 { 0xe0, "pc01_trast_00" },
46 { 0xe1, "pc01_trast_01" },
47 { 0xe2, "pc01_trast_02" },
48 { 0xe3, "pc01_trast_03" },
49 { 0xe6, "pc01_trast_04" },
50 { 0xe7, "pc01_trast_05" },
51 { 0x84, "pc01_vattr_00" },
52 { 0x85, "pc01_vattr_01" },
53 { 0x46, "pc01_vfetch_00", g84_vfetch_sources },
54 { 0x47, "pc01_vfetch_01", g84_vfetch_sources },
55 { 0x48, "pc01_vfetch_02", g84_vfetch_sources },
56 { 0x49, "pc01_vfetch_03", g84_vfetch_sources },
57 { 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
58 { 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
59 { 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
60 { 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
61 { 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
62 { 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
63 { 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
64 { 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
65 { 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
66 { 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
67 { 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
68 { 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
69 { 0x56, "pc01_vfetch_10", g84_vfetch_sources },
70 { 0x57, "pc01_vfetch_11", g84_vfetch_sources },
71 { 0x58, "pc01_vfetch_12", g84_vfetch_sources },
72 { 0x59, "pc01_vfetch_13", g84_vfetch_sources },
73 { 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
74 { 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
75 { 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
76 { 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
77 { 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
78 { 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
79 { 0x07, "pc01_zcull_00", gt215_zcull_sources },
80 { 0x08, "pc01_zcull_01", gt215_zcull_sources },
81 { 0x09, "pc01_zcull_02", gt215_zcull_sources },
82 { 0x0a, "pc01_zcull_03", gt215_zcull_sources },
83 { 0x0b, "pc01_zcull_04", gt215_zcull_sources },
84 { 0x0c, "pc01_zcull_05", gt215_zcull_sources },
85 { 0xb2, "pc01_unk00" },
86 { 0xec, "pc01_trailer" },
32 {} 87 {}
33 }, &nv40_perfctr_func }, 88 }, &nv40_perfctr_func },
34 { 0x20, (const struct nvkm_specsig[]) { 89 { 0xe0, (const struct nvkm_specsig[]) {
90 { 0x64, "pc02_crop_00", gt200_crop_sources },
91 { 0x65, "pc02_crop_01", gt200_crop_sources },
92 { 0x66, "pc02_crop_02", gt200_crop_sources },
93 { 0x67, "pc02_crop_03", gt200_crop_sources },
94 { 0x00, "pc02_prop_00", gt200_prop_sources },
95 { 0x01, "pc02_prop_01", gt200_prop_sources },
96 { 0x02, "pc02_prop_02", gt200_prop_sources },
97 { 0x03, "pc02_prop_03", gt200_prop_sources },
98 { 0x04, "pc02_prop_04", gt200_prop_sources },
99 { 0x05, "pc02_prop_05", gt200_prop_sources },
100 { 0x06, "pc02_prop_06", gt200_prop_sources },
101 { 0x07, "pc02_prop_07", gt200_prop_sources },
102 { 0x80, "pc02_tex_00", gt200_tex_sources },
103 { 0x81, "pc02_tex_01", gt200_tex_sources },
104 { 0x82, "pc02_tex_02", gt200_tex_sources },
105 { 0x83, "pc02_tex_03", gt200_tex_sources },
106 { 0x3a, "pc02_tex_04", gt200_tex_sources },
107 { 0x3b, "pc02_tex_05", gt200_tex_sources },
108 { 0x3c, "pc02_tex_06", gt200_tex_sources },
109 { 0x7c, "pc02_zrop_00", nv50_zrop_sources },
110 { 0x7d, "pc02_zrop_01", nv50_zrop_sources },
111 { 0x7e, "pc02_zrop_02", nv50_zrop_sources },
112 { 0x7f, "pc02_zrop_03", nv50_zrop_sources },
113 { 0xcc, "pc02_trailer" },
35 {} 114 {}
36 }, &nv40_perfctr_func }, 115 }, &nv40_perfctr_func },
37 { 0x20, (const struct nvkm_specsig[]) { 116 { 0x20, (const struct nvkm_specsig[]) {
@@ -52,32 +131,8 @@ gt215_pm[] = {
52 {} 131 {}
53}; 132};
54 133
55static int 134int
56gt215_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 135gt215_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
57 struct nvkm_oclass *oclass, void *data, u32 size,
58 struct nvkm_object **object)
59{ 136{
60 int ret = nv40_pm_ctor(parent, engine, oclass, data, size, object); 137 return nv40_pm_new_(gt215_pm, device, index, ppm);
61 if (ret == 0) {
62 struct nv40_pm_priv *priv = (void *)*object;
63 ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
64 gt215_pm_pwr);
65 if (ret)
66 return ret;
67
68 priv->base.last = 3;
69 }
70 return ret;
71} 138}
72
73struct nvkm_oclass *
74gt215_pm_oclass = &(struct nv40_pm_oclass) {
75 .base.handle = NV_ENGINE(PM, 0xa3),
76 .base.ofuncs = &(struct nvkm_ofuncs) {
77 .ctor = gt215_pm_ctor,
78 .dtor = _nvkm_pm_dtor,
79 .init = _nvkm_pm_init,
80 .fini = _nvkm_pm_fini,
81 },
82 .doms = gt215_pm,
83}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
index ff22f06b22b8..4bef72a9d106 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
@@ -24,46 +24,44 @@
24#include "nv40.h" 24#include "nv40.h"
25 25
26static void 26static void
27nv40_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom, 27nv40_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
28 struct nvkm_perfctr *ctr) 28 struct nvkm_perfctr *ctr)
29{ 29{
30 struct nv40_pm_priv *priv = (void *)ppm; 30 struct nvkm_device *device = pm->engine.subdev.device;
31 struct nv40_pm_cntr *cntr = (void *)ctr;
32 u32 log = ctr->logic_op; 31 u32 log = ctr->logic_op;
33 u32 src = 0x00000000; 32 u32 src = 0x00000000;
34 int i; 33 int i;
35 34
36 for (i = 0; i < 4 && ctr->signal[i]; i++) 35 for (i = 0; i < 4; i++)
37 src |= (ctr->signal[i] - dom->signal) << (i * 8); 36 src |= ctr->signal[i] << (i * 8);
38 37
39 nv_wr32(priv, 0x00a7c0 + dom->addr, 0x00000001); 38 nvkm_wr32(device, 0x00a7c0 + dom->addr, 0x00000001 | (dom->mode << 4));
40 nv_wr32(priv, 0x00a400 + dom->addr + (cntr->base.slot * 0x40), src); 39 nvkm_wr32(device, 0x00a400 + dom->addr + (ctr->slot * 0x40), src);
41 nv_wr32(priv, 0x00a420 + dom->addr + (cntr->base.slot * 0x40), log); 40 nvkm_wr32(device, 0x00a420 + dom->addr + (ctr->slot * 0x40), log);
42} 41}
43 42
44static void 43static void
45nv40_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom, 44nv40_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
46 struct nvkm_perfctr *ctr) 45 struct nvkm_perfctr *ctr)
47{ 46{
48 struct nv40_pm_priv *priv = (void *)ppm; 47 struct nvkm_device *device = pm->engine.subdev.device;
49 struct nv40_pm_cntr *cntr = (void *)ctr;
50 48
51 switch (cntr->base.slot) { 49 switch (ctr->slot) {
52 case 0: cntr->base.ctr = nv_rd32(priv, 0x00a700 + dom->addr); break; 50 case 0: ctr->ctr = nvkm_rd32(device, 0x00a700 + dom->addr); break;
53 case 1: cntr->base.ctr = nv_rd32(priv, 0x00a6c0 + dom->addr); break; 51 case 1: ctr->ctr = nvkm_rd32(device, 0x00a6c0 + dom->addr); break;
54 case 2: cntr->base.ctr = nv_rd32(priv, 0x00a680 + dom->addr); break; 52 case 2: ctr->ctr = nvkm_rd32(device, 0x00a680 + dom->addr); break;
55 case 3: cntr->base.ctr = nv_rd32(priv, 0x00a740 + dom->addr); break; 53 case 3: ctr->ctr = nvkm_rd32(device, 0x00a740 + dom->addr); break;
56 } 54 }
57 cntr->base.clk = nv_rd32(priv, 0x00a600 + dom->addr); 55 dom->clk = nvkm_rd32(device, 0x00a600 + dom->addr);
58} 56}
59 57
60static void 58static void
61nv40_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom) 59nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
62{ 60{
63 struct nv40_pm_priv *priv = (void *)ppm; 61 struct nvkm_device *device = pm->engine.subdev.device;
64 if (priv->sequence != ppm->sequence) { 62 if (pm->sequence != pm->sequence) {
65 nv_wr32(priv, 0x400084, 0x00000020); 63 nvkm_wr32(device, 0x400084, 0x00000020);
66 priv->sequence = ppm->sequence; 64 pm->sequence = pm->sequence;
67 } 65 }
68} 66}
69 67
@@ -74,6 +72,28 @@ nv40_perfctr_func = {
74 .next = nv40_perfctr_next, 72 .next = nv40_perfctr_next,
75}; 73};
76 74
75static const struct nvkm_pm_func
76nv40_pm_ = {
77};
78
79int
80nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device,
81 int index, struct nvkm_pm **ppm)
82{
83 struct nv40_pm *pm;
84 int ret;
85
86 if (!(pm = kzalloc(sizeof(*pm), GFP_KERNEL)))
87 return -ENOMEM;
88 *ppm = &pm->base;
89
90 ret = nvkm_pm_ctor(&nv40_pm_, device, index, &pm->base);
91 if (ret)
92 return ret;
93
94 return nvkm_perfdom_new(&pm->base, "pc", 0, 0, 0, 4, doms);
95}
96
77static const struct nvkm_specdom 97static const struct nvkm_specdom
78nv40_pm[] = { 98nv40_pm[] = {
79 { 0x20, (const struct nvkm_specsig[]) { 99 { 0x20, (const struct nvkm_specsig[]) {
@@ -95,36 +115,7 @@ nv40_pm[] = {
95}; 115};
96 116
97int 117int
98nv40_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 118nv40_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
99 struct nvkm_oclass *oclass, void *data, u32 size,
100 struct nvkm_object **pobject)
101{ 119{
102 struct nv40_pm_oclass *mclass = (void *)oclass; 120 return nv40_pm_new_(nv40_pm, device, index, ppm);
103 struct nv40_pm_priv *priv;
104 int ret;
105
106 ret = nvkm_pm_create(parent, engine, oclass, &priv);
107 *pobject = nv_object(priv);
108 if (ret)
109 return ret;
110
111 ret = nvkm_perfdom_new(&priv->base, "pm", 0, 0, 0, 4, mclass->doms);
112 if (ret)
113 return ret;
114
115 nv_engine(priv)->cclass = &nvkm_pm_cclass;
116 nv_engine(priv)->sclass = nvkm_pm_sclass;
117 return 0;
118} 121}
119
120struct nvkm_oclass *
121nv40_pm_oclass = &(struct nv40_pm_oclass) {
122 .base.handle = NV_ENGINE(PM, 0x40),
123 .base.ofuncs = &(struct nvkm_ofuncs) {
124 .ctor = nv40_pm_ctor,
125 .dtor = _nvkm_pm_dtor,
126 .init = _nvkm_pm_init,
127 .fini = _nvkm_pm_fini,
128 },
129 .doms = nv40_pm,
130}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
index 2338e150420e..da481abe8f7a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
@@ -1,24 +1,14 @@
1#ifndef __NVKM_PM_NV40_H__ 1#ifndef __NVKM_PM_NV40_H__
2#define __NVKM_PM_NV40_H__ 2#define __NVKM_PM_NV40_H__
3#define nv40_pm(p) container_of((p), struct nv40_pm, base)
3#include "priv.h" 4#include "priv.h"
4 5
5struct nv40_pm_oclass { 6struct nv40_pm {
6 struct nvkm_oclass base;
7 const struct nvkm_specdom *doms;
8};
9
10struct nv40_pm_priv {
11 struct nvkm_pm base; 7 struct nvkm_pm base;
12 u32 sequence; 8 u32 sequence;
13}; 9};
14 10
15int nv40_pm_ctor(struct nvkm_object *, struct nvkm_object *, 11int nv40_pm_new_(const struct nvkm_specdom *, struct nvkm_device *,
16 struct nvkm_oclass *, void *data, u32 size, 12 int index, struct nvkm_pm **);
17 struct nvkm_object **pobject);
18
19struct nv40_pm_cntr {
20 struct nvkm_perfctr base;
21};
22
23extern const struct nvkm_funcdom nv40_perfctr_func; 13extern const struct nvkm_funcdom nv40_perfctr_func;
24#endif 14#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
index 6af83b5d1b11..cc5a41d4c6f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
@@ -23,35 +23,153 @@
23 */ 23 */
24#include "nv40.h" 24#include "nv40.h"
25 25
26const struct nvkm_specsrc
27nv50_zcull_sources[] = {
28 { 0x402ca4, (const struct nvkm_specmux[]) {
29 { 0x7fff, 0, "unk0" },
30 {}
31 }, "pgraph_zcull_pm_unka4" },
32 {}
33};
34
35const struct nvkm_specsrc
36nv50_zrop_sources[] = {
37 { 0x40708c, (const struct nvkm_specmux[]) {
38 { 0xf, 0, "sel0", true },
39 { 0xf, 16, "sel1", true },
40 {}
41 }, "pgraph_rop0_zrop_pm_mux" },
42 {}
43};
44
45static const struct nvkm_specsrc
46nv50_prop_sources[] = {
47 { 0x40be50, (const struct nvkm_specmux[]) {
48 { 0x1f, 0, "sel", true },
49 {}
50 }, "pgraph_tpc3_prop_pm_mux" },
51 {}
52};
53
54static const struct nvkm_specsrc
55nv50_crop_sources[] = {
56 { 0x407008, (const struct nvkm_specmux[]) {
57 { 0x7, 0, "sel0", true },
58 { 0x7, 16, "sel1", true },
59 {}
60 }, "pgraph_rop0_crop_pm_mux" },
61 {}
62};
63
64static const struct nvkm_specsrc
65nv50_tex_sources[] = {
66 { 0x40b808, (const struct nvkm_specmux[]) {
67 { 0x3fff, 0, "unk0" },
68 {}
69 }, "pgraph_tpc3_tex_unk08" },
70 {}
71};
72
73static const struct nvkm_specsrc
74nv50_vfetch_sources[] = {
75 { 0x400c0c, (const struct nvkm_specmux[]) {
76 { 0x1, 0, "unk0" },
77 {}
78 }, "pgraph_vfetch_unk0c" },
79 {}
80};
81
26static const struct nvkm_specdom 82static const struct nvkm_specdom
27nv50_pm[] = { 83nv50_pm[] = {
28 { 0x040, (const struct nvkm_specsig[]) { 84 { 0x20, (const struct nvkm_specsig[]) {
29 {} 85 {}
30 }, &nv40_perfctr_func }, 86 }, &nv40_perfctr_func },
31 { 0x100, (const struct nvkm_specsig[]) { 87 { 0xf0, (const struct nvkm_specsig[]) {
32 { 0xc8, "gr_idle" }, 88 { 0xc8, "pc01_gr_idle" },
89 { 0x7f, "pc01_strmout_00" },
90 { 0x80, "pc01_strmout_01" },
91 { 0xdc, "pc01_trast_00" },
92 { 0xdd, "pc01_trast_01" },
93 { 0xde, "pc01_trast_02" },
94 { 0xdf, "pc01_trast_03" },
95 { 0xe2, "pc01_trast_04" },
96 { 0xe3, "pc01_trast_05" },
97 { 0x7c, "pc01_vattr_00" },
98 { 0x7d, "pc01_vattr_01" },
99 { 0x26, "pc01_vfetch_00", nv50_vfetch_sources },
100 { 0x27, "pc01_vfetch_01", nv50_vfetch_sources },
101 { 0x28, "pc01_vfetch_02", nv50_vfetch_sources },
102 { 0x29, "pc01_vfetch_03", nv50_vfetch_sources },
103 { 0x2a, "pc01_vfetch_04", nv50_vfetch_sources },
104 { 0x2b, "pc01_vfetch_05", nv50_vfetch_sources },
105 { 0x2c, "pc01_vfetch_06", nv50_vfetch_sources },
106 { 0x2d, "pc01_vfetch_07", nv50_vfetch_sources },
107 { 0x2e, "pc01_vfetch_08", nv50_vfetch_sources },
108 { 0x2f, "pc01_vfetch_09", nv50_vfetch_sources },
109 { 0x30, "pc01_vfetch_0a", nv50_vfetch_sources },
110 { 0x31, "pc01_vfetch_0b", nv50_vfetch_sources },
111 { 0x32, "pc01_vfetch_0c", nv50_vfetch_sources },
112 { 0x33, "pc01_vfetch_0d", nv50_vfetch_sources },
113 { 0x34, "pc01_vfetch_0e", nv50_vfetch_sources },
114 { 0x35, "pc01_vfetch_0f", nv50_vfetch_sources },
115 { 0x36, "pc01_vfetch_10", nv50_vfetch_sources },
116 { 0x37, "pc01_vfetch_11", nv50_vfetch_sources },
117 { 0x38, "pc01_vfetch_12", nv50_vfetch_sources },
118 { 0x39, "pc01_vfetch_13", nv50_vfetch_sources },
119 { 0x3a, "pc01_vfetch_14", nv50_vfetch_sources },
120 { 0x3b, "pc01_vfetch_15", nv50_vfetch_sources },
121 { 0x3c, "pc01_vfetch_16", nv50_vfetch_sources },
122 { 0x3d, "pc01_vfetch_17", nv50_vfetch_sources },
123 { 0x3e, "pc01_vfetch_18", nv50_vfetch_sources },
124 { 0x3f, "pc01_vfetch_19", nv50_vfetch_sources },
125 { 0x20, "pc01_zcull_00", nv50_zcull_sources },
126 { 0x21, "pc01_zcull_01", nv50_zcull_sources },
127 { 0x22, "pc01_zcull_02", nv50_zcull_sources },
128 { 0x23, "pc01_zcull_03", nv50_zcull_sources },
129 { 0x24, "pc01_zcull_04", nv50_zcull_sources },
130 { 0x25, "pc01_zcull_05", nv50_zcull_sources },
131 { 0xae, "pc01_unk00" },
132 { 0xee, "pc01_trailer" },
33 {} 133 {}
34 }, &nv40_perfctr_func }, 134 }, &nv40_perfctr_func },
35 { 0x100, (const struct nvkm_specsig[]) { 135 { 0xf0, (const struct nvkm_specsig[]) {
136 { 0x52, "pc02_crop_00", nv50_crop_sources },
137 { 0x53, "pc02_crop_01", nv50_crop_sources },
138 { 0x54, "pc02_crop_02", nv50_crop_sources },
139 { 0x55, "pc02_crop_03", nv50_crop_sources },
140 { 0x00, "pc02_prop_00", nv50_prop_sources },
141 { 0x01, "pc02_prop_01", nv50_prop_sources },
142 { 0x02, "pc02_prop_02", nv50_prop_sources },
143 { 0x03, "pc02_prop_03", nv50_prop_sources },
144 { 0x04, "pc02_prop_04", nv50_prop_sources },
145 { 0x05, "pc02_prop_05", nv50_prop_sources },
146 { 0x06, "pc02_prop_06", nv50_prop_sources },
147 { 0x07, "pc02_prop_07", nv50_prop_sources },
148 { 0x70, "pc02_tex_00", nv50_tex_sources },
149 { 0x71, "pc02_tex_01", nv50_tex_sources },
150 { 0x72, "pc02_tex_02", nv50_tex_sources },
151 { 0x73, "pc02_tex_03", nv50_tex_sources },
152 { 0x40, "pc02_tex_04", nv50_tex_sources },
153 { 0x41, "pc02_tex_05", nv50_tex_sources },
154 { 0x42, "pc02_tex_06", nv50_tex_sources },
155 { 0x6c, "pc02_zrop_00", nv50_zrop_sources },
156 { 0x6d, "pc02_zrop_01", nv50_zrop_sources },
157 { 0x6e, "pc02_zrop_02", nv50_zrop_sources },
158 { 0x6f, "pc02_zrop_03", nv50_zrop_sources },
159 { 0xee, "pc02_trailer" },
36 {} 160 {}
37 }, &nv40_perfctr_func }, 161 }, &nv40_perfctr_func },
38 { 0x020, (const struct nvkm_specsig[]) { 162 { 0x20, (const struct nvkm_specsig[]) {
39 {} 163 {}
40 }, &nv40_perfctr_func }, 164 }, &nv40_perfctr_func },
41 { 0x040, (const struct nvkm_specsig[]) { 165 { 0x20, (const struct nvkm_specsig[]) {
42 {} 166 {}
43 }, &nv40_perfctr_func }, 167 }, &nv40_perfctr_func },
44 {} 168 {}
45}; 169};
46 170
47struct nvkm_oclass * 171int
48nv50_pm_oclass = &(struct nv40_pm_oclass) { 172nv50_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
49 .base.handle = NV_ENGINE(PM, 0x50), 173{
50 .base.ofuncs = &(struct nvkm_ofuncs) { 174 return nv40_pm_new_(nv50_pm, device, index, ppm);
51 .ctor = nv40_pm_ctor, 175}
52 .dtor = _nvkm_pm_dtor,
53 .init = _nvkm_pm_init,
54 .fini = _nvkm_pm_fini,
55 },
56 .doms = nv50_pm,
57}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
index 1e6eff2a6d79..d7b81cbf82b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
@@ -1,58 +1,85 @@
1#ifndef __NVKM_PM_PRIV_H__ 1#ifndef __NVKM_PM_PRIV_H__
2#define __NVKM_PM_PRIV_H__ 2#define __NVKM_PM_PRIV_H__
3#define nvkm_pm(p) container_of((p), struct nvkm_pm, engine)
3#include <engine/pm.h> 4#include <engine/pm.h>
4 5
6int nvkm_pm_ctor(const struct nvkm_pm_func *, struct nvkm_device *,
7 int index, struct nvkm_pm *);
8
9struct nvkm_pm_func {
10 void (*fini)(struct nvkm_pm *);
11};
12
5struct nvkm_perfctr { 13struct nvkm_perfctr {
6 struct nvkm_object base;
7 struct list_head head; 14 struct list_head head;
8 struct nvkm_perfsig *signal[4]; 15 u8 domain;
16 u8 signal[4];
17 u64 source[4][8];
9 int slot; 18 int slot;
10 u32 logic_op; 19 u32 logic_op;
11 u32 clk;
12 u32 ctr; 20 u32 ctr;
13}; 21};
14 22
15extern struct nvkm_oclass nvkm_pm_sclass[]; 23struct nvkm_specmux {
24 u32 mask;
25 u8 shift;
26 const char *name;
27 bool enable;
28};
16 29
17#include <core/engctx.h> 30struct nvkm_specsrc {
31 u32 addr;
32 const struct nvkm_specmux *mux;
33 const char *name;
34};
18 35
19struct nvkm_perfctx { 36struct nvkm_perfsrc {
20 struct nvkm_engctx base; 37 struct list_head head;
38 char *name;
39 u32 addr;
40 u32 mask;
41 u8 shift;
42 bool enable;
21}; 43};
22 44
23extern struct nvkm_oclass nvkm_pm_cclass; 45extern const struct nvkm_specsrc nv50_zcull_sources[];
46extern const struct nvkm_specsrc nv50_zrop_sources[];
47extern const struct nvkm_specsrc g84_vfetch_sources[];
48extern const struct nvkm_specsrc gt200_crop_sources[];
49extern const struct nvkm_specsrc gt200_prop_sources[];
50extern const struct nvkm_specsrc gt200_tex_sources[];
24 51
25struct nvkm_specsig { 52struct nvkm_specsig {
26 u8 signal; 53 u8 signal;
27 const char *name; 54 const char *name;
55 const struct nvkm_specsrc *source;
28}; 56};
29 57
30struct nvkm_perfsig { 58struct nvkm_perfsig {
31 const char *name; 59 const char *name;
60 u8 source[8];
32}; 61};
33 62
34struct nvkm_perfdom;
35struct nvkm_perfctr *
36nvkm_perfsig_wrap(struct nvkm_pm *, const char *, struct nvkm_perfdom **);
37
38struct nvkm_specdom { 63struct nvkm_specdom {
39 u16 signal_nr; 64 u16 signal_nr;
40 const struct nvkm_specsig *signal; 65 const struct nvkm_specsig *signal;
41 const struct nvkm_funcdom *func; 66 const struct nvkm_funcdom *func;
42}; 67};
43 68
44extern const struct nvkm_specdom gt215_pm_pwr[]; 69#define nvkm_perfdom(p) container_of((p), struct nvkm_perfdom, object)
45extern const struct nvkm_specdom gf100_pm_pwr[];
46extern const struct nvkm_specdom gk104_pm_pwr[];
47 70
48struct nvkm_perfdom { 71struct nvkm_perfdom {
72 struct nvkm_object object;
73 struct nvkm_perfmon *perfmon;
49 struct list_head head; 74 struct list_head head;
50 struct list_head list; 75 struct list_head list;
51 const struct nvkm_funcdom *func; 76 const struct nvkm_funcdom *func;
77 struct nvkm_perfctr *ctr[4];
52 char name[32]; 78 char name[32];
53 u32 addr; 79 u32 addr;
54 u8 quad; 80 u8 mode;
55 u32 signal_nr; 81 u32 clk;
82 u16 signal_nr;
56 struct nvkm_perfsig signal[]; 83 struct nvkm_perfsig signal[];
57}; 84};
58 85
@@ -67,24 +94,10 @@ struct nvkm_funcdom {
67int nvkm_perfdom_new(struct nvkm_pm *, const char *, u32, u32, u32, u32, 94int nvkm_perfdom_new(struct nvkm_pm *, const char *, u32, u32, u32, u32,
68 const struct nvkm_specdom *); 95 const struct nvkm_specdom *);
69 96
70#define nvkm_pm_create(p,e,o,d) \ 97#define nvkm_perfmon(p) container_of((p), struct nvkm_perfmon, object)
71 nvkm_pm_create_((p), (e), (o), sizeof(**d), (void **)d) 98
72#define nvkm_pm_dtor(p) ({ \ 99struct nvkm_perfmon {
73 struct nvkm_pm *c = (p); \ 100 struct nvkm_object object;
74 _nvkm_pm_dtor(nv_object(c)); \ 101 struct nvkm_pm *pm;
75}) 102};
76#define nvkm_pm_init(p) ({ \
77 struct nvkm_pm *c = (p); \
78 _nvkm_pm_init(nv_object(c)); \
79})
80#define nvkm_pm_fini(p,s) ({ \
81 struct nvkm_pm *c = (p); \
82 _nvkm_pm_fini(nv_object(c), (s)); \
83})
84
85int nvkm_pm_create_(struct nvkm_object *, struct nvkm_object *,
86 struct nvkm_oclass *, int, void **);
87void _nvkm_pm_dtor(struct nvkm_object *);
88int _nvkm_pm_init(struct nvkm_object *);
89int _nvkm_pm_fini(struct nvkm_object *, bool);
90#endif 103#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
index 06ee06071104..66b147bd58eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
@@ -1,5 +1,5 @@
1/* 1/*
2 * fuc microcode for g98 psec engine 2 * fuc microcode for g98 sec engine
3 * Copyright (C) 2010 Marcin Kościelnicki 3 * Copyright (C) 2010 Marcin Kościelnicki
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -17,7 +17,7 @@
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */ 18 */
19 19
20.section #g98_psec_data 20.section #g98_sec_data
21 21
22ctx_dma: 22ctx_dma:
23ctx_dma_query: .b32 0 23ctx_dma_query: .b32 0
@@ -94,7 +94,7 @@ sec_dtable:
94 94
95.align 0x100 95.align 0x100
96 96
97.section #g98_psec_code 97.section #g98_sec_code
98 98
99 // $r0 is always set to 0 in our code - this allows some space savings. 99 // $r0 is always set to 0 in our code - this allows some space savings.
100 clear b32 $r0 100 clear b32 $r0
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
index 5d65c4fbb087..eca62221f299 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
@@ -1,4 +1,4 @@
1uint32_t g98_psec_data[] = { 1uint32_t g98_sec_data[] = {
2/* 0x0000: ctx_dma */ 2/* 0x0000: ctx_dma */
3/* 0x0000: ctx_dma_query */ 3/* 0x0000: ctx_dma_query */
4 0x00000000, 4 0x00000000,
@@ -150,7 +150,7 @@ uint32_t g98_psec_data[] = {
150 0x00000000, 150 0x00000000,
151}; 151};
152 152
153uint32_t g98_psec_code[] = { 153uint32_t g98_sec_code[] = {
154 0x17f004bd, 154 0x17f004bd,
155 0x0010fe35, 155 0x0010fe35,
156 0xf10004fe, 156 0xf10004fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
index 9d5c1b8b1f8c..995c2c5ec150 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
@@ -22,47 +22,14 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <engine/sec.h> 24#include <engine/sec.h>
25#include <engine/falcon.h> 25#include <engine/fifo.h>
26#include "fuc/g98.fuc0s.h" 26#include "fuc/g98.fuc0s.h"
27 27
28#include <core/client.h> 28#include <core/client.h>
29#include <core/enum.h> 29#include <core/enum.h>
30#include <engine/fifo.h> 30#include <core/gpuobj.h>
31
32struct g98_sec_priv {
33 struct nvkm_falcon base;
34};
35
36/*******************************************************************************
37 * Crypt object classes
38 ******************************************************************************/
39 31
40static struct nvkm_oclass 32#include <nvif/class.h>
41g98_sec_sclass[] = {
42 { 0x88b4, &nvkm_object_ofuncs },
43 {},
44};
45
46/*******************************************************************************
47 * PSEC context
48 ******************************************************************************/
49
50static struct nvkm_oclass
51g98_sec_cclass = {
52 .handle = NV_ENGCTX(SEC, 0x98),
53 .ofuncs = &(struct nvkm_ofuncs) {
54 .ctor = _nvkm_falcon_context_ctor,
55 .dtor = _nvkm_falcon_context_dtor,
56 .init = _nvkm_falcon_context_init,
57 .fini = _nvkm_falcon_context_fini,
58 .rd32 = _nvkm_falcon_context_rd32,
59 .wr32 = _nvkm_falcon_context_wr32,
60 },
61};
62
63/*******************************************************************************
64 * PSEC engine/subdev functions
65 ******************************************************************************/
66 33
67static const struct nvkm_enum g98_sec_isr_error_name[] = { 34static const struct nvkm_enum g98_sec_isr_error_name[] = {
68 { 0x0000, "ILLEGAL_MTHD" }, 35 { 0x0000, "ILLEGAL_MTHD" },
@@ -73,77 +40,44 @@ static const struct nvkm_enum g98_sec_isr_error_name[] = {
73}; 40};
74 41
75static void 42static void
76g98_sec_intr(struct nvkm_subdev *subdev) 43g98_sec_intr(struct nvkm_falcon *sec, struct nvkm_fifo_chan *chan)
77{ 44{
78 struct nvkm_fifo *pfifo = nvkm_fifo(subdev); 45 struct nvkm_subdev *subdev = &sec->engine.subdev;
79 struct nvkm_engine *engine = nv_engine(subdev); 46 struct nvkm_device *device = subdev->device;
80 struct nvkm_object *engctx; 47 u32 ssta = nvkm_rd32(device, 0x087040) & 0x0000ffff;
81 struct g98_sec_priv *priv = (void *)subdev; 48 u32 addr = nvkm_rd32(device, 0x087040) >> 16;
82 u32 disp = nv_rd32(priv, 0x08701c);
83 u32 stat = nv_rd32(priv, 0x087008) & disp & ~(disp >> 16);
84 u32 inst = nv_rd32(priv, 0x087050) & 0x3fffffff;
85 u32 ssta = nv_rd32(priv, 0x087040) & 0x0000ffff;
86 u32 addr = nv_rd32(priv, 0x087040) >> 16;
87 u32 mthd = (addr & 0x07ff) << 2; 49 u32 mthd = (addr & 0x07ff) << 2;
88 u32 subc = (addr & 0x3800) >> 11; 50 u32 subc = (addr & 0x3800) >> 11;
89 u32 data = nv_rd32(priv, 0x087044); 51 u32 data = nvkm_rd32(device, 0x087044);
90 int chid; 52 const struct nvkm_enum *en =
91 53 nvkm_enum_find(g98_sec_isr_error_name, ssta);
92 engctx = nvkm_engctx_get(engine, inst); 54
93 chid = pfifo->chid(pfifo, engctx); 55 nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] "
94 56 "subc %d mthd %04x data %08x\n", ssta,
95 if (stat & 0x00000040) { 57 en ? en->name : "UNKNOWN", chan ? chan->chid : -1,
96 nv_error(priv, "DISPATCH_ERROR ["); 58 chan ? chan->inst->addr : 0,
97 nvkm_enum_print(g98_sec_isr_error_name, ssta); 59 chan ? chan->object.client->name : "unknown",
98 pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n", 60 subc, mthd, data);
99 chid, (u64)inst << 12, nvkm_client_name(engctx), 61}
100 subc, mthd, data);
101 nv_wr32(priv, 0x087004, 0x00000040);
102 stat &= ~0x00000040;
103 }
104 62
105 if (stat) { 63static const struct nvkm_falcon_func
106 nv_error(priv, "unhandled intr 0x%08x\n", stat); 64g98_sec = {
107 nv_wr32(priv, 0x087004, stat); 65 .code.data = g98_sec_code,
66 .code.size = sizeof(g98_sec_code),
67 .data.data = g98_sec_data,
68 .data.size = sizeof(g98_sec_data),
69 .pmc_enable = 0x00004000,
70 .intr = g98_sec_intr,
71 .sclass = {
72 { -1, -1, G98_SEC },
73 {}
108 } 74 }
75};
109 76
110 nvkm_engctx_put(engctx); 77int
111} 78g98_sec_new(struct nvkm_device *device, int index,
112 79 struct nvkm_engine **pengine)
113static int
114g98_sec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
115 struct nvkm_oclass *oclass, void *data, u32 size,
116 struct nvkm_object **pobject)
117{ 80{
118 struct g98_sec_priv *priv; 81 return nvkm_falcon_new_(&g98_sec, device, index,
119 int ret; 82 true, 0x087000, pengine);
120
121 ret = nvkm_falcon_create(parent, engine, oclass, 0x087000, true,
122 "PSEC", "sec", &priv);
123 *pobject = nv_object(priv);
124 if (ret)
125 return ret;
126
127 nv_subdev(priv)->unit = 0x00004000;
128 nv_subdev(priv)->intr = g98_sec_intr;
129 nv_engine(priv)->cclass = &g98_sec_cclass;
130 nv_engine(priv)->sclass = g98_sec_sclass;
131 nv_falcon(priv)->code.data = g98_psec_code;
132 nv_falcon(priv)->code.size = sizeof(g98_psec_code);
133 nv_falcon(priv)->data.data = g98_psec_data;
134 nv_falcon(priv)->data.size = sizeof(g98_psec_data);
135 return 0;
136} 83}
137
138struct nvkm_oclass
139g98_sec_oclass = {
140 .handle = NV_ENGINE(SEC, 0x98),
141 .ofuncs = &(struct nvkm_ofuncs) {
142 .ctor = g98_sec_ctor,
143 .dtor = _nvkm_falcon_dtor,
144 .init = _nvkm_falcon_init,
145 .fini = _nvkm_falcon_fini,
146 .rd32 = _nvkm_falcon_rd32,
147 .wr32 = _nvkm_falcon_wr32,
148 },
149};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
index bdc3a05907d5..1c291e6fcf96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
@@ -1,4 +1,9 @@
1nvkm-y += nvkm/engine/sw/base.o
1nvkm-y += nvkm/engine/sw/nv04.o 2nvkm-y += nvkm/engine/sw/nv04.o
2nvkm-y += nvkm/engine/sw/nv10.o 3nvkm-y += nvkm/engine/sw/nv10.o
3nvkm-y += nvkm/engine/sw/nv50.o 4nvkm-y += nvkm/engine/sw/nv50.o
4nvkm-y += nvkm/engine/sw/gf100.o 5nvkm-y += nvkm/engine/sw/gf100.o
6
7nvkm-y += nvkm/engine/sw/chan.o
8
9nvkm-y += nvkm/engine/sw/nvsw.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
new file mode 100644
index 000000000000..53c1f7e75b54
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
@@ -0,0 +1,110 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25#include "chan.h"
26
27#include <engine/fifo.h>
28
29bool
30nvkm_sw_mthd(struct nvkm_sw *sw, int chid, int subc, u32 mthd, u32 data)
31{
32 struct nvkm_sw_chan *chan;
33 bool handled = false;
34 unsigned long flags;
35
36 spin_lock_irqsave(&sw->engine.lock, flags);
37 list_for_each_entry(chan, &sw->chan, head) {
38 if (chan->fifo->chid == chid) {
39 handled = nvkm_sw_chan_mthd(chan, subc, mthd, data);
40 list_del(&chan->head);
41 list_add(&chan->head, &sw->chan);
42 break;
43 }
44 }
45 spin_unlock_irqrestore(&sw->engine.lock, flags);
46 return handled;
47}
48
49static int
50nvkm_sw_oclass_new(const struct nvkm_oclass *oclass, void *data, u32 size,
51 struct nvkm_object **pobject)
52{
53 struct nvkm_sw_chan *chan = nvkm_sw_chan(oclass->parent);
54 const struct nvkm_sw_chan_sclass *sclass = oclass->engn;
55 return sclass->ctor(chan, oclass, data, size, pobject);
56}
57
58static int
59nvkm_sw_oclass_get(struct nvkm_oclass *oclass, int index)
60{
61 struct nvkm_sw *sw = nvkm_sw(oclass->engine);
62 int c = 0;
63
64 while (sw->func->sclass[c].ctor) {
65 if (c++ == index) {
66 oclass->engn = &sw->func->sclass[index];
67 oclass->base = sw->func->sclass[index].base;
68 oclass->base.ctor = nvkm_sw_oclass_new;
69 return index;
70 }
71 }
72
73 return c;
74}
75
76static int
77nvkm_sw_cclass_get(struct nvkm_fifo_chan *fifoch,
78 const struct nvkm_oclass *oclass,
79 struct nvkm_object **pobject)
80{
81 struct nvkm_sw *sw = nvkm_sw(oclass->engine);
82 return sw->func->chan_new(sw, fifoch, oclass, pobject);
83}
84
85static void *
86nvkm_sw_dtor(struct nvkm_engine *engine)
87{
88 return nvkm_sw(engine);
89}
90
91static const struct nvkm_engine_func
92nvkm_sw = {
93 .dtor = nvkm_sw_dtor,
94 .fifo.cclass = nvkm_sw_cclass_get,
95 .fifo.sclass = nvkm_sw_oclass_get,
96};
97
98int
99nvkm_sw_new_(const struct nvkm_sw_func *func, struct nvkm_device *device,
100 int index, struct nvkm_sw **psw)
101{
102 struct nvkm_sw *sw;
103
104 if (!(sw = *psw = kzalloc(sizeof(*sw), GFP_KERNEL)))
105 return -ENOMEM;
106 INIT_LIST_HEAD(&sw->chan);
107 sw->func = func;
108
109 return nvkm_engine_ctor(&nvkm_sw, device, index, 0, true, &sw->engine);
110}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
new file mode 100644
index 000000000000..d082f4f73a80
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
@@ -0,0 +1,111 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "chan.h"
25
26#include <core/notify.h>
27#include <engine/fifo.h>
28
29#include <nvif/event.h>
30#include <nvif/unpack.h>
31
32bool
33nvkm_sw_chan_mthd(struct nvkm_sw_chan *chan, int subc, u32 mthd, u32 data)
34{
35 switch (mthd) {
36 case 0x0000:
37 return true;
38 case 0x0500:
39 nvkm_event_send(&chan->event, 1, 0, NULL, 0);
40 return true;
41 default:
42 if (chan->func->mthd)
43 return chan->func->mthd(chan, subc, mthd, data);
44 break;
45 }
46 return false;
47}
48
49static int
50nvkm_sw_chan_event_ctor(struct nvkm_object *object, void *data, u32 size,
51 struct nvkm_notify *notify)
52{
53 union {
54 struct nvif_notify_uevent_req none;
55 } *req = data;
56 int ret;
57
58 if (nvif_unvers(req->none)) {
59 notify->size = sizeof(struct nvif_notify_uevent_rep);
60 notify->types = 1;
61 notify->index = 0;
62 }
63
64 return ret;
65}
66
67static const struct nvkm_event_func
68nvkm_sw_chan_event = {
69 .ctor = nvkm_sw_chan_event_ctor,
70};
71
72static void *
73nvkm_sw_chan_dtor(struct nvkm_object *object)
74{
75 struct nvkm_sw_chan *chan = nvkm_sw_chan(object);
76 struct nvkm_sw *sw = chan->sw;
77 unsigned long flags;
78 void *data = chan;
79
80 if (chan->func->dtor)
81 data = chan->func->dtor(chan);
82 nvkm_event_fini(&chan->event);
83
84 spin_lock_irqsave(&sw->engine.lock, flags);
85 list_del(&chan->head);
86 spin_unlock_irqrestore(&sw->engine.lock, flags);
87 return data;
88}
89
90static const struct nvkm_object_func
91nvkm_sw_chan = {
92 .dtor = nvkm_sw_chan_dtor,
93};
94
95int
96nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *func, struct nvkm_sw *sw,
97 struct nvkm_fifo_chan *fifo, const struct nvkm_oclass *oclass,
98 struct nvkm_sw_chan *chan)
99{
100 unsigned long flags;
101
102 nvkm_object_ctor(&nvkm_sw_chan, oclass, &chan->object);
103 chan->func = func;
104 chan->sw = sw;
105 chan->fifo = fifo;
106 spin_lock_irqsave(&sw->engine.lock, flags);
107 list_add(&chan->head, &sw->chan);
108 spin_unlock_irqrestore(&sw->engine.lock, flags);
109
110 return nvkm_event_init(&nvkm_sw_chan_event, 1, 1, &chan->event);
111}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
new file mode 100644
index 000000000000..6608bf6c6842
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
@@ -0,0 +1,26 @@
1#ifndef __NVKM_SW_CHAN_H__
2#define __NVKM_SW_CHAN_H__
3#define nvkm_sw_chan(p) container_of((p), struct nvkm_sw_chan, object)
4#include "priv.h"
5#include <core/event.h>
6
7struct nvkm_sw_chan {
8 const struct nvkm_sw_chan_func *func;
9 struct nvkm_object object;
10 struct nvkm_sw *sw;
11 struct nvkm_fifo_chan *fifo;
12 struct list_head head;
13
14 struct nvkm_event event;
15};
16
17struct nvkm_sw_chan_func {
18 void *(*dtor)(struct nvkm_sw_chan *);
19 bool (*mthd)(struct nvkm_sw_chan *, int subc, u32 mthd, u32 data);
20};
21
22int nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *, struct nvkm_sw *,
23 struct nvkm_fifo_chan *, const struct nvkm_oclass *,
24 struct nvkm_sw_chan *);
25bool nvkm_sw_chan_mthd(struct nvkm_sw_chan *, int subc, u32 mthd, u32 data);
26#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
index 533d5d8ed363..b01ef7eca906 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
@@ -23,119 +23,133 @@
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25 25
26#include <core/gpuobj.h>
26#include <subdev/bar.h> 27#include <subdev/bar.h>
28#include <engine/disp.h>
29#include <engine/fifo.h>
30
31#include <nvif/event.h>
32#include <nvif/ioctl.h>
27 33
28/******************************************************************************* 34/*******************************************************************************
29 * software object classes 35 * software context
30 ******************************************************************************/ 36 ******************************************************************************/
31 37
32static int 38static int
33gf100_sw_mthd_vblsem_offset(struct nvkm_object *object, u32 mthd, 39gf100_sw_chan_vblsem_release(struct nvkm_notify *notify)
34 void *args, u32 size)
35{ 40{
36 struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent); 41 struct nv50_sw_chan *chan =
37 u64 data = *(u32 *)args; 42 container_of(notify, typeof(*chan), vblank.notify[notify->index]);
38 if (mthd == 0x0400) { 43 struct nvkm_sw *sw = chan->base.sw;
39 chan->vblank.offset &= 0x00ffffffffULL; 44 struct nvkm_device *device = sw->engine.subdev.device;
40 chan->vblank.offset |= data << 32; 45 u32 inst = chan->base.fifo->inst->addr >> 12;
41 } else { 46
42 chan->vblank.offset &= 0xff00000000ULL; 47 nvkm_wr32(device, 0x001718, 0x80000000 | inst);
43 chan->vblank.offset |= data; 48 nvkm_bar_flush(device->bar);
44 } 49 nvkm_wr32(device, 0x06000c, upper_32_bits(chan->vblank.offset));
45 return 0; 50 nvkm_wr32(device, 0x060010, lower_32_bits(chan->vblank.offset));
51 nvkm_wr32(device, 0x060014, chan->vblank.value);
52
53 return NVKM_NOTIFY_DROP;
46} 54}
47 55
48static int 56static bool
49gf100_sw_mthd_mp_control(struct nvkm_object *object, u32 mthd, 57gf100_sw_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
50 void *args, u32 size)
51{ 58{
52 struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent); 59 struct nv50_sw_chan *chan = nv50_sw_chan(base);
53 struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine; 60 struct nvkm_engine *engine = chan->base.object.engine;
54 u32 data = *(u32 *)args; 61 struct nvkm_device *device = engine->subdev.device;
55
56 switch (mthd) { 62 switch (mthd) {
57 case 0x600: 63 case 0x0400:
58 nv_wr32(priv, 0x419e00, data); /* MP.PM_UNK000 */ 64 chan->vblank.offset &= 0x00ffffffffULL;
59 break; 65 chan->vblank.offset |= (u64)data << 32;
60 case 0x644: 66 return true;
61 if (data & ~0x1ffffe) 67 case 0x0404:
62 return -EINVAL; 68 chan->vblank.offset &= 0xff00000000ULL;
63 nv_wr32(priv, 0x419e44, data); /* MP.TRAP_WARP_ERROR_EN */ 69 chan->vblank.offset |= data;
70 return true;
71 case 0x0408:
72 chan->vblank.value = data;
73 return true;
74 case 0x040c:
75 if (data < device->disp->vblank.index_nr) {
76 nvkm_notify_get(&chan->vblank.notify[data]);
77 return true;
78 }
64 break; 79 break;
65 case 0x6ac: 80 case 0x600: /* MP.PM_UNK000 */
66 nv_wr32(priv, 0x419eac, data); /* MP.PM_UNK0AC */ 81 nvkm_wr32(device, 0x419e00, data);
82 return true;
83 case 0x644: /* MP.TRAP_WARP_ERROR_EN */
84 if (!(data & ~0x001ffffe)) {
85 nvkm_wr32(device, 0x419e44, data);
86 return true;
87 }
67 break; 88 break;
89 case 0x6ac: /* MP.PM_UNK0AC */
90 nvkm_wr32(device, 0x419eac, data);
91 return true;
68 default: 92 default:
69 return -EINVAL; 93 break;
70 } 94 }
71 return 0; 95 return false;
72} 96}
73 97
74static struct nvkm_omthds 98static const struct nvkm_sw_chan_func
75gf100_sw_omthds[] = { 99gf100_sw_chan = {
76 { 0x0400, 0x0400, gf100_sw_mthd_vblsem_offset }, 100 .dtor = nv50_sw_chan_dtor,
77 { 0x0404, 0x0404, gf100_sw_mthd_vblsem_offset }, 101 .mthd = gf100_sw_chan_mthd,
78 { 0x0408, 0x0408, nv50_sw_mthd_vblsem_value },
79 { 0x040c, 0x040c, nv50_sw_mthd_vblsem_release },
80 { 0x0500, 0x0500, nv50_sw_mthd_flip },
81 { 0x0600, 0x0600, gf100_sw_mthd_mp_control },
82 { 0x0644, 0x0644, gf100_sw_mthd_mp_control },
83 { 0x06ac, 0x06ac, gf100_sw_mthd_mp_control },
84 {}
85}; 102};
86 103
87static struct nvkm_oclass
88gf100_sw_sclass[] = {
89 { 0x906e, &nvkm_object_ofuncs, gf100_sw_omthds },
90 {}
91};
92
93/*******************************************************************************
94 * software context
95 ******************************************************************************/
96
97static int 104static int
98gf100_sw_vblsem_release(struct nvkm_notify *notify) 105gf100_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifoch,
106 const struct nvkm_oclass *oclass,
107 struct nvkm_object **pobject)
99{ 108{
100 struct nv50_sw_chan *chan = 109 struct nvkm_disp *disp = sw->engine.subdev.device->disp;
101 container_of(notify, typeof(*chan), vblank.notify[notify->index]); 110 struct nv50_sw_chan *chan;
102 struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine; 111 int ret, i;
103 struct nvkm_bar *bar = nvkm_bar(priv);
104 112
105 nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel); 113 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
106 bar->flush(bar); 114 return -ENOMEM;
107 nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset)); 115 *pobject = &chan->base.object;
108 nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
109 nv_wr32(priv, 0x060014, chan->vblank.value);
110 116
111 return NVKM_NOTIFY_DROP; 117 ret = nvkm_sw_chan_ctor(&gf100_sw_chan, sw, fifoch, oclass,
112} 118 &chan->base);
119 if (ret)
120 return ret;
113 121
114static struct nv50_sw_cclass 122 for (i = 0; disp && i < disp->vblank.index_nr; i++) {
115gf100_sw_cclass = { 123 ret = nvkm_notify_init(NULL, &disp->vblank,
116 .base.handle = NV_ENGCTX(SW, 0xc0), 124 gf100_sw_chan_vblsem_release, false,
117 .base.ofuncs = &(struct nvkm_ofuncs) { 125 &(struct nvif_notify_head_req_v0) {
118 .ctor = nv50_sw_context_ctor, 126 .head = i,
119 .dtor = nv50_sw_context_dtor, 127 },
120 .init = _nvkm_sw_context_init, 128 sizeof(struct nvif_notify_head_req_v0),
121 .fini = _nvkm_sw_context_fini, 129 sizeof(struct nvif_notify_head_rep_v0),
122 }, 130 &chan->vblank.notify[i]);
123 .vblank = gf100_sw_vblsem_release, 131 if (ret)
124}; 132 return ret;
133 }
134
135 return 0;
136}
125 137
126/******************************************************************************* 138/*******************************************************************************
127 * software engine/subdev functions 139 * software engine/subdev functions
128 ******************************************************************************/ 140 ******************************************************************************/
129 141
130struct nvkm_oclass * 142static const struct nvkm_sw_func
131gf100_sw_oclass = &(struct nv50_sw_oclass) { 143gf100_sw = {
132 .base.handle = NV_ENGINE(SW, 0xc0), 144 .chan_new = gf100_sw_chan_new,
133 .base.ofuncs = &(struct nvkm_ofuncs) { 145 .sclass = {
134 .ctor = nv50_sw_ctor, 146 { nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_GF100 } },
135 .dtor = _nvkm_sw_dtor, 147 {}
136 .init = _nvkm_sw_init, 148 }
137 .fini = _nvkm_sw_fini, 149};
138 }, 150
139 .cclass = &gf100_sw_cclass.base, 151int
140 .sclass = gf100_sw_sclass, 152gf100_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
141}.base; 153{
154 return nvkm_sw_new_(&gf100_sw, device, index, psw);
155}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
index 897024421d36..445217ffa791 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
@@ -21,15 +21,18 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <engine/sw.h> 24#define nv04_sw_chan(p) container_of((p), struct nv04_sw_chan, base)
25#include <engine/fifo.h> 25#include "priv.h"
26#include "chan.h"
27#include "nvsw.h"
26 28
27struct nv04_sw_priv { 29#include <nvif/class.h>
28 struct nvkm_sw base; 30#include <nvif/ioctl.h>
29}; 31#include <nvif/unpack.h>
30 32
31struct nv04_sw_chan { 33struct nv04_sw_chan {
32 struct nvkm_sw_chan base; 34 struct nvkm_sw_chan base;
35 atomic_t ref;
33}; 36};
34 37
35/******************************************************************************* 38/*******************************************************************************
@@ -37,103 +40,99 @@ struct nv04_sw_chan {
37 ******************************************************************************/ 40 ******************************************************************************/
38 41
39static int 42static int
40nv04_sw_set_ref(struct nvkm_object *object, u32 mthd, void *data, u32 size) 43nv04_nvsw_mthd_get_ref(struct nvkm_nvsw *nvsw, void *data, u32 size)
41{ 44{
42 struct nvkm_object *channel = (void *)nv_engctx(object->parent); 45 struct nv04_sw_chan *chan = nv04_sw_chan(nvsw->chan);
43 struct nvkm_fifo_chan *fifo = (void *)channel->parent; 46 union {
44 atomic_set(&fifo->refcnt, *(u32*)data); 47 struct nv04_nvsw_get_ref_v0 v0;
45 return 0; 48 } *args = data;
49 int ret;
50
51 if (nvif_unpack(args->v0, 0, 0, false)) {
52 args->v0.ref = atomic_read(&chan->ref);
53 }
54
55 return ret;
46} 56}
47 57
48static int 58static int
49nv04_sw_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size) 59nv04_nvsw_mthd(struct nvkm_nvsw *nvsw, u32 mthd, void *data, u32 size)
50{ 60{
51 struct nv04_sw_chan *chan = (void *)nv_engctx(object->parent); 61 switch (mthd) {
52 if (chan->base.flip) 62 case NV04_NVSW_GET_REF:
53 return chan->base.flip(chan->base.flip_data); 63 return nv04_nvsw_mthd_get_ref(nvsw, data, size);
64 default:
65 break;
66 }
54 return -EINVAL; 67 return -EINVAL;
55} 68}
56 69
57static struct nvkm_omthds 70static const struct nvkm_nvsw_func
58nv04_sw_omthds[] = { 71nv04_nvsw = {
59 { 0x0150, 0x0150, nv04_sw_set_ref }, 72 .mthd = nv04_nvsw_mthd,
60 { 0x0500, 0x0500, nv04_sw_flip },
61 {}
62}; 73};
63 74
64static struct nvkm_oclass 75static int
65nv04_sw_sclass[] = { 76nv04_nvsw_new(struct nvkm_sw_chan *chan, const struct nvkm_oclass *oclass,
66 { 0x006e, &nvkm_object_ofuncs, nv04_sw_omthds }, 77 void *data, u32 size, struct nvkm_object **pobject)
67 {} 78{
68}; 79 return nvkm_nvsw_new_(&nv04_nvsw, chan, oclass, data, size, pobject);
80}
69 81
70/******************************************************************************* 82/*******************************************************************************
71 * software context 83 * software context
72 ******************************************************************************/ 84 ******************************************************************************/
73 85
74static int 86static bool
75nv04_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 87nv04_sw_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
76 struct nvkm_oclass *oclass, void *data, u32 size,
77 struct nvkm_object **pobject)
78{ 88{
79 struct nv04_sw_chan *chan; 89 struct nv04_sw_chan *chan = nv04_sw_chan(base);
80 int ret;
81 90
82 ret = nvkm_sw_context_create(parent, engine, oclass, &chan); 91 switch (mthd) {
83 *pobject = nv_object(chan); 92 case 0x0150:
84 if (ret) 93 atomic_set(&chan->ref, data);
85 return ret; 94 return true;
95 default:
96 break;
97 }
86 98
87 return 0; 99 return false;
88} 100}
89 101
90static struct nvkm_oclass 102static const struct nvkm_sw_chan_func
91nv04_sw_cclass = { 103nv04_sw_chan = {
92 .handle = NV_ENGCTX(SW, 0x04), 104 .mthd = nv04_sw_chan_mthd,
93 .ofuncs = &(struct nvkm_ofuncs) {
94 .ctor = nv04_sw_context_ctor,
95 .dtor = _nvkm_sw_context_dtor,
96 .init = _nvkm_sw_context_init,
97 .fini = _nvkm_sw_context_fini,
98 },
99}; 105};
100 106
101/*******************************************************************************
102 * software engine/subdev functions
103 ******************************************************************************/
104
105void
106nv04_sw_intr(struct nvkm_subdev *subdev)
107{
108 nv_mask(subdev, 0x000100, 0x80000000, 0x00000000);
109}
110
111static int 107static int
112nv04_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 108nv04_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo,
113 struct nvkm_oclass *oclass, void *data, u32 size, 109 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
114 struct nvkm_object **pobject)
115{ 110{
116 struct nv04_sw_priv *priv; 111 struct nv04_sw_chan *chan;
117 int ret;
118 112
119 ret = nvkm_sw_create(parent, engine, oclass, &priv); 113 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
120 *pobject = nv_object(priv); 114 return -ENOMEM;
121 if (ret) 115 atomic_set(&chan->ref, 0);
122 return ret; 116 *pobject = &chan->base.object;
123 117
124 nv_engine(priv)->cclass = &nv04_sw_cclass; 118 return nvkm_sw_chan_ctor(&nv04_sw_chan, sw, fifo, oclass, &chan->base);
125 nv_engine(priv)->sclass = nv04_sw_sclass;
126 nv_subdev(priv)->intr = nv04_sw_intr;
127 return 0;
128} 119}
129 120
130struct nvkm_oclass * 121/*******************************************************************************
131nv04_sw_oclass = &(struct nvkm_oclass) { 122 * software engine/subdev functions
132 .handle = NV_ENGINE(SW, 0x04), 123 ******************************************************************************/
133 .ofuncs = &(struct nvkm_ofuncs) { 124
134 .ctor = nv04_sw_ctor, 125static const struct nvkm_sw_func
135 .dtor = _nvkm_sw_dtor, 126nv04_sw = {
136 .init = _nvkm_sw_init, 127 .chan_new = nv04_sw_chan_new,
137 .fini = _nvkm_sw_fini, 128 .sclass = {
138 }, 129 { nv04_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV04 } },
130 {}
131 }
139}; 132};
133
134int
135nv04_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
136{
137 return nvkm_sw_new_(&nv04_sw, device, index, psw);
138}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
index c61153a3fb8b..adf70d92b244 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
@@ -21,102 +21,48 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <engine/sw.h> 24#include "priv.h"
25#include "chan.h"
26#include "nvsw.h"
25 27
26struct nv10_sw_priv { 28#include <nvif/ioctl.h>
27 struct nvkm_sw base;
28};
29
30struct nv10_sw_chan {
31 struct nvkm_sw_chan base;
32};
33 29
34/******************************************************************************* 30/*******************************************************************************
35 * software object classes 31 * software context
36 ******************************************************************************/ 32 ******************************************************************************/
37 33
38static int 34static const struct nvkm_sw_chan_func
39nv10_sw_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size) 35nv10_sw_chan = {
40{
41 struct nv10_sw_chan *chan = (void *)nv_engctx(object->parent);
42 if (chan->base.flip)
43 return chan->base.flip(chan->base.flip_data);
44 return -EINVAL;
45}
46
47static struct nvkm_omthds
48nv10_sw_omthds[] = {
49 { 0x0500, 0x0500, nv10_sw_flip },
50 {}
51};
52
53static struct nvkm_oclass
54nv10_sw_sclass[] = {
55 { 0x016e, &nvkm_object_ofuncs, nv10_sw_omthds },
56 {}
57}; 36};
58 37
59/*******************************************************************************
60 * software context
61 ******************************************************************************/
62
63static int 38static int
64nv10_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 39nv10_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo,
65 struct nvkm_oclass *oclass, void *data, u32 size, 40 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
66 struct nvkm_object **pobject)
67{ 41{
68 struct nv10_sw_chan *chan; 42 struct nvkm_sw_chan *chan;
69 int ret;
70 43
71 ret = nvkm_sw_context_create(parent, engine, oclass, &chan); 44 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
72 *pobject = nv_object(chan); 45 return -ENOMEM;
73 if (ret) 46 *pobject = &chan->object;
74 return ret;
75 47
76 return 0; 48 return nvkm_sw_chan_ctor(&nv10_sw_chan, sw, fifo, oclass, chan);
77} 49}
78 50
79static struct nvkm_oclass
80nv10_sw_cclass = {
81 .handle = NV_ENGCTX(SW, 0x04),
82 .ofuncs = &(struct nvkm_ofuncs) {
83 .ctor = nv10_sw_context_ctor,
84 .dtor = _nvkm_sw_context_dtor,
85 .init = _nvkm_sw_context_init,
86 .fini = _nvkm_sw_context_fini,
87 },
88};
89
90/******************************************************************************* 51/*******************************************************************************
91 * software engine/subdev functions 52 * software engine/subdev functions
92 ******************************************************************************/ 53 ******************************************************************************/
93 54
94static int 55static const struct nvkm_sw_func
95nv10_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 56nv10_sw = {
96 struct nvkm_oclass *oclass, void *data, u32 size, 57 .chan_new = nv10_sw_chan_new,
97 struct nvkm_object **pobject) 58 .sclass = {
98{ 59 { nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV10 } },
99 struct nv10_sw_priv *priv; 60 {}
100 int ret; 61 }
101 62};
102 ret = nvkm_sw_create(parent, engine, oclass, &priv);
103 *pobject = nv_object(priv);
104 if (ret)
105 return ret;
106 63
107 nv_engine(priv)->cclass = &nv10_sw_cclass; 64int
108 nv_engine(priv)->sclass = nv10_sw_sclass; 65nv10_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
109 nv_subdev(priv)->intr = nv04_sw_intr; 66{
110 return 0; 67 return nvkm_sw_new_(&nv10_sw, device, index, psw);
111} 68}
112
113struct nvkm_oclass *
114nv10_sw_oclass = &(struct nvkm_oclass) {
115 .handle = NV_ENGINE(SW, 0x10),
116 .ofuncs = &(struct nvkm_ofuncs) {
117 .ctor = nv10_sw_ctor,
118 .dtor = _nvkm_sw_dtor,
119 .init = _nvkm_sw_init,
120 .fini = _nvkm_sw_fini,
121 },
122};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
index 401fcd73086b..a381196af69d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
@@ -23,153 +23,98 @@
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25 25
26#include <core/device.h> 26#include <core/gpuobj.h>
27#include <core/handle.h>
28#include <core/namedb.h>
29#include <engine/disp.h> 27#include <engine/disp.h>
28#include <engine/fifo/chan.h>
30#include <subdev/bar.h> 29#include <subdev/bar.h>
31 30
32#include <nvif/event.h> 31#include <nvif/event.h>
33 32#include <nvif/ioctl.h>
34/*******************************************************************************
35 * software object classes
36 ******************************************************************************/
37
38static int
39nv50_sw_mthd_dma_vblsem(struct nvkm_object *object, u32 mthd,
40 void *args, u32 size)
41{
42 struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
43 struct nvkm_fifo_chan *fifo = (void *)nv_object(chan)->parent;
44 struct nvkm_handle *handle;
45 int ret = -EINVAL;
46
47 handle = nvkm_namedb_get(nv_namedb(fifo), *(u32 *)args);
48 if (!handle)
49 return -ENOENT;
50
51 if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
52 struct nvkm_gpuobj *gpuobj = nv_gpuobj(handle->object);
53 chan->vblank.ctxdma = gpuobj->node->offset >> 4;
54 ret = 0;
55 }
56 nvkm_namedb_put(handle);
57 return ret;
58}
59
60static int
61nv50_sw_mthd_vblsem_offset(struct nvkm_object *object, u32 mthd,
62 void *args, u32 size)
63{
64 struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
65 chan->vblank.offset = *(u32 *)args;
66 return 0;
67}
68
69int
70nv50_sw_mthd_vblsem_value(struct nvkm_object *object, u32 mthd,
71 void *args, u32 size)
72{
73 struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
74 chan->vblank.value = *(u32 *)args;
75 return 0;
76}
77
78int
79nv50_sw_mthd_vblsem_release(struct nvkm_object *object, u32 mthd,
80 void *args, u32 size)
81{
82 struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
83 u32 head = *(u32 *)args;
84 if (head >= nvkm_disp(chan)->vblank.index_nr)
85 return -EINVAL;
86
87 nvkm_notify_get(&chan->vblank.notify[head]);
88 return 0;
89}
90
91int
92nv50_sw_mthd_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size)
93{
94 struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
95 if (chan->base.flip)
96 return chan->base.flip(chan->base.flip_data);
97 return -EINVAL;
98}
99
100static struct nvkm_omthds
101nv50_sw_omthds[] = {
102 { 0x018c, 0x018c, nv50_sw_mthd_dma_vblsem },
103 { 0x0400, 0x0400, nv50_sw_mthd_vblsem_offset },
104 { 0x0404, 0x0404, nv50_sw_mthd_vblsem_value },
105 { 0x0408, 0x0408, nv50_sw_mthd_vblsem_release },
106 { 0x0500, 0x0500, nv50_sw_mthd_flip },
107 {}
108};
109
110static struct nvkm_oclass
111nv50_sw_sclass[] = {
112 { 0x506e, &nvkm_object_ofuncs, nv50_sw_omthds },
113 {}
114};
115 33
116/******************************************************************************* 34/*******************************************************************************
117 * software context 35 * software context
118 ******************************************************************************/ 36 ******************************************************************************/
119 37
120static int 38static int
121nv50_sw_vblsem_release(struct nvkm_notify *notify) 39nv50_sw_chan_vblsem_release(struct nvkm_notify *notify)
122{ 40{
123 struct nv50_sw_chan *chan = 41 struct nv50_sw_chan *chan =
124 container_of(notify, typeof(*chan), vblank.notify[notify->index]); 42 container_of(notify, typeof(*chan), vblank.notify[notify->index]);
125 struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine; 43 struct nvkm_sw *sw = chan->base.sw;
126 struct nvkm_bar *bar = nvkm_bar(priv); 44 struct nvkm_device *device = sw->engine.subdev.device;
127 45
128 nv_wr32(priv, 0x001704, chan->vblank.channel); 46 nvkm_wr32(device, 0x001704, chan->base.fifo->inst->addr >> 12);
129 nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma); 47 nvkm_wr32(device, 0x001710, 0x80000000 | chan->vblank.ctxdma);
130 bar->flush(bar); 48 nvkm_bar_flush(device->bar);
131 49
132 if (nv_device(priv)->chipset == 0x50) { 50 if (device->chipset == 0x50) {
133 nv_wr32(priv, 0x001570, chan->vblank.offset); 51 nvkm_wr32(device, 0x001570, chan->vblank.offset);
134 nv_wr32(priv, 0x001574, chan->vblank.value); 52 nvkm_wr32(device, 0x001574, chan->vblank.value);
135 } else { 53 } else {
136 nv_wr32(priv, 0x060010, chan->vblank.offset); 54 nvkm_wr32(device, 0x060010, chan->vblank.offset);
137 nv_wr32(priv, 0x060014, chan->vblank.value); 55 nvkm_wr32(device, 0x060014, chan->vblank.value);
138 } 56 }
139 57
140 return NVKM_NOTIFY_DROP; 58 return NVKM_NOTIFY_DROP;
141} 59}
142 60
143void 61static bool
144nv50_sw_context_dtor(struct nvkm_object *object) 62nv50_sw_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
145{ 63{
146 struct nv50_sw_chan *chan = (void *)object; 64 struct nv50_sw_chan *chan = nv50_sw_chan(base);
147 int i; 65 struct nvkm_engine *engine = chan->base.object.engine;
66 struct nvkm_device *device = engine->subdev.device;
67 switch (mthd) {
68 case 0x018c: chan->vblank.ctxdma = data; return true;
69 case 0x0400: chan->vblank.offset = data; return true;
70 case 0x0404: chan->vblank.value = data; return true;
71 case 0x0408:
72 if (data < device->disp->vblank.index_nr) {
73 nvkm_notify_get(&chan->vblank.notify[data]);
74 return true;
75 }
76 break;
77 default:
78 break;
79 }
80 return false;
81}
148 82
83void *
84nv50_sw_chan_dtor(struct nvkm_sw_chan *base)
85{
86 struct nv50_sw_chan *chan = nv50_sw_chan(base);
87 int i;
149 for (i = 0; i < ARRAY_SIZE(chan->vblank.notify); i++) 88 for (i = 0; i < ARRAY_SIZE(chan->vblank.notify); i++)
150 nvkm_notify_fini(&chan->vblank.notify[i]); 89 nvkm_notify_fini(&chan->vblank.notify[i]);
151 90 return chan;
152 nvkm_sw_context_destroy(&chan->base);
153} 91}
154 92
155int 93static const struct nvkm_sw_chan_func
156nv50_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 94nv50_sw_chan = {
157 struct nvkm_oclass *oclass, void *data, u32 size, 95 .dtor = nv50_sw_chan_dtor,
158 struct nvkm_object **pobject) 96 .mthd = nv50_sw_chan_mthd,
97};
98
99static int
100nv50_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifoch,
101 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
159{ 102{
160 struct nvkm_disp *pdisp = nvkm_disp(parent); 103 struct nvkm_disp *disp = sw->engine.subdev.device->disp;
161 struct nv50_sw_cclass *pclass = (void *)oclass;
162 struct nv50_sw_chan *chan; 104 struct nv50_sw_chan *chan;
163 int ret, i; 105 int ret, i;
164 106
165 ret = nvkm_sw_context_create(parent, engine, oclass, &chan); 107 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
166 *pobject = nv_object(chan); 108 return -ENOMEM;
109 *pobject = &chan->base.object;
110
111 ret = nvkm_sw_chan_ctor(&nv50_sw_chan, sw, fifoch, oclass, &chan->base);
167 if (ret) 112 if (ret)
168 return ret; 113 return ret;
169 114
170 for (i = 0; pdisp && i < pdisp->vblank.index_nr; i++) { 115 for (i = 0; disp && i < disp->vblank.index_nr; i++) {
171 ret = nvkm_notify_init(NULL, &pdisp->vblank, pclass->vblank, 116 ret = nvkm_notify_init(NULL, &disp->vblank,
172 false, 117 nv50_sw_chan_vblsem_release, false,
173 &(struct nvif_notify_head_req_v0) { 118 &(struct nvif_notify_head_req_v0) {
174 .head = i, 119 .head = i,
175 }, 120 },
@@ -180,55 +125,24 @@ nv50_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
180 return ret; 125 return ret;
181 } 126 }
182 127
183 chan->vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
184 return 0; 128 return 0;
185} 129}
186 130
187static struct nv50_sw_cclass
188nv50_sw_cclass = {
189 .base.handle = NV_ENGCTX(SW, 0x50),
190 .base.ofuncs = &(struct nvkm_ofuncs) {
191 .ctor = nv50_sw_context_ctor,
192 .dtor = nv50_sw_context_dtor,
193 .init = _nvkm_sw_context_init,
194 .fini = _nvkm_sw_context_fini,
195 },
196 .vblank = nv50_sw_vblsem_release,
197};
198
199/******************************************************************************* 131/*******************************************************************************
200 * software engine/subdev functions 132 * software engine/subdev functions
201 ******************************************************************************/ 133 ******************************************************************************/
202 134
135static const struct nvkm_sw_func
136nv50_sw = {
137 .chan_new = nv50_sw_chan_new,
138 .sclass = {
139 { nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV50 } },
140 {}
141 }
142};
143
203int 144int
204nv50_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 145nv50_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
205 struct nvkm_oclass *oclass, void *data, u32 size,
206 struct nvkm_object **pobject)
207{ 146{
208 struct nv50_sw_oclass *pclass = (void *)oclass; 147 return nvkm_sw_new_(&nv50_sw, device, index, psw);
209 struct nv50_sw_priv *priv;
210 int ret;
211
212 ret = nvkm_sw_create(parent, engine, oclass, &priv);
213 *pobject = nv_object(priv);
214 if (ret)
215 return ret;
216
217 nv_engine(priv)->cclass = pclass->cclass;
218 nv_engine(priv)->sclass = pclass->sclass;
219 nv_subdev(priv)->intr = nv04_sw_intr;
220 return 0;
221} 148}
222
223struct nvkm_oclass *
224nv50_sw_oclass = &(struct nv50_sw_oclass) {
225 .base.handle = NV_ENGINE(SW, 0x50),
226 .base.ofuncs = &(struct nvkm_ofuncs) {
227 .ctor = nv50_sw_ctor,
228 .dtor = _nvkm_sw_dtor,
229 .init = _nvkm_sw_init,
230 .fini = _nvkm_sw_fini,
231 },
232 .cclass = &nv50_sw_cclass.base,
233 .sclass = nv50_sw_sclass,
234}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
index d8adc1108467..25cdfdef2d46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
@@ -1,45 +1,20 @@
1#ifndef __NVKM_SW_NV50_H__ 1#ifndef __NVKM_SW_NV50_H__
2#define __NVKM_SW_NV50_H__ 2#define __NVKM_SW_NV50_H__
3#include <engine/sw.h> 3#define nv50_sw_chan(p) container_of((p), struct nv50_sw_chan, base)
4#include "priv.h"
5#include "chan.h"
6#include "nvsw.h"
4#include <core/notify.h> 7#include <core/notify.h>
5 8
6struct nv50_sw_oclass {
7 struct nvkm_oclass base;
8 struct nvkm_oclass *cclass;
9 struct nvkm_oclass *sclass;
10};
11
12struct nv50_sw_priv {
13 struct nvkm_sw base;
14};
15
16int nv50_sw_ctor(struct nvkm_object *, struct nvkm_object *,
17 struct nvkm_oclass *, void *, u32,
18 struct nvkm_object **);
19
20struct nv50_sw_cclass {
21 struct nvkm_oclass base;
22 int (*vblank)(struct nvkm_notify *);
23};
24
25struct nv50_sw_chan { 9struct nv50_sw_chan {
26 struct nvkm_sw_chan base; 10 struct nvkm_sw_chan base;
27 struct { 11 struct {
28 struct nvkm_notify notify[4]; 12 struct nvkm_notify notify[4];
29 u32 channel;
30 u32 ctxdma; 13 u32 ctxdma;
31 u64 offset; 14 u64 offset;
32 u32 value; 15 u32 value;
33 } vblank; 16 } vblank;
34}; 17};
35 18
36int nv50_sw_context_ctor(struct nvkm_object *, 19void *nv50_sw_chan_dtor(struct nvkm_sw_chan *);
37 struct nvkm_object *,
38 struct nvkm_oclass *, void *, u32,
39 struct nvkm_object **);
40void nv50_sw_context_dtor(struct nvkm_object *);
41
42int nv50_sw_mthd_vblsem_value(struct nvkm_object *, u32, void *, u32);
43int nv50_sw_mthd_vblsem_release(struct nvkm_object *, u32, void *, u32);
44int nv50_sw_mthd_flip(struct nvkm_object *, u32, void *, u32);
45#endif 20#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c
new file mode 100644
index 000000000000..66cf986b9572
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c
@@ -0,0 +1,85 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "nvsw.h"
25#include "chan.h"
26
27#include <nvif/class.h>
28
29static int
30nvkm_nvsw_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
31{
32 struct nvkm_nvsw *nvsw = nvkm_nvsw(object);
33 if (nvsw->func->mthd)
34 return nvsw->func->mthd(nvsw, mthd, data, size);
35 return -ENODEV;
36}
37
38static int
39nvkm_nvsw_ntfy_(struct nvkm_object *object, u32 mthd,
40 struct nvkm_event **pevent)
41{
42 struct nvkm_nvsw *nvsw = nvkm_nvsw(object);
43 switch (mthd) {
44 case NVSW_NTFY_UEVENT:
45 *pevent = &nvsw->chan->event;
46 return 0;
47 default:
48 break;
49 }
50 return -EINVAL;
51}
52
53static const struct nvkm_object_func
54nvkm_nvsw_ = {
55 .mthd = nvkm_nvsw_mthd_,
56 .ntfy = nvkm_nvsw_ntfy_,
57};
58
59int
60nvkm_nvsw_new_(const struct nvkm_nvsw_func *func, struct nvkm_sw_chan *chan,
61 const struct nvkm_oclass *oclass, void *data, u32 size,
62 struct nvkm_object **pobject)
63{
64 struct nvkm_nvsw *nvsw;
65
66 if (!(nvsw = kzalloc(sizeof(*nvsw), GFP_KERNEL)))
67 return -ENOMEM;
68 *pobject = &nvsw->object;
69
70 nvkm_object_ctor(&nvkm_nvsw_, oclass, &nvsw->object);
71 nvsw->func = func;
72 nvsw->chan = chan;
73 return 0;
74}
75
76static const struct nvkm_nvsw_func
77nvkm_nvsw = {
78};
79
80int
81nvkm_nvsw_new(struct nvkm_sw_chan *chan, const struct nvkm_oclass *oclass,
82 void *data, u32 size, struct nvkm_object **pobject)
83{
84 return nvkm_nvsw_new_(&nvkm_nvsw, chan, oclass, data, size, pobject);
85}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
new file mode 100644
index 000000000000..943ef4c10091
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
@@ -0,0 +1,21 @@
1#ifndef __NVKM_NVSW_H__
2#define __NVKM_NVSW_H__
3#define nvkm_nvsw(p) container_of((p), struct nvkm_nvsw, object)
4#include "priv.h"
5
6struct nvkm_nvsw {
7 struct nvkm_object object;
8 const struct nvkm_nvsw_func *func;
9 struct nvkm_sw_chan *chan;
10};
11
12struct nvkm_nvsw_func {
13 int (*mthd)(struct nvkm_nvsw *, u32 mthd, void *data, u32 size);
14};
15
16int nvkm_nvsw_new_(const struct nvkm_nvsw_func *, struct nvkm_sw_chan *,
17 const struct nvkm_oclass *, void *data, u32 size,
18 struct nvkm_object **pobject);
19int nvkm_nvsw_new(struct nvkm_sw_chan *, const struct nvkm_oclass *,
20 void *data, u32 size, struct nvkm_object **pobject);
21#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
new file mode 100644
index 000000000000..0ef1318dc2fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
@@ -0,0 +1,21 @@
1#ifndef __NVKM_SW_PRIV_H__
2#define __NVKM_SW_PRIV_H__
3#define nvkm_sw(p) container_of((p), struct nvkm_sw, engine)
4#include <engine/sw.h>
5struct nvkm_sw_chan;
6
7int nvkm_sw_new_(const struct nvkm_sw_func *, struct nvkm_device *,
8 int index, struct nvkm_sw **);
9
10struct nvkm_sw_chan_sclass {
11 int (*ctor)(struct nvkm_sw_chan *, const struct nvkm_oclass *,
12 void *data, u32 size, struct nvkm_object **);
13 struct nvkm_sclass base;
14};
15
16struct nvkm_sw_func {
17 int (*chan_new)(struct nvkm_sw *, struct nvkm_fifo_chan *,
18 const struct nvkm_oclass *, struct nvkm_object **);
19 const struct nvkm_sw_chan_sclass sclass[];
20};
21#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
index 45f4e186befc..4188c77ac927 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
@@ -22,72 +22,23 @@
22 * Authors: Ben Skeggs, Ilia Mirkin 22 * Authors: Ben Skeggs, Ilia Mirkin
23 */ 23 */
24#include <engine/vp.h> 24#include <engine/vp.h>
25#include <engine/xtensa.h>
26 25
27#include <core/engctx.h> 26#include <nvif/class.h>
28 27
29/******************************************************************************* 28static const struct nvkm_xtensa_func
30 * VP object classes 29g84_vp = {
31 ******************************************************************************/ 30 .pmc_enable = 0x01020000,
32 31 .fifo_val = 0x111,
33static struct nvkm_oclass 32 .unkd28 = 0x9c544,
34g84_vp_sclass[] = { 33 .sclass = {
35 { 0x7476, &nvkm_object_ofuncs }, 34 { -1, -1, NV74_VP2 },
36 {}, 35 {}
37}; 36 }
38
39/*******************************************************************************
40 * PVP context
41 ******************************************************************************/
42
43static struct nvkm_oclass
44g84_vp_cclass = {
45 .handle = NV_ENGCTX(VP, 0x84),
46 .ofuncs = &(struct nvkm_ofuncs) {
47 .ctor = _nvkm_xtensa_engctx_ctor,
48 .dtor = _nvkm_engctx_dtor,
49 .init = _nvkm_engctx_init,
50 .fini = _nvkm_engctx_fini,
51 .rd32 = _nvkm_engctx_rd32,
52 .wr32 = _nvkm_engctx_wr32,
53 },
54}; 37};
55 38
56/******************************************************************************* 39int
57 * PVP engine/subdev functions 40g84_vp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
58 ******************************************************************************/
59
60static int
61g84_vp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
62 struct nvkm_oclass *oclass, void *data, u32 size,
63 struct nvkm_object **pobject)
64{ 41{
65 struct nvkm_xtensa *priv; 42 return nvkm_xtensa_new_(&g84_vp, device, index,
66 int ret; 43 true, 0x00f000, pengine);
67
68 ret = nvkm_xtensa_create(parent, engine, oclass, 0xf000, true,
69 "PVP", "vp", &priv);
70 *pobject = nv_object(priv);
71 if (ret)
72 return ret;
73
74 nv_subdev(priv)->unit = 0x01020000;
75 nv_engine(priv)->cclass = &g84_vp_cclass;
76 nv_engine(priv)->sclass = g84_vp_sclass;
77 priv->fifo_val = 0x111;
78 priv->unkd28 = 0x9c544;
79 return 0;
80} 44}
81
82struct nvkm_oclass
83g84_vp_oclass = {
84 .handle = NV_ENGINE(VP, 0x84),
85 .ofuncs = &(struct nvkm_ofuncs) {
86 .ctor = g84_vp_ctor,
87 .dtor = _nvkm_xtensa_dtor,
88 .init = _nvkm_xtensa_init,
89 .fini = _nvkm_xtensa_fini,
90 .rd32 = _nvkm_xtensa_rd32,
91 .wr32 = _nvkm_xtensa_wr32,
92 },
93};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
index cea90df533d9..a3d4f5bcec7a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
@@ -20,153 +20,173 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 */ 21 */
22#include <engine/xtensa.h> 22#include <engine/xtensa.h>
23#include <core/device.h>
24 23
25#include <core/engctx.h> 24#include <core/gpuobj.h>
25#include <engine/fifo.h>
26 26
27u32 27static int
28_nvkm_xtensa_rd32(struct nvkm_object *object, u64 addr) 28nvkm_xtensa_oclass_get(struct nvkm_oclass *oclass, int index)
29{ 29{
30 struct nvkm_xtensa *xtensa = (void *)object; 30 struct nvkm_xtensa *xtensa = nvkm_xtensa(oclass->engine);
31 return nv_rd32(xtensa, xtensa->addr + addr); 31 int c = 0;
32}
33 32
34void 33 while (xtensa->func->sclass[c].oclass) {
35_nvkm_xtensa_wr32(struct nvkm_object *object, u64 addr, u32 data) 34 if (c++ == index) {
36{ 35 oclass->base = xtensa->func->sclass[index];
37 struct nvkm_xtensa *xtensa = (void *)object; 36 return index;
38 nv_wr32(xtensa, xtensa->addr + addr, data); 37 }
38 }
39
40 return c;
39} 41}
40 42
41int 43static int
42_nvkm_xtensa_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 44nvkm_xtensa_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
43 struct nvkm_oclass *oclass, void *data, u32 size, 45 int align, struct nvkm_gpuobj **pgpuobj)
44 struct nvkm_object **pobject)
45{ 46{
46 struct nvkm_engctx *engctx; 47 return nvkm_gpuobj_new(object->engine->subdev.device, 0x10000, align,
47 int ret; 48 true, parent, pgpuobj);
48
49 ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000, 0x1000,
50 NVOBJ_FLAG_ZERO_ALLOC, &engctx);
51 *pobject = nv_object(engctx);
52 return ret;
53} 49}
54 50
55void 51static const struct nvkm_object_func
56_nvkm_xtensa_intr(struct nvkm_subdev *subdev) 52nvkm_xtensa_cclass = {
53 .bind = nvkm_xtensa_cclass_bind,
54};
55
56static void
57nvkm_xtensa_intr(struct nvkm_engine *engine)
57{ 58{
58 struct nvkm_xtensa *xtensa = (void *)subdev; 59 struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
59 u32 unk104 = nv_ro32(xtensa, 0xd04); 60 struct nvkm_subdev *subdev = &xtensa->engine.subdev;
60 u32 intr = nv_ro32(xtensa, 0xc20); 61 struct nvkm_device *device = subdev->device;
61 u32 chan = nv_ro32(xtensa, 0xc28); 62 const u32 base = xtensa->addr;
62 u32 unk10c = nv_ro32(xtensa, 0xd0c); 63 u32 unk104 = nvkm_rd32(device, base + 0xd04);
64 u32 intr = nvkm_rd32(device, base + 0xc20);
65 u32 chan = nvkm_rd32(device, base + 0xc28);
66 u32 unk10c = nvkm_rd32(device, base + 0xd0c);
63 67
64 if (intr & 0x10) 68 if (intr & 0x10)
65 nv_warn(xtensa, "Watchdog interrupt, engine hung.\n"); 69 nvkm_warn(subdev, "Watchdog interrupt, engine hung.\n");
66 nv_wo32(xtensa, 0xc20, intr); 70 nvkm_wr32(device, base + 0xc20, intr);
67 intr = nv_ro32(xtensa, 0xc20); 71 intr = nvkm_rd32(device, base + 0xc20);
68 if (unk104 == 0x10001 && unk10c == 0x200 && chan && !intr) { 72 if (unk104 == 0x10001 && unk10c == 0x200 && chan && !intr) {
69 nv_debug(xtensa, "Enabling FIFO_CTRL\n"); 73 nvkm_debug(subdev, "Enabling FIFO_CTRL\n");
70 nv_mask(xtensa, xtensa->addr + 0xd94, 0, xtensa->fifo_val); 74 nvkm_mask(device, xtensa->addr + 0xd94, 0, xtensa->func->fifo_val);
71 } 75 }
72} 76}
73 77
74int 78static int
75nvkm_xtensa_create_(struct nvkm_object *parent, struct nvkm_object *engine, 79nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend)
76 struct nvkm_oclass *oclass, u32 addr, bool enable,
77 const char *iname, const char *fname,
78 int length, void **pobject)
79{ 80{
80 struct nvkm_xtensa *xtensa; 81 struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
81 int ret; 82 struct nvkm_device *device = xtensa->engine.subdev.device;
83 const u32 base = xtensa->addr;
82 84
83 ret = nvkm_engine_create_(parent, engine, oclass, enable, iname, 85 nvkm_wr32(device, base + 0xd84, 0); /* INTR_EN */
84 fname, length, pobject); 86 nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */
85 xtensa = *pobject;
86 if (ret)
87 return ret;
88 87
89 nv_subdev(xtensa)->intr = _nvkm_xtensa_intr; 88 if (!suspend)
90 xtensa->addr = addr; 89 nvkm_memory_del(&xtensa->gpu_fw);
91 return 0; 90 return 0;
92} 91}
93 92
94int 93static int
95_nvkm_xtensa_init(struct nvkm_object *object) 94nvkm_xtensa_init(struct nvkm_engine *engine)
96{ 95{
97 struct nvkm_device *device = nv_device(object); 96 struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
98 struct nvkm_xtensa *xtensa = (void *)object; 97 struct nvkm_subdev *subdev = &xtensa->engine.subdev;
98 struct nvkm_device *device = subdev->device;
99 const u32 base = xtensa->addr;
99 const struct firmware *fw; 100 const struct firmware *fw;
100 char name[32]; 101 char name[32];
101 int i, ret; 102 int i, ret;
103 u64 addr, size;
102 u32 tmp; 104 u32 tmp;
103 105
104 ret = nvkm_engine_init(&xtensa->base);
105 if (ret)
106 return ret;
107
108 if (!xtensa->gpu_fw) { 106 if (!xtensa->gpu_fw) {
109 snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x", 107 snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
110 xtensa->addr >> 12); 108 xtensa->addr >> 12);
111 109
112 ret = request_firmware(&fw, name, nv_device_base(device)); 110 ret = request_firmware(&fw, name, device->dev);
113 if (ret) { 111 if (ret) {
114 nv_warn(xtensa, "unable to load firmware %s\n", name); 112 nvkm_warn(subdev, "unable to load firmware %s\n", name);
115 return ret; 113 return ret;
116 } 114 }
117 115
118 if (fw->size > 0x40000) { 116 if (fw->size > 0x40000) {
119 nv_warn(xtensa, "firmware %s too large\n", name); 117 nvkm_warn(subdev, "firmware %s too large\n", name);
120 release_firmware(fw); 118 release_firmware(fw);
121 return -EINVAL; 119 return -EINVAL;
122 } 120 }
123 121
124 ret = nvkm_gpuobj_new(object, NULL, 0x40000, 0x1000, 0, 122 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
123 0x40000, 0x1000, false,
125 &xtensa->gpu_fw); 124 &xtensa->gpu_fw);
126 if (ret) { 125 if (ret) {
127 release_firmware(fw); 126 release_firmware(fw);
128 return ret; 127 return ret;
129 } 128 }
130 129
131 nv_debug(xtensa, "Loading firmware to address: 0x%llx\n", 130 nvkm_kmap(xtensa->gpu_fw);
132 xtensa->gpu_fw->addr);
133
134 for (i = 0; i < fw->size / 4; i++) 131 for (i = 0; i < fw->size / 4; i++)
135 nv_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i)); 132 nvkm_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
133 nvkm_done(xtensa->gpu_fw);
136 release_firmware(fw); 134 release_firmware(fw);
137 } 135 }
138 136
139 nv_wo32(xtensa, 0xd10, 0x1fffffff); /* ?? */ 137 addr = nvkm_memory_addr(xtensa->gpu_fw);
140 nv_wo32(xtensa, 0xd08, 0x0fffffff); /* ?? */ 138 size = nvkm_memory_size(xtensa->gpu_fw);
141 139
142 nv_wo32(xtensa, 0xd28, xtensa->unkd28); /* ?? */ 140 nvkm_wr32(device, base + 0xd10, 0x1fffffff); /* ?? */
143 nv_wo32(xtensa, 0xc20, 0x3f); /* INTR */ 141 nvkm_wr32(device, base + 0xd08, 0x0fffffff); /* ?? */
144 nv_wo32(xtensa, 0xd84, 0x3f); /* INTR_EN */
145 142
146 nv_wo32(xtensa, 0xcc0, xtensa->gpu_fw->addr >> 8); /* XT_REGION_BASE */ 143 nvkm_wr32(device, base + 0xd28, xtensa->func->unkd28); /* ?? */
147 nv_wo32(xtensa, 0xcc4, 0x1c); /* XT_REGION_SETUP */ 144 nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
148 nv_wo32(xtensa, 0xcc8, xtensa->gpu_fw->size >> 8); /* XT_REGION_LIMIT */ 145 nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */
149 146
150 tmp = nv_rd32(xtensa, 0x0); 147 nvkm_wr32(device, base + 0xcc0, addr >> 8); /* XT_REGION_BASE */
151 nv_wo32(xtensa, 0xde0, tmp); /* SCRATCH_H2X */ 148 nvkm_wr32(device, base + 0xcc4, 0x1c); /* XT_REGION_SETUP */
149 nvkm_wr32(device, base + 0xcc8, size >> 8); /* XT_REGION_LIMIT */
152 150
153 nv_wo32(xtensa, 0xce8, 0xf); /* XT_REGION_SETUP */ 151 tmp = nvkm_rd32(device, 0x0);
152 nvkm_wr32(device, base + 0xde0, tmp); /* SCRATCH_H2X */
154 153
155 nv_wo32(xtensa, 0xc20, 0x3f); /* INTR */ 154 nvkm_wr32(device, base + 0xce8, 0xf); /* XT_REGION_SETUP */
156 nv_wo32(xtensa, 0xd84, 0x3f); /* INTR_EN */ 155
156 nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
157 nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */
157 return 0; 158 return 0;
158} 159}
159 160
160int 161static void *
161_nvkm_xtensa_fini(struct nvkm_object *object, bool suspend) 162nvkm_xtensa_dtor(struct nvkm_engine *engine)
162{ 163{
163 struct nvkm_xtensa *xtensa = (void *)object; 164 return nvkm_xtensa(engine);
165}
164 166
165 nv_wo32(xtensa, 0xd84, 0); /* INTR_EN */ 167static const struct nvkm_engine_func
166 nv_wo32(xtensa, 0xd94, 0); /* FIFO_CTRL */ 168nvkm_xtensa = {
169 .dtor = nvkm_xtensa_dtor,
170 .init = nvkm_xtensa_init,
171 .fini = nvkm_xtensa_fini,
172 .intr = nvkm_xtensa_intr,
173 .fifo.sclass = nvkm_xtensa_oclass_get,
174 .cclass = &nvkm_xtensa_cclass,
175};
167 176
168 if (!suspend) 177int
169 nvkm_gpuobj_ref(NULL, &xtensa->gpu_fw); 178nvkm_xtensa_new_(const struct nvkm_xtensa_func *func,
179 struct nvkm_device *device, int index, bool enable,
180 u32 addr, struct nvkm_engine **pengine)
181{
182 struct nvkm_xtensa *xtensa;
183
184 if (!(xtensa = kzalloc(sizeof(*xtensa), GFP_KERNEL)))
185 return -ENOMEM;
186 xtensa->func = func;
187 xtensa->addr = addr;
188 *pengine = &xtensa->engine;
170 189
171 return nvkm_engine_fini(&xtensa->base, suspend); 190 return nvkm_engine_ctor(&nvkm_xtensa, device, index, func->pmc_enable,
191 enable, &xtensa->engine);
172} 192}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index a1bb3e48739c..ee2c38f50ef5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -13,6 +13,7 @@ include $(src)/nvkm/subdev/ltc/Kbuild
13include $(src)/nvkm/subdev/mc/Kbuild 13include $(src)/nvkm/subdev/mc/Kbuild
14include $(src)/nvkm/subdev/mmu/Kbuild 14include $(src)/nvkm/subdev/mmu/Kbuild
15include $(src)/nvkm/subdev/mxm/Kbuild 15include $(src)/nvkm/subdev/mxm/Kbuild
16include $(src)/nvkm/subdev/pci/Kbuild
16include $(src)/nvkm/subdev/pmu/Kbuild 17include $(src)/nvkm/subdev/pmu/Kbuild
17include $(src)/nvkm/subdev/therm/Kbuild 18include $(src)/nvkm/subdev/therm/Kbuild
18include $(src)/nvkm/subdev/timer/Kbuild 19include $(src)/nvkm/subdev/timer/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index 1ab554a0b5e0..1e138b337955 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -1,4 +1,5 @@
1nvkm-y += nvkm/subdev/bar/base.o 1nvkm-y += nvkm/subdev/bar/base.o
2nvkm-y += nvkm/subdev/bar/nv50.o 2nvkm-y += nvkm/subdev/bar/nv50.o
3nvkm-y += nvkm/subdev/bar/g84.o
3nvkm-y += nvkm/subdev/bar/gf100.o 4nvkm-y += nvkm/subdev/bar/gf100.o
4nvkm-y += nvkm/subdev/bar/gk20a.o 5nvkm-y += nvkm/subdev/bar/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index 3502d00122ef..a9433ad45b1e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -23,122 +23,61 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26#include <core/device.h> 26void
27#include <subdev/fb.h> 27nvkm_bar_flush(struct nvkm_bar *bar)
28#include <subdev/mmu.h>
29
30struct nvkm_barobj {
31 struct nvkm_object base;
32 struct nvkm_vma vma;
33 void __iomem *iomem;
34};
35
36static int
37nvkm_barobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
38 struct nvkm_oclass *oclass, void *data, u32 size,
39 struct nvkm_object **pobject)
40{ 28{
41 struct nvkm_device *device = nv_device(parent); 29 if (bar && bar->func->flush)
42 struct nvkm_bar *bar = nvkm_bar(device); 30 bar->func->flush(bar);
43 struct nvkm_mem *mem = data;
44 struct nvkm_barobj *barobj;
45 int ret;
46
47 ret = nvkm_object_create(parent, engine, oclass, 0, &barobj);
48 *pobject = nv_object(barobj);
49 if (ret)
50 return ret;
51
52 ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma);
53 if (ret)
54 return ret;
55
56 barobj->iomem = ioremap(nv_device_resource_start(device, 3) +
57 (u32)barobj->vma.offset, mem->size << 12);
58 if (!barobj->iomem) {
59 nv_warn(bar, "PRAMIN ioremap failed\n");
60 return -ENOMEM;
61 }
62
63 return 0;
64} 31}
65 32
66static void 33struct nvkm_vm *
67nvkm_barobj_dtor(struct nvkm_object *object) 34nvkm_bar_kmap(struct nvkm_bar *bar)
68{ 35{
69 struct nvkm_bar *bar = nvkm_bar(object); 36 /* disallow kmap() until after vm has been bootstrapped */
70 struct nvkm_barobj *barobj = (void *)object; 37 if (bar && bar->func->kmap && bar->subdev.oneinit)
71 if (barobj->vma.node) { 38 return bar->func->kmap(bar);
72 if (barobj->iomem) 39 return NULL;
73 iounmap(barobj->iomem);
74 bar->unmap(bar, &barobj->vma);
75 }
76 nvkm_object_destroy(&barobj->base);
77} 40}
78 41
79static u32 42int
80nvkm_barobj_rd32(struct nvkm_object *object, u64 addr) 43nvkm_bar_umap(struct nvkm_bar *bar, u64 size, int type, struct nvkm_vma *vma)
81{ 44{
82 struct nvkm_barobj *barobj = (void *)object; 45 return bar->func->umap(bar, size, type, vma);
83 return ioread32_native(barobj->iomem + addr);
84} 46}
85 47
86static void 48static int
87nvkm_barobj_wr32(struct nvkm_object *object, u64 addr, u32 data) 49nvkm_bar_oneinit(struct nvkm_subdev *subdev)
88{ 50{
89 struct nvkm_barobj *barobj = (void *)object; 51 struct nvkm_bar *bar = nvkm_bar(subdev);
90 iowrite32_native(data, barobj->iomem + addr); 52 return bar->func->oneinit(bar);
91} 53}
92 54
93static struct nvkm_oclass 55static int
94nvkm_barobj_oclass = { 56nvkm_bar_init(struct nvkm_subdev *subdev)
95 .ofuncs = &(struct nvkm_ofuncs) {
96 .ctor = nvkm_barobj_ctor,
97 .dtor = nvkm_barobj_dtor,
98 .init = nvkm_object_init,
99 .fini = nvkm_object_fini,
100 .rd32 = nvkm_barobj_rd32,
101 .wr32 = nvkm_barobj_wr32,
102 },
103};
104
105int
106nvkm_bar_alloc(struct nvkm_bar *bar, struct nvkm_object *parent,
107 struct nvkm_mem *mem, struct nvkm_object **pobject)
108{ 57{
109 struct nvkm_object *gpuobj; 58 struct nvkm_bar *bar = nvkm_bar(subdev);
110 int ret = nvkm_object_ctor(parent, &parent->engine->subdev.object, 59 return bar->func->init(bar);
111 &nvkm_barobj_oclass, mem, 0, &gpuobj);
112 if (ret == 0)
113 *pobject = gpuobj;
114 return ret;
115} 60}
116 61
117int 62static void *
118nvkm_bar_create_(struct nvkm_object *parent, struct nvkm_object *engine, 63nvkm_bar_dtor(struct nvkm_subdev *subdev)
119 struct nvkm_oclass *oclass, int length, void **pobject)
120{ 64{
121 struct nvkm_bar *bar; 65 struct nvkm_bar *bar = nvkm_bar(subdev);
122 int ret; 66 return bar->func->dtor(bar);
123
124 ret = nvkm_subdev_create_(parent, engine, oclass, 0, "BARCTL",
125 "bar", length, pobject);
126 bar = *pobject;
127 if (ret)
128 return ret;
129
130 return 0;
131} 67}
132 68
133void 69static const struct nvkm_subdev_func
134nvkm_bar_destroy(struct nvkm_bar *bar) 70nvkm_bar = {
135{ 71 .dtor = nvkm_bar_dtor,
136 nvkm_subdev_destroy(&bar->base); 72 .oneinit = nvkm_bar_oneinit,
137} 73 .init = nvkm_bar_init,
74};
138 75
139void 76void
140_nvkm_bar_dtor(struct nvkm_object *object) 77nvkm_bar_ctor(const struct nvkm_bar_func *func, struct nvkm_device *device,
78 int index, struct nvkm_bar *bar)
141{ 79{
142 struct nvkm_bar *bar = (void *)object; 80 nvkm_subdev_ctor(&nvkm_bar, device, index, 0, &bar->subdev);
143 nvkm_bar_destroy(bar); 81 bar->func = func;
82 spin_lock_init(&bar->lock);
144} 83}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
new file mode 100644
index 000000000000..ef717136c838
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "nv50.h"
25
26#include <subdev/timer.h>
27
28void
29g84_bar_flush(struct nvkm_bar *bar)
30{
31 struct nvkm_device *device = bar->subdev.device;
32 unsigned long flags;
33 spin_lock_irqsave(&bar->lock, flags);
34 nvkm_wr32(device, 0x070000, 0x00000001);
35 nvkm_msec(device, 2000,
36 if (!(nvkm_rd32(device, 0x070000) & 0x00000002))
37 break;
38 );
39 spin_unlock_irqrestore(&bar->lock, flags);
40}
41
42static const struct nvkm_bar_func
43g84_bar_func = {
44 .dtor = nv50_bar_dtor,
45 .oneinit = nv50_bar_oneinit,
46 .init = nv50_bar_init,
47 .kmap = nv50_bar_kmap,
48 .umap = nv50_bar_umap,
49 .flush = g84_bar_flush,
50};
51
52int
53g84_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
54{
55 return nv50_bar_new_(&g84_bar_func, device, index, 0x200, pbar);
56}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index 12a1aebd9a96..c794b2c2d21e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -21,101 +21,60 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "priv.h" 24#include "gf100.h"
25 25
26#include <core/device.h>
27#include <core/gpuobj.h> 26#include <core/gpuobj.h>
28#include <subdev/fb.h> 27#include <subdev/fb.h>
29#include <subdev/mmu.h> 28#include <subdev/mmu.h>
30 29
31struct gf100_bar_priv_vm { 30static struct nvkm_vm *
32 struct nvkm_gpuobj *mem; 31gf100_bar_kmap(struct nvkm_bar *base)
33 struct nvkm_gpuobj *pgd;
34 struct nvkm_vm *vm;
35};
36
37struct gf100_bar_priv {
38 struct nvkm_bar base;
39 spinlock_t lock;
40 struct gf100_bar_priv_vm bar[2];
41};
42
43static int
44gf100_bar_kmap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
45 struct nvkm_vma *vma)
46{
47 struct gf100_bar_priv *priv = (void *)bar;
48 int ret;
49
50 ret = nvkm_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma);
51 if (ret)
52 return ret;
53
54 nvkm_vm_map(vma, mem);
55 return 0;
56}
57
58static int
59gf100_bar_umap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
60 struct nvkm_vma *vma)
61{ 32{
62 struct gf100_bar_priv *priv = (void *)bar; 33 return gf100_bar(base)->bar[0].vm;
63 int ret;
64
65 ret = nvkm_vm_get(priv->bar[1].vm, mem->size << 12,
66 mem->page_shift, flags, vma);
67 if (ret)
68 return ret;
69
70 nvkm_vm_map(vma, mem);
71 return 0;
72} 34}
73 35
74static void 36int
75gf100_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma) 37gf100_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
76{ 38{
77 nvkm_vm_unmap(vma); 39 struct gf100_bar *bar = gf100_bar(base);
78 nvkm_vm_put(vma); 40 return nvkm_vm_get(bar->bar[1].vm, size, type, NV_MEM_ACCESS_RW, vma);
79} 41}
80 42
81static int 43static int
82gf100_bar_ctor_vm(struct gf100_bar_priv *priv, struct gf100_bar_priv_vm *bar_vm, 44gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
83 int bar_nr) 45 struct lock_class_key *key, int bar_nr)
84{ 46{
85 struct nvkm_device *device = nv_device(&priv->base); 47 struct nvkm_device *device = bar->base.subdev.device;
86 struct nvkm_vm *vm; 48 struct nvkm_vm *vm;
87 resource_size_t bar_len; 49 resource_size_t bar_len;
88 int ret; 50 int ret;
89 51
90 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0, 52 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, false,
91 &bar_vm->mem); 53 &bar_vm->mem);
92 if (ret) 54 if (ret)
93 return ret; 55 return ret;
94 56
95 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0, 57 ret = nvkm_gpuobj_new(device, 0x8000, 0, false, NULL, &bar_vm->pgd);
96 &bar_vm->pgd);
97 if (ret) 58 if (ret)
98 return ret; 59 return ret;
99 60
100 bar_len = nv_device_resource_len(device, bar_nr); 61 bar_len = device->func->resource_size(device, bar_nr);
101 62
102 ret = nvkm_vm_new(device, 0, bar_len, 0, &vm); 63 ret = nvkm_vm_new(device, 0, bar_len, 0, key, &vm);
103 if (ret) 64 if (ret)
104 return ret; 65 return ret;
105 66
106 atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]); 67 atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
107 68
108 /* 69 /*
109 * Bootstrap page table lookup. 70 * Bootstrap page table lookup.
110 */ 71 */
111 if (bar_nr == 3) { 72 if (bar_nr == 3) {
112 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 73 ret = nvkm_vm_boot(vm, bar_len);
113 (bar_len >> 12) * 8, 0x1000, 74 if (ret) {
114 NVOBJ_FLAG_ZERO_ALLOC, 75 nvkm_vm_ref(NULL, &vm, NULL);
115 &vm->pgt[0].obj[0]);
116 vm->pgt[0].refcount[0] = 1;
117 if (ret)
118 return ret; 76 return ret;
77 }
119 } 78 }
120 79
121 ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->pgd); 80 ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
@@ -123,97 +82,101 @@ gf100_bar_ctor_vm(struct gf100_bar_priv *priv, struct gf100_bar_priv_vm *bar_vm,
123 if (ret) 82 if (ret)
124 return ret; 83 return ret;
125 84
126 nv_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr)); 85 nvkm_kmap(bar_vm->mem);
127 nv_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr)); 86 nvkm_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
128 nv_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1)); 87 nvkm_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
129 nv_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1)); 88 nvkm_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
89 nvkm_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
90 nvkm_done(bar_vm->mem);
130 return 0; 91 return 0;
131} 92}
132 93
133int 94int
134gf100_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 95gf100_bar_oneinit(struct nvkm_bar *base)
135 struct nvkm_oclass *oclass, void *data, u32 size,
136 struct nvkm_object **pobject)
137{ 96{
138 struct nvkm_device *device = nv_device(parent); 97 static struct lock_class_key bar1_lock;
139 struct gf100_bar_priv *priv; 98 static struct lock_class_key bar3_lock;
140 bool has_bar3 = nv_device_resource_len(device, 3) != 0; 99 struct gf100_bar *bar = gf100_bar(base);
141 int ret; 100 int ret;
142 101
143 ret = nvkm_bar_create(parent, engine, oclass, &priv);
144 *pobject = nv_object(priv);
145 if (ret)
146 return ret;
147
148 /* BAR3 */ 102 /* BAR3 */
149 if (has_bar3) { 103 if (bar->base.func->kmap) {
150 ret = gf100_bar_ctor_vm(priv, &priv->bar[0], 3); 104 ret = gf100_bar_ctor_vm(bar, &bar->bar[0], &bar3_lock, 3);
151 if (ret) 105 if (ret)
152 return ret; 106 return ret;
153 } 107 }
154 108
155 /* BAR1 */ 109 /* BAR1 */
156 ret = gf100_bar_ctor_vm(priv, &priv->bar[1], 1); 110 ret = gf100_bar_ctor_vm(bar, &bar->bar[1], &bar1_lock, 1);
157 if (ret) 111 if (ret)
158 return ret; 112 return ret;
159 113
160 if (has_bar3) {
161 priv->base.alloc = nvkm_bar_alloc;
162 priv->base.kmap = gf100_bar_kmap;
163 }
164 priv->base.umap = gf100_bar_umap;
165 priv->base.unmap = gf100_bar_unmap;
166 priv->base.flush = g84_bar_flush;
167 spin_lock_init(&priv->lock);
168 return 0; 114 return 0;
169} 115}
170 116
171void 117int
172gf100_bar_dtor(struct nvkm_object *object) 118gf100_bar_init(struct nvkm_bar *base)
173{ 119{
174 struct gf100_bar_priv *priv = (void *)object; 120 struct gf100_bar *bar = gf100_bar(base);
121 struct nvkm_device *device = bar->base.subdev.device;
122 u32 addr;
123
124 nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
125 nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
175 126
176 nvkm_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd); 127 addr = nvkm_memory_addr(bar->bar[1].mem) >> 12;
177 nvkm_gpuobj_ref(NULL, &priv->bar[1].pgd); 128 nvkm_wr32(device, 0x001704, 0x80000000 | addr);
178 nvkm_gpuobj_ref(NULL, &priv->bar[1].mem);
179 129
180 if (priv->bar[0].vm) { 130 if (bar->bar[0].mem) {
181 nvkm_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]); 131 addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
182 nvkm_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd); 132 nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
183 } 133 }
184 nvkm_gpuobj_ref(NULL, &priv->bar[0].pgd);
185 nvkm_gpuobj_ref(NULL, &priv->bar[0].mem);
186 134
187 nvkm_bar_destroy(&priv->base); 135 return 0;
188} 136}
189 137
190int 138void *
191gf100_bar_init(struct nvkm_object *object) 139gf100_bar_dtor(struct nvkm_bar *base)
192{ 140{
193 struct gf100_bar_priv *priv = (void *)object; 141 struct gf100_bar *bar = gf100_bar(base);
194 int ret;
195 142
196 ret = nvkm_bar_init(&priv->base); 143 nvkm_vm_ref(NULL, &bar->bar[1].vm, bar->bar[1].pgd);
197 if (ret) 144 nvkm_gpuobj_del(&bar->bar[1].pgd);
198 return ret; 145 nvkm_memory_del(&bar->bar[1].mem);
199 146
200 nv_mask(priv, 0x000200, 0x00000100, 0x00000000); 147 if (bar->bar[0].vm) {
201 nv_mask(priv, 0x000200, 0x00000100, 0x00000100); 148 nvkm_memory_del(&bar->bar[0].vm->pgt[0].mem[0]);
149 nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].pgd);
150 }
151 nvkm_gpuobj_del(&bar->bar[0].pgd);
152 nvkm_memory_del(&bar->bar[0].mem);
153 return bar;
154}
202 155
203 nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12); 156int
204 if (priv->bar[0].mem) 157gf100_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
205 nv_wr32(priv, 0x001714, 158 int index, struct nvkm_bar **pbar)
206 0xc0000000 | priv->bar[0].mem->addr >> 12); 159{
160 struct gf100_bar *bar;
161 if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
162 return -ENOMEM;
163 nvkm_bar_ctor(func, device, index, &bar->base);
164 *pbar = &bar->base;
207 return 0; 165 return 0;
208} 166}
209 167
210struct nvkm_oclass 168static const struct nvkm_bar_func
211gf100_bar_oclass = { 169gf100_bar_func = {
212 .handle = NV_SUBDEV(BAR, 0xc0), 170 .dtor = gf100_bar_dtor,
213 .ofuncs = &(struct nvkm_ofuncs) { 171 .oneinit = gf100_bar_oneinit,
214 .ctor = gf100_bar_ctor, 172 .init = gf100_bar_init,
215 .dtor = gf100_bar_dtor, 173 .kmap = gf100_bar_kmap,
216 .init = gf100_bar_init, 174 .umap = gf100_bar_umap,
217 .fini = _nvkm_bar_fini, 175 .flush = g84_bar_flush,
218 },
219}; 176};
177
178int
179gf100_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
180{
181 return gf100_bar_new_(&gf100_bar_func, device, index, pbar);
182}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
new file mode 100644
index 000000000000..f7dea69640d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
@@ -0,0 +1,23 @@
1#ifndef __GF100_BAR_H__
2#define __GF100_BAR_H__
3#define gf100_bar(p) container_of((p), struct gf100_bar, base)
4#include "priv.h"
5
6struct gf100_bar_vm {
7 struct nvkm_memory *mem;
8 struct nvkm_gpuobj *pgd;
9 struct nvkm_vm *vm;
10};
11
12struct gf100_bar {
13 struct nvkm_bar base;
14 struct gf100_bar_vm bar[2];
15};
16
17int gf100_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
18 int, struct nvkm_bar **);
19void *gf100_bar_dtor(struct nvkm_bar *);
20int gf100_bar_oneinit(struct nvkm_bar *);
21int gf100_bar_init(struct nvkm_bar *);
22int gf100_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
23#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index 148f739a276e..9232fab4274c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -19,32 +19,22 @@
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE. 20 * DEALINGS IN THE SOFTWARE.
21 */ 21 */
22#include "priv.h" 22#include "gf100.h"
23
24static const struct nvkm_bar_func
25gk20a_bar_func = {
26 .dtor = gf100_bar_dtor,
27 .oneinit = gf100_bar_oneinit,
28 .init = gf100_bar_init,
29 .umap = gf100_bar_umap,
30 .flush = g84_bar_flush,
31};
23 32
24int 33int
25gk20a_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 34gk20a_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
26 struct nvkm_oclass *oclass, void *data, u32 size,
27 struct nvkm_object **pobject)
28{ 35{
29 struct nvkm_bar *bar; 36 int ret = gf100_bar_new_(&gk20a_bar_func, device, index, pbar);
30 int ret; 37 if (ret == 0)
31 38 (*pbar)->iomap_uncached = true;
32 ret = gf100_bar_ctor(parent, engine, oclass, data, size, pobject); 39 return ret;
33 if (ret)
34 return ret;
35
36 bar = (struct nvkm_bar *)*pobject;
37 bar->iomap_uncached = true;
38 return 0;
39} 40}
40
41struct nvkm_oclass
42gk20a_bar_oclass = {
43 .handle = NV_SUBDEV(BAR, 0xea),
44 .ofuncs = &(struct nvkm_ofuncs) {
45 .ctor = gk20a_bar_ctor,
46 .dtor = gf100_bar_dtor,
47 .init = gf100_bar_init,
48 .fini = _nvkm_bar_fini,
49 },
50};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 8548adb91dcc..370dcd8ff7b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -21,251 +21,196 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "priv.h" 24#include "nv50.h"
25 25
26#include <core/device.h>
27#include <core/gpuobj.h> 26#include <core/gpuobj.h>
28#include <subdev/fb.h> 27#include <subdev/fb.h>
29#include <subdev/mmu.h> 28#include <subdev/mmu.h>
30#include <subdev/timer.h> 29#include <subdev/timer.h>
31 30
32struct nv50_bar_priv { 31struct nvkm_vm *
33 struct nvkm_bar base; 32nv50_bar_kmap(struct nvkm_bar *base)
34 spinlock_t lock;
35 struct nvkm_gpuobj *mem;
36 struct nvkm_gpuobj *pad;
37 struct nvkm_gpuobj *pgd;
38 struct nvkm_vm *bar1_vm;
39 struct nvkm_gpuobj *bar1;
40 struct nvkm_vm *bar3_vm;
41 struct nvkm_gpuobj *bar3;
42};
43
44static int
45nv50_bar_kmap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
46 struct nvkm_vma *vma)
47{
48 struct nv50_bar_priv *priv = (void *)bar;
49 int ret;
50
51 ret = nvkm_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
52 if (ret)
53 return ret;
54
55 nvkm_vm_map(vma, mem);
56 return 0;
57}
58
59static int
60nv50_bar_umap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
61 struct nvkm_vma *vma)
62{ 33{
63 struct nv50_bar_priv *priv = (void *)bar; 34 return nv50_bar(base)->bar3_vm;
64 int ret;
65
66 ret = nvkm_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
67 if (ret)
68 return ret;
69
70 nvkm_vm_map(vma, mem);
71 return 0;
72} 35}
73 36
74static void 37int
75nv50_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma) 38nv50_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
76{ 39{
77 nvkm_vm_unmap(vma); 40 struct nv50_bar *bar = nv50_bar(base);
78 nvkm_vm_put(vma); 41 return nvkm_vm_get(bar->bar1_vm, size, type, NV_MEM_ACCESS_RW, vma);
79} 42}
80 43
81static void 44static void
82nv50_bar_flush(struct nvkm_bar *bar) 45nv50_bar_flush(struct nvkm_bar *base)
83{
84 struct nv50_bar_priv *priv = (void *)bar;
85 unsigned long flags;
86 spin_lock_irqsave(&priv->lock, flags);
87 nv_wr32(priv, 0x00330c, 0x00000001);
88 if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
89 nv_warn(priv, "flush timeout\n");
90 spin_unlock_irqrestore(&priv->lock, flags);
91}
92
93void
94g84_bar_flush(struct nvkm_bar *bar)
95{ 46{
96 struct nv50_bar_priv *priv = (void *)bar; 47 struct nv50_bar *bar = nv50_bar(base);
48 struct nvkm_device *device = bar->base.subdev.device;
97 unsigned long flags; 49 unsigned long flags;
98 spin_lock_irqsave(&priv->lock, flags); 50 spin_lock_irqsave(&bar->base.lock, flags);
99 nv_wr32(bar, 0x070000, 0x00000001); 51 nvkm_wr32(device, 0x00330c, 0x00000001);
100 if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000)) 52 nvkm_msec(device, 2000,
101 nv_warn(priv, "flush timeout\n"); 53 if (!(nvkm_rd32(device, 0x00330c) & 0x00000002))
102 spin_unlock_irqrestore(&priv->lock, flags); 54 break;
55 );
56 spin_unlock_irqrestore(&bar->base.lock, flags);
103} 57}
104 58
105static int 59int
106nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 60nv50_bar_oneinit(struct nvkm_bar *base)
107 struct nvkm_oclass *oclass, void *data, u32 size,
108 struct nvkm_object **pobject)
109{ 61{
110 struct nvkm_device *device = nv_device(parent); 62 struct nv50_bar *bar = nv50_bar(base);
111 struct nvkm_object *heap; 63 struct nvkm_device *device = bar->base.subdev.device;
64 static struct lock_class_key bar1_lock;
65 static struct lock_class_key bar3_lock;
112 struct nvkm_vm *vm; 66 struct nvkm_vm *vm;
113 struct nv50_bar_priv *priv;
114 u64 start, limit; 67 u64 start, limit;
115 int ret; 68 int ret;
116 69
117 ret = nvkm_bar_create(parent, engine, oclass, &priv); 70 ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
118 *pobject = nv_object(priv);
119 if (ret) 71 if (ret)
120 return ret; 72 return ret;
121 73
122 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0, 74 ret = nvkm_gpuobj_new(device, bar->pgd_addr, 0, false, bar->mem,
123 NVOBJ_FLAG_HEAP, &priv->mem); 75 &bar->pad);
124 heap = nv_object(priv->mem);
125 if (ret) 76 if (ret)
126 return ret; 77 return ret;
127 78
128 ret = nvkm_gpuobj_new(nv_object(priv), heap, 79 ret = nvkm_gpuobj_new(device, 0x4000, 0, false, bar->mem, &bar->pgd);
129 (device->chipset == 0x50) ? 0x1400 : 0x0200,
130 0, 0, &priv->pad);
131 if (ret)
132 return ret;
133
134 ret = nvkm_gpuobj_new(nv_object(priv), heap, 0x4000, 0, 0, &priv->pgd);
135 if (ret) 80 if (ret)
136 return ret; 81 return ret;
137 82
138 /* BAR3 */ 83 /* BAR3 */
139 start = 0x0100000000ULL; 84 start = 0x0100000000ULL;
140 limit = start + nv_device_resource_len(device, 3); 85 limit = start + device->func->resource_size(device, 3);
141 86
142 ret = nvkm_vm_new(device, start, limit, start, &vm); 87 ret = nvkm_vm_new(device, start, limit, start, &bar3_lock, &vm);
143 if (ret) 88 if (ret)
144 return ret; 89 return ret;
145 90
146 atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]); 91 atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
147 92
148 ret = nvkm_gpuobj_new(nv_object(priv), heap, 93 ret = nvkm_vm_boot(vm, limit-- - start);
149 ((limit-- - start) >> 12) * 8, 0x1000,
150 NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
151 vm->pgt[0].refcount[0] = 1;
152 if (ret) 94 if (ret)
153 return ret; 95 return ret;
154 96
155 ret = nvkm_vm_ref(vm, &priv->bar3_vm, priv->pgd); 97 ret = nvkm_vm_ref(vm, &bar->bar3_vm, bar->pgd);
156 nvkm_vm_ref(NULL, &vm, NULL); 98 nvkm_vm_ref(NULL, &vm, NULL);
157 if (ret) 99 if (ret)
158 return ret; 100 return ret;
159 101
160 ret = nvkm_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3); 102 ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar3);
161 if (ret) 103 if (ret)
162 return ret; 104 return ret;
163 105
164 nv_wo32(priv->bar3, 0x00, 0x7fc00000); 106 nvkm_kmap(bar->bar3);
165 nv_wo32(priv->bar3, 0x04, lower_32_bits(limit)); 107 nvkm_wo32(bar->bar3, 0x00, 0x7fc00000);
166 nv_wo32(priv->bar3, 0x08, lower_32_bits(start)); 108 nvkm_wo32(bar->bar3, 0x04, lower_32_bits(limit));
167 nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 | 109 nvkm_wo32(bar->bar3, 0x08, lower_32_bits(start));
168 upper_32_bits(start)); 110 nvkm_wo32(bar->bar3, 0x0c, upper_32_bits(limit) << 24 |
169 nv_wo32(priv->bar3, 0x10, 0x00000000); 111 upper_32_bits(start));
170 nv_wo32(priv->bar3, 0x14, 0x00000000); 112 nvkm_wo32(bar->bar3, 0x10, 0x00000000);
113 nvkm_wo32(bar->bar3, 0x14, 0x00000000);
114 nvkm_done(bar->bar3);
171 115
172 /* BAR1 */ 116 /* BAR1 */
173 start = 0x0000000000ULL; 117 start = 0x0000000000ULL;
174 limit = start + nv_device_resource_len(device, 1); 118 limit = start + device->func->resource_size(device, 1);
175 119
176 ret = nvkm_vm_new(device, start, limit--, start, &vm); 120 ret = nvkm_vm_new(device, start, limit--, start, &bar1_lock, &vm);
177 if (ret) 121 if (ret)
178 return ret; 122 return ret;
179 123
180 atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]); 124 atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
181 125
182 ret = nvkm_vm_ref(vm, &priv->bar1_vm, priv->pgd); 126 ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->pgd);
183 nvkm_vm_ref(NULL, &vm, NULL); 127 nvkm_vm_ref(NULL, &vm, NULL);
184 if (ret) 128 if (ret)
185 return ret; 129 return ret;
186 130
187 ret = nvkm_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1); 131 ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar1);
188 if (ret) 132 if (ret)
189 return ret; 133 return ret;
190 134
191 nv_wo32(priv->bar1, 0x00, 0x7fc00000); 135 nvkm_kmap(bar->bar1);
192 nv_wo32(priv->bar1, 0x04, lower_32_bits(limit)); 136 nvkm_wo32(bar->bar1, 0x00, 0x7fc00000);
193 nv_wo32(priv->bar1, 0x08, lower_32_bits(start)); 137 nvkm_wo32(bar->bar1, 0x04, lower_32_bits(limit));
194 nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 | 138 nvkm_wo32(bar->bar1, 0x08, lower_32_bits(start));
195 upper_32_bits(start)); 139 nvkm_wo32(bar->bar1, 0x0c, upper_32_bits(limit) << 24 |
196 nv_wo32(priv->bar1, 0x10, 0x00000000); 140 upper_32_bits(start));
197 nv_wo32(priv->bar1, 0x14, 0x00000000); 141 nvkm_wo32(bar->bar1, 0x10, 0x00000000);
198 142 nvkm_wo32(bar->bar1, 0x14, 0x00000000);
199 priv->base.alloc = nvkm_bar_alloc; 143 nvkm_done(bar->bar1);
200 priv->base.kmap = nv50_bar_kmap;
201 priv->base.umap = nv50_bar_umap;
202 priv->base.unmap = nv50_bar_unmap;
203 if (device->chipset == 0x50)
204 priv->base.flush = nv50_bar_flush;
205 else
206 priv->base.flush = g84_bar_flush;
207 spin_lock_init(&priv->lock);
208 return 0; 144 return 0;
209} 145}
210 146
211static void 147int
212nv50_bar_dtor(struct nvkm_object *object) 148nv50_bar_init(struct nvkm_bar *base)
213{ 149{
214 struct nv50_bar_priv *priv = (void *)object; 150 struct nv50_bar *bar = nv50_bar(base);
215 nvkm_gpuobj_ref(NULL, &priv->bar1); 151 struct nvkm_device *device = bar->base.subdev.device;
216 nvkm_vm_ref(NULL, &priv->bar1_vm, priv->pgd); 152 int i;
217 nvkm_gpuobj_ref(NULL, &priv->bar3); 153
218 if (priv->bar3_vm) { 154 nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
219 nvkm_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]); 155 nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
220 nvkm_vm_ref(NULL, &priv->bar3_vm, priv->pgd); 156 nvkm_wr32(device, 0x100c80, 0x00060001);
221 } 157 if (nvkm_msec(device, 2000,
222 nvkm_gpuobj_ref(NULL, &priv->pgd); 158 if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
223 nvkm_gpuobj_ref(NULL, &priv->pad); 159 break;
224 nvkm_gpuobj_ref(NULL, &priv->mem); 160 ) < 0)
225 nvkm_bar_destroy(&priv->base);
226}
227
228static int
229nv50_bar_init(struct nvkm_object *object)
230{
231 struct nv50_bar_priv *priv = (void *)object;
232 int ret, i;
233
234 ret = nvkm_bar_init(&priv->base);
235 if (ret)
236 return ret;
237
238 nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
239 nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
240 nv_wr32(priv, 0x100c80, 0x00060001);
241 if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000)) {
242 nv_error(priv, "vm flush timeout\n");
243 return -EBUSY; 161 return -EBUSY;
244 }
245 162
246 nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12); 163 nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
247 nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12); 164 nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
248 nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4); 165 nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
249 nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4); 166 nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar3->node->offset >> 4);
250 for (i = 0; i < 8; i++) 167 for (i = 0; i < 8; i++)
251 nv_wr32(priv, 0x001900 + (i * 4), 0x00000000); 168 nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
252 return 0; 169 return 0;
253} 170}
254 171
255static int 172void *
256nv50_bar_fini(struct nvkm_object *object, bool suspend) 173nv50_bar_dtor(struct nvkm_bar *base)
257{ 174{
258 struct nv50_bar_priv *priv = (void *)object; 175 struct nv50_bar *bar = nv50_bar(base);
259 return nvkm_bar_fini(&priv->base, suspend); 176 nvkm_gpuobj_del(&bar->bar1);
177 nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
178 nvkm_gpuobj_del(&bar->bar3);
179 if (bar->bar3_vm) {
180 nvkm_memory_del(&bar->bar3_vm->pgt[0].mem[0]);
181 nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
182 }
183 nvkm_gpuobj_del(&bar->pgd);
184 nvkm_gpuobj_del(&bar->pad);
185 nvkm_gpuobj_del(&bar->mem);
186 return bar;
260} 187}
261 188
262struct nvkm_oclass 189int
263nv50_bar_oclass = { 190nv50_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
264 .handle = NV_SUBDEV(BAR, 0x50), 191 int index, u32 pgd_addr, struct nvkm_bar **pbar)
265 .ofuncs = &(struct nvkm_ofuncs) { 192{
266 .ctor = nv50_bar_ctor, 193 struct nv50_bar *bar;
267 .dtor = nv50_bar_dtor, 194 if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
268 .init = nv50_bar_init, 195 return -ENOMEM;
269 .fini = nv50_bar_fini, 196 nvkm_bar_ctor(func, device, index, &bar->base);
270 }, 197 bar->pgd_addr = pgd_addr;
198 *pbar = &bar->base;
199 return 0;
200}
201
202static const struct nvkm_bar_func
203nv50_bar_func = {
204 .dtor = nv50_bar_dtor,
205 .oneinit = nv50_bar_oneinit,
206 .init = nv50_bar_init,
207 .kmap = nv50_bar_kmap,
208 .umap = nv50_bar_umap,
209 .flush = nv50_bar_flush,
271}; 210};
211
212int
213nv50_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
214{
215 return nv50_bar_new_(&nv50_bar_func, device, index, 0x1400, pbar);
216}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
new file mode 100644
index 000000000000..1eb764f22a49
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
@@ -0,0 +1,26 @@
1#ifndef __NV50_BAR_H__
2#define __NV50_BAR_H__
3#define nv50_bar(p) container_of((p), struct nv50_bar, base)
4#include "priv.h"
5
6struct nv50_bar {
7 struct nvkm_bar base;
8 u32 pgd_addr;
9 struct nvkm_gpuobj *mem;
10 struct nvkm_gpuobj *pad;
11 struct nvkm_gpuobj *pgd;
12 struct nvkm_vm *bar1_vm;
13 struct nvkm_gpuobj *bar1;
14 struct nvkm_vm *bar3_vm;
15 struct nvkm_gpuobj *bar3;
16};
17
18int nv50_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
19 int, u32 pgd_addr, struct nvkm_bar **);
20void *nv50_bar_dtor(struct nvkm_bar *);
21int nv50_bar_oneinit(struct nvkm_bar *);
22int nv50_bar_init(struct nvkm_bar *);
23struct nvkm_vm *nv50_bar_kmap(struct nvkm_bar *);
24int nv50_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
25void nv50_bar_unmap(struct nvkm_bar *, struct nvkm_vma *);
26#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
index aa85f61b48c2..d834ef20db5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -1,30 +1,19 @@
1#ifndef __NVKM_BAR_PRIV_H__ 1#ifndef __NVKM_BAR_PRIV_H__
2#define __NVKM_BAR_PRIV_H__ 2#define __NVKM_BAR_PRIV_H__
3#define nvkm_bar(p) container_of((p), struct nvkm_bar, subdev)
3#include <subdev/bar.h> 4#include <subdev/bar.h>
4 5
5#define nvkm_bar_create(p,e,o,d) \ 6void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *,
6 nvkm_bar_create_((p), (e), (o), sizeof(**d), (void **)d) 7 int, struct nvkm_bar *);
7#define nvkm_bar_init(p) \
8 nvkm_subdev_init(&(p)->base)
9#define nvkm_bar_fini(p,s) \
10 nvkm_subdev_fini(&(p)->base, (s))
11 8
12int nvkm_bar_create_(struct nvkm_object *, struct nvkm_object *, 9struct nvkm_bar_func {
13 struct nvkm_oclass *, int, void **); 10 void *(*dtor)(struct nvkm_bar *);
14void nvkm_bar_destroy(struct nvkm_bar *); 11 int (*oneinit)(struct nvkm_bar *);
15 12 int (*init)(struct nvkm_bar *);
16void _nvkm_bar_dtor(struct nvkm_object *); 13 struct nvkm_vm *(*kmap)(struct nvkm_bar *);
17#define _nvkm_bar_init _nvkm_subdev_init 14 int (*umap)(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
18#define _nvkm_bar_fini _nvkm_subdev_fini 15 void (*flush)(struct nvkm_bar *);
19 16};
20int nvkm_bar_alloc(struct nvkm_bar *, struct nvkm_object *,
21 struct nvkm_mem *, struct nvkm_object **);
22 17
23void g84_bar_flush(struct nvkm_bar *); 18void g84_bar_flush(struct nvkm_bar *);
24
25int gf100_bar_ctor(struct nvkm_object *, struct nvkm_object *,
26 struct nvkm_oclass *, void *, u32,
27 struct nvkm_object **);
28void gf100_bar_dtor(struct nvkm_object *);
29int gf100_bar_init(struct nvkm_object *);
30#endif 19#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
index 08eb03fbc203..43f0ba1fba7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
@@ -33,14 +33,14 @@ nvbios_M0203Te(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
33 33
34 if (!bit_entry(bios, 'M', &bit_M)) { 34 if (!bit_entry(bios, 'M', &bit_M)) {
35 if (bit_M.version == 2 && bit_M.length > 0x04) 35 if (bit_M.version == 2 && bit_M.length > 0x04)
36 data = nv_ro16(bios, bit_M.offset + 0x03); 36 data = nvbios_rd16(bios, bit_M.offset + 0x03);
37 if (data) { 37 if (data) {
38 *ver = nv_ro08(bios, data + 0x00); 38 *ver = nvbios_rd08(bios, data + 0x00);
39 switch (*ver) { 39 switch (*ver) {
40 case 0x10: 40 case 0x10:
41 *hdr = nv_ro08(bios, data + 0x01); 41 *hdr = nvbios_rd08(bios, data + 0x01);
42 *len = nv_ro08(bios, data + 0x02); 42 *len = nvbios_rd08(bios, data + 0x02);
43 *cnt = nv_ro08(bios, data + 0x03); 43 *cnt = nvbios_rd08(bios, data + 0x03);
44 return data; 44 return data;
45 default: 45 default:
46 break; 46 break;
@@ -59,8 +59,8 @@ nvbios_M0203Tp(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
59 memset(info, 0x00, sizeof(*info)); 59 memset(info, 0x00, sizeof(*info));
60 switch (!!data * *ver) { 60 switch (!!data * *ver) {
61 case 0x10: 61 case 0x10:
62 info->type = nv_ro08(bios, data + 0x04); 62 info->type = nvbios_rd08(bios, data + 0x04);
63 info->pointer = nv_ro16(bios, data + 0x05); 63 info->pointer = nvbios_rd16(bios, data + 0x05);
64 break; 64 break;
65 default: 65 default:
66 break; 66 break;
@@ -89,9 +89,9 @@ nvbios_M0203Ep(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
89 memset(info, 0x00, sizeof(*info)); 89 memset(info, 0x00, sizeof(*info));
90 switch (!!data * *ver) { 90 switch (!!data * *ver) {
91 case 0x10: 91 case 0x10:
92 info->type = (nv_ro08(bios, data + 0x00) & 0x0f) >> 0; 92 info->type = (nvbios_rd08(bios, data + 0x00) & 0x0f) >> 0;
93 info->strap = (nv_ro08(bios, data + 0x00) & 0xf0) >> 4; 93 info->strap = (nvbios_rd08(bios, data + 0x00) & 0xf0) >> 4;
94 info->group = (nv_ro08(bios, data + 0x01) & 0x0f) >> 0; 94 info->group = (nvbios_rd08(bios, data + 0x01) & 0x0f) >> 0;
95 return data; 95 return data;
96 default: 96 default:
97 break; 97 break;
@@ -103,12 +103,13 @@ u32
103nvbios_M0203Em(struct nvkm_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr, 103nvbios_M0203Em(struct nvkm_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr,
104 struct nvbios_M0203E *info) 104 struct nvbios_M0203E *info)
105{ 105{
106 struct nvkm_subdev *subdev = &bios->subdev;
106 struct nvbios_M0203T M0203T; 107 struct nvbios_M0203T M0203T;
107 u8 cnt, len, idx = 0xff; 108 u8 cnt, len, idx = 0xff;
108 u32 data; 109 u32 data;
109 110
110 if (!nvbios_M0203Tp(bios, ver, hdr, &cnt, &len, &M0203T)) { 111 if (!nvbios_M0203Tp(bios, ver, hdr, &cnt, &len, &M0203T)) {
111 nv_warn(bios, "M0203T not found\n"); 112 nvkm_warn(subdev, "M0203T not found\n");
112 return 0x00000000; 113 return 0x00000000;
113 } 114 }
114 115
@@ -119,7 +120,7 @@ nvbios_M0203Em(struct nvkm_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr,
119 continue; 120 continue;
120 return data; 121 return data;
121 default: 122 default:
122 nv_warn(bios, "M0203T type %02x\n", M0203T.type); 123 nvkm_warn(subdev, "M0203T type %02x\n", M0203T.type);
123 return 0x00000000; 124 return 0x00000000;
124 } 125 }
125 } 126 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
index e1a8ad5f3066..293a6af1b1d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
@@ -34,16 +34,16 @@ nvbios_M0205Te(struct nvkm_bios *bios,
34 34
35 if (!bit_entry(bios, 'M', &bit_M)) { 35 if (!bit_entry(bios, 'M', &bit_M)) {
36 if (bit_M.version == 2 && bit_M.length > 0x08) 36 if (bit_M.version == 2 && bit_M.length > 0x08)
37 data = nv_ro32(bios, bit_M.offset + 0x05); 37 data = nvbios_rd32(bios, bit_M.offset + 0x05);
38 if (data) { 38 if (data) {
39 *ver = nv_ro08(bios, data + 0x00); 39 *ver = nvbios_rd08(bios, data + 0x00);
40 switch (*ver) { 40 switch (*ver) {
41 case 0x10: 41 case 0x10:
42 *hdr = nv_ro08(bios, data + 0x01); 42 *hdr = nvbios_rd08(bios, data + 0x01);
43 *len = nv_ro08(bios, data + 0x02); 43 *len = nvbios_rd08(bios, data + 0x02);
44 *ssz = nv_ro08(bios, data + 0x03); 44 *ssz = nvbios_rd08(bios, data + 0x03);
45 *snr = nv_ro08(bios, data + 0x04); 45 *snr = nvbios_rd08(bios, data + 0x04);
46 *cnt = nv_ro08(bios, data + 0x05); 46 *cnt = nvbios_rd08(bios, data + 0x05);
47 return data; 47 return data;
48 default: 48 default:
49 break; 49 break;
@@ -63,7 +63,7 @@ nvbios_M0205Tp(struct nvkm_bios *bios,
63 memset(info, 0x00, sizeof(*info)); 63 memset(info, 0x00, sizeof(*info));
64 switch (!!data * *ver) { 64 switch (!!data * *ver) {
65 case 0x10: 65 case 0x10:
66 info->freq = nv_ro16(bios, data + 0x06); 66 info->freq = nvbios_rd16(bios, data + 0x06);
67 break; 67 break;
68 default: 68 default:
69 break; 69 break;
@@ -96,7 +96,7 @@ nvbios_M0205Ep(struct nvkm_bios *bios, int idx,
96 memset(info, 0x00, sizeof(*info)); 96 memset(info, 0x00, sizeof(*info));
97 switch (!!data * *ver) { 97 switch (!!data * *ver) {
98 case 0x10: 98 case 0x10:
99 info->type = nv_ro08(bios, data + 0x00) & 0x0f; 99 info->type = nvbios_rd08(bios, data + 0x00) & 0x0f;
100 return data; 100 return data;
101 default: 101 default:
102 break; 102 break;
@@ -126,7 +126,7 @@ nvbios_M0205Sp(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr,
126 memset(info, 0x00, sizeof(*info)); 126 memset(info, 0x00, sizeof(*info));
127 switch (!!data * *ver) { 127 switch (!!data * *ver) {
128 case 0x10: 128 case 0x10:
129 info->data = nv_ro08(bios, data + 0x00); 129 info->data = nvbios_rd08(bios, data + 0x00);
130 return data; 130 return data;
131 default: 131 default:
132 break; 132 break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
index 3026920c3358..95d49a526472 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
@@ -34,16 +34,16 @@ nvbios_M0209Te(struct nvkm_bios *bios,
34 34
35 if (!bit_entry(bios, 'M', &bit_M)) { 35 if (!bit_entry(bios, 'M', &bit_M)) {
36 if (bit_M.version == 2 && bit_M.length > 0x0c) 36 if (bit_M.version == 2 && bit_M.length > 0x0c)
37 data = nv_ro32(bios, bit_M.offset + 0x09); 37 data = nvbios_rd32(bios, bit_M.offset + 0x09);
38 if (data) { 38 if (data) {
39 *ver = nv_ro08(bios, data + 0x00); 39 *ver = nvbios_rd08(bios, data + 0x00);
40 switch (*ver) { 40 switch (*ver) {
41 case 0x10: 41 case 0x10:
42 *hdr = nv_ro08(bios, data + 0x01); 42 *hdr = nvbios_rd08(bios, data + 0x01);
43 *len = nv_ro08(bios, data + 0x02); 43 *len = nvbios_rd08(bios, data + 0x02);
44 *ssz = nv_ro08(bios, data + 0x03); 44 *ssz = nvbios_rd08(bios, data + 0x03);
45 *snr = 1; 45 *snr = 1;
46 *cnt = nv_ro08(bios, data + 0x04); 46 *cnt = nvbios_rd08(bios, data + 0x04);
47 return data; 47 return data;
48 default: 48 default:
49 break; 49 break;
@@ -78,12 +78,12 @@ nvbios_M0209Ep(struct nvkm_bios *bios, int idx,
78 memset(info, 0x00, sizeof(*info)); 78 memset(info, 0x00, sizeof(*info));
79 switch (!!data * *ver) { 79 switch (!!data * *ver) {
80 case 0x10: 80 case 0x10:
81 info->v00_40 = (nv_ro08(bios, data + 0x00) & 0x40) >> 6; 81 info->v00_40 = (nvbios_rd08(bios, data + 0x00) & 0x40) >> 6;
82 info->bits = nv_ro08(bios, data + 0x00) & 0x3f; 82 info->bits = nvbios_rd08(bios, data + 0x00) & 0x3f;
83 info->modulo = nv_ro08(bios, data + 0x01); 83 info->modulo = nvbios_rd08(bios, data + 0x01);
84 info->v02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6; 84 info->v02_40 = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
85 info->v02_07 = nv_ro08(bios, data + 0x02) & 0x07; 85 info->v02_07 = nvbios_rd08(bios, data + 0x02) & 0x07;
86 info->v03 = nv_ro08(bios, data + 0x03); 86 info->v03 = nvbios_rd08(bios, data + 0x03);
87 return data; 87 return data;
88 default: 88 default:
89 break; 89 break;
@@ -122,7 +122,7 @@ nvbios_M0209Sp(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr,
122 u32 mask = (1ULL << M0209E.bits) - 1; 122 u32 mask = (1ULL << M0209E.bits) - 1;
123 u16 off = bits / 8; 123 u16 off = bits / 8;
124 u8 mod = bits % 8; 124 u8 mod = bits % 8;
125 info->data[i] = nv_ro32(bios, data + off); 125 info->data[i] = nvbios_rd32(bios, data + off);
126 info->data[i] = info->data[i] >> mod; 126 info->data[i] = info->data[i] >> mod;
127 info->data[i] = info->data[i] & mask; 127 info->data[i] = info->data[i] & mask;
128 } 128 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
index b72edcf849b6..3f7db3eb3ad6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
@@ -34,15 +34,15 @@ nvbios_P0260Te(struct nvkm_bios *bios,
34 34
35 if (!bit_entry(bios, 'P', &bit_P)) { 35 if (!bit_entry(bios, 'P', &bit_P)) {
36 if (bit_P.version == 2 && bit_P.length > 0x63) 36 if (bit_P.version == 2 && bit_P.length > 0x63)
37 data = nv_ro32(bios, bit_P.offset + 0x60); 37 data = nvbios_rd32(bios, bit_P.offset + 0x60);
38 if (data) { 38 if (data) {
39 *ver = nv_ro08(bios, data + 0); 39 *ver = nvbios_rd08(bios, data + 0);
40 switch (*ver) { 40 switch (*ver) {
41 case 0x10: 41 case 0x10:
42 *hdr = nv_ro08(bios, data + 1); 42 *hdr = nvbios_rd08(bios, data + 1);
43 *cnt = nv_ro08(bios, data + 2); 43 *cnt = nvbios_rd08(bios, data + 2);
44 *len = 4; 44 *len = 4;
45 *xnr = nv_ro08(bios, data + 3); 45 *xnr = nvbios_rd08(bios, data + 3);
46 *xsz = 4; 46 *xsz = 4;
47 return data; 47 return data;
48 default: 48 default:
@@ -72,7 +72,7 @@ nvbios_P0260Ep(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
72 memset(info, 0x00, sizeof(*info)); 72 memset(info, 0x00, sizeof(*info));
73 switch (!!data * *ver) { 73 switch (!!data * *ver) {
74 case 0x10: 74 case 0x10:
75 info->data = nv_ro32(bios, data); 75 info->data = nvbios_rd32(bios, data);
76 return data; 76 return data;
77 default: 77 default:
78 break; 78 break;
@@ -98,7 +98,7 @@ nvbios_P0260Xp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
98 memset(info, 0x00, sizeof(*info)); 98 memset(info, 0x00, sizeof(*info));
99 switch (!!data * *ver) { 99 switch (!!data * *ver) {
100 case 0x10: 100 case 0x10:
101 info->data = nv_ro32(bios, data); 101 info->data = nvbios_rd32(bios, data);
102 return data; 102 return data;
103 default: 103 default:
104 break; 104 break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index 8db204f92ed3..79536897efaa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -53,6 +53,20 @@ nvbios_findstr(const u8 *data, int size, const char *str, int len)
53} 53}
54 54
55int 55int
56nvbios_memcmp(struct nvkm_bios *bios, u32 addr, const char *str, u32 len)
57{
58 unsigned char c1, c2;
59
60 while (len--) {
61 c1 = nvbios_rd08(bios, addr++);
62 c2 = *(str++);
63 if (c1 != c2)
64 return c1 - c2;
65 }
66 return 0;
67}
68
69int
56nvbios_extend(struct nvkm_bios *bios, u32 length) 70nvbios_extend(struct nvkm_bios *bios, u32 length)
57{ 71{
58 if (bios->size < length) { 72 if (bios->size < length) {
@@ -69,62 +83,29 @@ nvbios_extend(struct nvkm_bios *bios, u32 length)
69 return 0; 83 return 0;
70} 84}
71 85
72static u8 86static void *
73nvkm_bios_rd08(struct nvkm_object *object, u64 addr) 87nvkm_bios_dtor(struct nvkm_subdev *subdev)
74{
75 struct nvkm_bios *bios = (void *)object;
76 return bios->data[addr];
77}
78
79static u16
80nvkm_bios_rd16(struct nvkm_object *object, u64 addr)
81{ 88{
82 struct nvkm_bios *bios = (void *)object; 89 struct nvkm_bios *bios = nvkm_bios(subdev);
83 return get_unaligned_le16(&bios->data[addr]); 90 kfree(bios->data);
84} 91 return bios;
85
86static u32
87nvkm_bios_rd32(struct nvkm_object *object, u64 addr)
88{
89 struct nvkm_bios *bios = (void *)object;
90 return get_unaligned_le32(&bios->data[addr]);
91}
92
93static void
94nvkm_bios_wr08(struct nvkm_object *object, u64 addr, u8 data)
95{
96 struct nvkm_bios *bios = (void *)object;
97 bios->data[addr] = data;
98}
99
100static void
101nvkm_bios_wr16(struct nvkm_object *object, u64 addr, u16 data)
102{
103 struct nvkm_bios *bios = (void *)object;
104 put_unaligned_le16(data, &bios->data[addr]);
105} 92}
106 93
107static void 94static const struct nvkm_subdev_func
108nvkm_bios_wr32(struct nvkm_object *object, u64 addr, u32 data) 95nvkm_bios = {
109{ 96 .dtor = nvkm_bios_dtor,
110 struct nvkm_bios *bios = (void *)object; 97};
111 put_unaligned_le32(data, &bios->data[addr]);
112}
113 98
114static int 99int
115nvkm_bios_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 100nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
116 struct nvkm_oclass *oclass, void *data, u32 size,
117 struct nvkm_object **pobject)
118{ 101{
119 struct nvkm_bios *bios; 102 struct nvkm_bios *bios;
120 struct bit_entry bit_i; 103 struct bit_entry bit_i;
121 int ret; 104 int ret;
122 105
123 ret = nvkm_subdev_create(parent, engine, oclass, 0, 106 if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
124 "VBIOS", "bios", &bios); 107 return -ENOMEM;
125 *pobject = nv_object(bios); 108 nvkm_subdev_ctor(&nvkm_bios, device, index, 0, &bios->subdev);
126 if (ret)
127 return ret;
128 109
129 ret = nvbios_shadow(bios); 110 ret = nvbios_shadow(bios);
130 if (ret) 111 if (ret)
@@ -134,73 +115,33 @@ nvkm_bios_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
134 bios->bmp_offset = nvbios_findstr(bios->data, bios->size, 115 bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
135 "\xff\x7f""NV\0", 5); 116 "\xff\x7f""NV\0", 5);
136 if (bios->bmp_offset) { 117 if (bios->bmp_offset) {
137 nv_info(bios, "BMP version %x.%x\n", 118 nvkm_debug(&bios->subdev, "BMP version %x.%x\n",
138 bmp_version(bios) >> 8, 119 bmp_version(bios) >> 8,
139 bmp_version(bios) & 0xff); 120 bmp_version(bios) & 0xff);
140 } 121 }
141 122
142 bios->bit_offset = nvbios_findstr(bios->data, bios->size, 123 bios->bit_offset = nvbios_findstr(bios->data, bios->size,
143 "\xff\xb8""BIT", 5); 124 "\xff\xb8""BIT", 5);
144 if (bios->bit_offset) 125 if (bios->bit_offset)
145 nv_info(bios, "BIT signature found\n"); 126 nvkm_debug(&bios->subdev, "BIT signature found\n");
146 127
147 /* determine the vbios version number */ 128 /* determine the vbios version number */
148 if (!bit_entry(bios, 'i', &bit_i) && bit_i.length >= 4) { 129 if (!bit_entry(bios, 'i', &bit_i) && bit_i.length >= 4) {
149 bios->version.major = nv_ro08(bios, bit_i.offset + 3); 130 bios->version.major = nvbios_rd08(bios, bit_i.offset + 3);
150 bios->version.chip = nv_ro08(bios, bit_i.offset + 2); 131 bios->version.chip = nvbios_rd08(bios, bit_i.offset + 2);
151 bios->version.minor = nv_ro08(bios, bit_i.offset + 1); 132 bios->version.minor = nvbios_rd08(bios, bit_i.offset + 1);
152 bios->version.micro = nv_ro08(bios, bit_i.offset + 0); 133 bios->version.micro = nvbios_rd08(bios, bit_i.offset + 0);
153 bios->version.patch = nv_ro08(bios, bit_i.offset + 4); 134 bios->version.patch = nvbios_rd08(bios, bit_i.offset + 4);
154 } else 135 } else
155 if (bmp_version(bios)) { 136 if (bmp_version(bios)) {
156 bios->version.major = nv_ro08(bios, bios->bmp_offset + 13); 137 bios->version.major = nvbios_rd08(bios, bios->bmp_offset + 13);
157 bios->version.chip = nv_ro08(bios, bios->bmp_offset + 12); 138 bios->version.chip = nvbios_rd08(bios, bios->bmp_offset + 12);
158 bios->version.minor = nv_ro08(bios, bios->bmp_offset + 11); 139 bios->version.minor = nvbios_rd08(bios, bios->bmp_offset + 11);
159 bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10); 140 bios->version.micro = nvbios_rd08(bios, bios->bmp_offset + 10);
160 } 141 }
161 142
162 nv_info(bios, "version %02x.%02x.%02x.%02x.%02x\n", 143 nvkm_info(&bios->subdev, "version %02x.%02x.%02x.%02x.%02x\n",
163 bios->version.major, bios->version.chip, 144 bios->version.major, bios->version.chip,
164 bios->version.minor, bios->version.micro, bios->version.patch); 145 bios->version.minor, bios->version.micro, bios->version.patch);
165
166 return 0; 146 return 0;
167} 147}
168
169static void
170nvkm_bios_dtor(struct nvkm_object *object)
171{
172 struct nvkm_bios *bios = (void *)object;
173 kfree(bios->data);
174 nvkm_subdev_destroy(&bios->base);
175}
176
177static int
178nvkm_bios_init(struct nvkm_object *object)
179{
180 struct nvkm_bios *bios = (void *)object;
181 return nvkm_subdev_init(&bios->base);
182}
183
184static int
185nvkm_bios_fini(struct nvkm_object *object, bool suspend)
186{
187 struct nvkm_bios *bios = (void *)object;
188 return nvkm_subdev_fini(&bios->base, suspend);
189}
190
191struct nvkm_oclass
192nvkm_bios_oclass = {
193 .handle = NV_SUBDEV(VBIOS, 0x00),
194 .ofuncs = &(struct nvkm_ofuncs) {
195 .ctor = nvkm_bios_ctor,
196 .dtor = nvkm_bios_dtor,
197 .init = nvkm_bios_init,
198 .fini = nvkm_bios_fini,
199 .rd08 = nvkm_bios_rd08,
200 .rd16 = nvkm_bios_rd16,
201 .rd32 = nvkm_bios_rd32,
202 .wr08 = nvkm_bios_wr08,
203 .wr16 = nvkm_bios_wr16,
204 .wr32 = nvkm_bios_wr32,
205 },
206};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
index eab540496cdf..070ff33f8d5e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
@@ -28,18 +28,18 @@ int
28bit_entry(struct nvkm_bios *bios, u8 id, struct bit_entry *bit) 28bit_entry(struct nvkm_bios *bios, u8 id, struct bit_entry *bit)
29{ 29{
30 if (likely(bios->bit_offset)) { 30 if (likely(bios->bit_offset)) {
31 u8 entries = nv_ro08(bios, bios->bit_offset + 10); 31 u8 entries = nvbios_rd08(bios, bios->bit_offset + 10);
32 u32 entry = bios->bit_offset + 12; 32 u32 entry = bios->bit_offset + 12;
33 while (entries--) { 33 while (entries--) {
34 if (nv_ro08(bios, entry + 0) == id) { 34 if (nvbios_rd08(bios, entry + 0) == id) {
35 bit->id = nv_ro08(bios, entry + 0); 35 bit->id = nvbios_rd08(bios, entry + 0);
36 bit->version = nv_ro08(bios, entry + 1); 36 bit->version = nvbios_rd08(bios, entry + 1);
37 bit->length = nv_ro16(bios, entry + 2); 37 bit->length = nvbios_rd16(bios, entry + 2);
38 bit->offset = nv_ro16(bios, entry + 4); 38 bit->offset = nvbios_rd16(bios, entry + 4);
39 return 0; 39 return 0;
40 } 40 }
41 41
42 entry += nv_ro08(bios, bios->bit_offset + 9); 42 entry += nvbios_rd08(bios, bios->bit_offset + 9);
43 } 43 }
44 44
45 return -ENOENT; 45 return -ENOENT;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
index 12e958533f46..3756ec91a88d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
@@ -34,17 +34,17 @@ nvbios_boostTe(struct nvkm_bios *bios,
34 34
35 if (!bit_entry(bios, 'P', &bit_P)) { 35 if (!bit_entry(bios, 'P', &bit_P)) {
36 if (bit_P.version == 2) 36 if (bit_P.version == 2)
37 boost = nv_ro16(bios, bit_P.offset + 0x30); 37 boost = nvbios_rd16(bios, bit_P.offset + 0x30);
38 38
39 if (boost) { 39 if (boost) {
40 *ver = nv_ro08(bios, boost + 0); 40 *ver = nvbios_rd08(bios, boost + 0);
41 switch (*ver) { 41 switch (*ver) {
42 case 0x11: 42 case 0x11:
43 *hdr = nv_ro08(bios, boost + 1); 43 *hdr = nvbios_rd08(bios, boost + 1);
44 *cnt = nv_ro08(bios, boost + 5); 44 *cnt = nvbios_rd08(bios, boost + 5);
45 *len = nv_ro08(bios, boost + 2); 45 *len = nvbios_rd08(bios, boost + 2);
46 *snr = nv_ro08(bios, boost + 4); 46 *snr = nvbios_rd08(bios, boost + 4);
47 *ssz = nv_ro08(bios, boost + 3); 47 *ssz = nvbios_rd08(bios, boost + 3);
48 return boost; 48 return boost;
49 default: 49 default:
50 break; 50 break;
@@ -78,9 +78,9 @@ nvbios_boostEp(struct nvkm_bios *bios, int idx,
78 u16 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len); 78 u16 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
79 memset(info, 0x00, sizeof(*info)); 79 memset(info, 0x00, sizeof(*info));
80 if (data) { 80 if (data) {
81 info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5; 81 info->pstate = (nvbios_rd16(bios, data + 0x00) & 0x01e0) >> 5;
82 info->min = nv_ro16(bios, data + 0x02) * 1000; 82 info->min = nvbios_rd16(bios, data + 0x02) * 1000;
83 info->max = nv_ro16(bios, data + 0x04) * 1000; 83 info->max = nvbios_rd16(bios, data + 0x04) * 1000;
84 } 84 }
85 return data; 85 return data;
86} 86}
@@ -117,10 +117,10 @@ nvbios_boostSp(struct nvkm_bios *bios, int idx,
117 data = nvbios_boostSe(bios, idx, data, ver, hdr, cnt, len); 117 data = nvbios_boostSe(bios, idx, data, ver, hdr, cnt, len);
118 memset(info, 0x00, sizeof(*info)); 118 memset(info, 0x00, sizeof(*info));
119 if (data) { 119 if (data) {
120 info->domain = nv_ro08(bios, data + 0x00); 120 info->domain = nvbios_rd08(bios, data + 0x00);
121 info->percent = nv_ro08(bios, data + 0x01); 121 info->percent = nvbios_rd08(bios, data + 0x01);
122 info->min = nv_ro16(bios, data + 0x02) * 1000; 122 info->min = nvbios_rd16(bios, data + 0x02) * 1000;
123 info->max = nv_ro16(bios, data + 0x04) * 1000; 123 info->max = nvbios_rd16(bios, data + 0x04) * 1000;
124 } 124 }
125 return data; 125 return data;
126} 126}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
index 706a1650a4f2..276823426332 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
@@ -30,12 +30,12 @@ nvbios_connTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
30{ 30{
31 u32 dcb = dcb_table(bios, ver, hdr, cnt, len); 31 u32 dcb = dcb_table(bios, ver, hdr, cnt, len);
32 if (dcb && *ver >= 0x30 && *hdr >= 0x16) { 32 if (dcb && *ver >= 0x30 && *hdr >= 0x16) {
33 u32 data = nv_ro16(bios, dcb + 0x14); 33 u32 data = nvbios_rd16(bios, dcb + 0x14);
34 if (data) { 34 if (data) {
35 *ver = nv_ro08(bios, data + 0); 35 *ver = nvbios_rd08(bios, data + 0);
36 *hdr = nv_ro08(bios, data + 1); 36 *hdr = nvbios_rd08(bios, data + 1);
37 *cnt = nv_ro08(bios, data + 2); 37 *cnt = nvbios_rd08(bios, data + 2);
38 *len = nv_ro08(bios, data + 3); 38 *len = nvbios_rd08(bios, data + 3);
39 return data; 39 return data;
40 } 40 }
41 } 41 }
@@ -77,18 +77,18 @@ nvbios_connEp(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
77 switch (!!data * *ver) { 77 switch (!!data * *ver) {
78 case 0x30: 78 case 0x30:
79 case 0x40: 79 case 0x40:
80 info->type = nv_ro08(bios, data + 0x00); 80 info->type = nvbios_rd08(bios, data + 0x00);
81 info->location = nv_ro08(bios, data + 0x01) & 0x0f; 81 info->location = nvbios_rd08(bios, data + 0x01) & 0x0f;
82 info->hpd = (nv_ro08(bios, data + 0x01) & 0x30) >> 4; 82 info->hpd = (nvbios_rd08(bios, data + 0x01) & 0x30) >> 4;
83 info->dp = (nv_ro08(bios, data + 0x01) & 0xc0) >> 6; 83 info->dp = (nvbios_rd08(bios, data + 0x01) & 0xc0) >> 6;
84 if (*len < 4) 84 if (*len < 4)
85 return data; 85 return data;
86 info->hpd |= (nv_ro08(bios, data + 0x02) & 0x03) << 2; 86 info->hpd |= (nvbios_rd08(bios, data + 0x02) & 0x03) << 2;
87 info->dp |= nv_ro08(bios, data + 0x02) & 0x0c; 87 info->dp |= nvbios_rd08(bios, data + 0x02) & 0x0c;
88 info->di = (nv_ro08(bios, data + 0x02) & 0xf0) >> 4; 88 info->di = (nvbios_rd08(bios, data + 0x02) & 0xf0) >> 4;
89 info->hpd |= (nv_ro08(bios, data + 0x03) & 0x07) << 4; 89 info->hpd |= (nvbios_rd08(bios, data + 0x03) & 0x07) << 4;
90 info->sr = (nv_ro08(bios, data + 0x03) & 0x08) >> 3; 90 info->sr = (nvbios_rd08(bios, data + 0x03) & 0x08) >> 3;
91 info->lcdid = (nv_ro08(bios, data + 0x03) & 0x70) >> 4; 91 info->lcdid = (nvbios_rd08(bios, data + 0x03) & 0x70) >> 4;
92 return data; 92 return data;
93 default: 93 default:
94 break; 94 break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
index 16f7ad8a4f80..32e01624a162 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
@@ -34,17 +34,17 @@ nvbios_cstepTe(struct nvkm_bios *bios,
34 34
35 if (!bit_entry(bios, 'P', &bit_P)) { 35 if (!bit_entry(bios, 'P', &bit_P)) {
36 if (bit_P.version == 2) 36 if (bit_P.version == 2)
37 cstep = nv_ro16(bios, bit_P.offset + 0x34); 37 cstep = nvbios_rd16(bios, bit_P.offset + 0x34);
38 38
39 if (cstep) { 39 if (cstep) {
40 *ver = nv_ro08(bios, cstep + 0); 40 *ver = nvbios_rd08(bios, cstep + 0);
41 switch (*ver) { 41 switch (*ver) {
42 case 0x10: 42 case 0x10:
43 *hdr = nv_ro08(bios, cstep + 1); 43 *hdr = nvbios_rd08(bios, cstep + 1);
44 *cnt = nv_ro08(bios, cstep + 3); 44 *cnt = nvbios_rd08(bios, cstep + 3);
45 *len = nv_ro08(bios, cstep + 2); 45 *len = nvbios_rd08(bios, cstep + 2);
46 *xnr = nv_ro08(bios, cstep + 5); 46 *xnr = nvbios_rd08(bios, cstep + 5);
47 *xsz = nv_ro08(bios, cstep + 4); 47 *xsz = nvbios_rd08(bios, cstep + 4);
48 return cstep; 48 return cstep;
49 default: 49 default:
50 break; 50 break;
@@ -75,8 +75,8 @@ nvbios_cstepEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
75 u16 data = nvbios_cstepEe(bios, idx, ver, hdr); 75 u16 data = nvbios_cstepEe(bios, idx, ver, hdr);
76 memset(info, 0x00, sizeof(*info)); 76 memset(info, 0x00, sizeof(*info));
77 if (data) { 77 if (data) {
78 info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5; 78 info->pstate = (nvbios_rd16(bios, data + 0x00) & 0x01e0) >> 5;
79 info->index = nv_ro08(bios, data + 0x03); 79 info->index = nvbios_rd08(bios, data + 0x03);
80 } 80 }
81 return data; 81 return data;
82} 82}
@@ -113,10 +113,10 @@ nvbios_cstepXp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
113 u16 data = nvbios_cstepXe(bios, idx, ver, hdr); 113 u16 data = nvbios_cstepXe(bios, idx, ver, hdr);
114 memset(info, 0x00, sizeof(*info)); 114 memset(info, 0x00, sizeof(*info));
115 if (data) { 115 if (data) {
116 info->freq = nv_ro16(bios, data + 0x00) * 1000; 116 info->freq = nvbios_rd16(bios, data + 0x00) * 1000;
117 info->unkn[0] = nv_ro08(bios, data + 0x02); 117 info->unkn[0] = nvbios_rd08(bios, data + 0x02);
118 info->unkn[1] = nv_ro08(bios, data + 0x03); 118 info->unkn[1] = nvbios_rd08(bios, data + 0x03);
119 info->voltage = nv_ro08(bios, data + 0x04); 119 info->voltage = nvbios_rd08(bios, data + 0x04);
120 } 120 }
121 return data; 121 return data;
122} 122}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
index 8d78140f9401..8304b806f2a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
@@ -24,38 +24,37 @@
24#include <subdev/bios.h> 24#include <subdev/bios.h>
25#include <subdev/bios/dcb.h> 25#include <subdev/bios/dcb.h>
26 26
27#include <core/device.h>
28
29u16 27u16
30dcb_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) 28dcb_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
31{ 29{
32 struct nvkm_device *device = nv_device(bios); 30 struct nvkm_subdev *subdev = &bios->subdev;
31 struct nvkm_device *device = subdev->device;
33 u16 dcb = 0x0000; 32 u16 dcb = 0x0000;
34 33
35 if (device->card_type > NV_04) 34 if (device->card_type > NV_04)
36 dcb = nv_ro16(bios, 0x36); 35 dcb = nvbios_rd16(bios, 0x36);
37 if (!dcb) { 36 if (!dcb) {
38 nv_warn(bios, "DCB table not found\n"); 37 nvkm_warn(subdev, "DCB table not found\n");
39 return dcb; 38 return dcb;
40 } 39 }
41 40
42 *ver = nv_ro08(bios, dcb); 41 *ver = nvbios_rd08(bios, dcb);
43 42
44 if (*ver >= 0x42) { 43 if (*ver >= 0x42) {
45 nv_warn(bios, "DCB version 0x%02x unknown\n", *ver); 44 nvkm_warn(subdev, "DCB version 0x%02x unknown\n", *ver);
46 return 0x0000; 45 return 0x0000;
47 } else 46 } else
48 if (*ver >= 0x30) { 47 if (*ver >= 0x30) {
49 if (nv_ro32(bios, dcb + 6) == 0x4edcbdcb) { 48 if (nvbios_rd32(bios, dcb + 6) == 0x4edcbdcb) {
50 *hdr = nv_ro08(bios, dcb + 1); 49 *hdr = nvbios_rd08(bios, dcb + 1);
51 *cnt = nv_ro08(bios, dcb + 2); 50 *cnt = nvbios_rd08(bios, dcb + 2);
52 *len = nv_ro08(bios, dcb + 3); 51 *len = nvbios_rd08(bios, dcb + 3);
53 return dcb; 52 return dcb;
54 } 53 }
55 } else 54 } else
56 if (*ver >= 0x20) { 55 if (*ver >= 0x20) {
57 if (nv_ro32(bios, dcb + 4) == 0x4edcbdcb) { 56 if (nvbios_rd32(bios, dcb + 4) == 0x4edcbdcb) {
58 u16 i2c = nv_ro16(bios, dcb + 2); 57 u16 i2c = nvbios_rd16(bios, dcb + 2);
59 *hdr = 8; 58 *hdr = 8;
60 *cnt = (i2c - dcb) / 8; 59 *cnt = (i2c - dcb) / 8;
61 *len = 8; 60 *len = 8;
@@ -63,8 +62,8 @@ dcb_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
63 } 62 }
64 } else 63 } else
65 if (*ver >= 0x15) { 64 if (*ver >= 0x15) {
66 if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) { 65 if (!nvbios_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
67 u16 i2c = nv_ro16(bios, dcb + 2); 66 u16 i2c = nvbios_rd16(bios, dcb + 2);
68 *hdr = 4; 67 *hdr = 4;
69 *cnt = (i2c - dcb) / 10; 68 *cnt = (i2c - dcb) / 10;
70 *len = 10; 69 *len = 10;
@@ -88,11 +87,11 @@ dcb_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
88 * 87 *
89 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful 88 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
90 */ 89 */
91 nv_warn(bios, "DCB contains no useful data\n"); 90 nvkm_debug(subdev, "DCB contains no useful data\n");
92 return 0x0000; 91 return 0x0000;
93 } 92 }
94 93
95 nv_warn(bios, "DCB header validation failed\n"); 94 nvkm_warn(subdev, "DCB header validation failed\n");
96 return 0x0000; 95 return 0x0000;
97} 96}
98 97
@@ -126,7 +125,7 @@ dcb_outp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
126 memset(outp, 0x00, sizeof(*outp)); 125 memset(outp, 0x00, sizeof(*outp));
127 if (dcb) { 126 if (dcb) {
128 if (*ver >= 0x20) { 127 if (*ver >= 0x20) {
129 u32 conn = nv_ro32(bios, dcb + 0x00); 128 u32 conn = nvbios_rd32(bios, dcb + 0x00);
130 outp->or = (conn & 0x0f000000) >> 24; 129 outp->or = (conn & 0x0f000000) >> 24;
131 outp->location = (conn & 0x00300000) >> 20; 130 outp->location = (conn & 0x00300000) >> 20;
132 outp->bus = (conn & 0x000f0000) >> 16; 131 outp->bus = (conn & 0x000f0000) >> 16;
@@ -140,7 +139,7 @@ dcb_outp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
140 } 139 }
141 140
142 if (*ver >= 0x40) { 141 if (*ver >= 0x40) {
143 u32 conf = nv_ro32(bios, dcb + 0x04); 142 u32 conf = nvbios_rd32(bios, dcb + 0x04);
144 switch (outp->type) { 143 switch (outp->type) {
145 case DCB_OUTPUT_DP: 144 case DCB_OUTPUT_DP:
146 switch (conf & 0x00e00000) { 145 switch (conf & 0x00e00000) {
@@ -156,20 +155,19 @@ dcb_outp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
156 break; 155 break;
157 } 156 }
158 157
159 outp->dpconf.link_nr = (conf & 0x0f000000) >> 24; 158 switch ((conf & 0x0f000000) >> 24) {
160 if (*ver < 0x41) { 159 case 0xf:
161 switch (outp->dpconf.link_nr) { 160 case 0x4:
162 case 0x0f: 161 outp->dpconf.link_nr = 4;
163 outp->dpconf.link_nr = 4; 162 break;
164 break; 163 case 0x3:
165 case 0x03: 164 case 0x2:
166 outp->dpconf.link_nr = 2; 165 outp->dpconf.link_nr = 2;
167 break; 166 break;
168 case 0x01: 167 case 0x1:
169 default: 168 default:
170 outp->dpconf.link_nr = 1; 169 outp->dpconf.link_nr = 1;
171 break; 170 break;
172 }
173 } 171 }
174 172
175 /* fall-through... */ 173 /* fall-through... */
@@ -215,14 +213,14 @@ dcb_outp_foreach(struct nvkm_bios *bios, void *data,
215 u16 outp; 213 u16 outp;
216 214
217 while ((outp = dcb_outp(bios, ++idx, &ver, &len))) { 215 while ((outp = dcb_outp(bios, ++idx, &ver, &len))) {
218 if (nv_ro32(bios, outp) == 0x00000000) 216 if (nvbios_rd32(bios, outp) == 0x00000000)
219 break; /* seen on an NV11 with DCB v1.5 */ 217 break; /* seen on an NV11 with DCB v1.5 */
220 if (nv_ro32(bios, outp) == 0xffffffff) 218 if (nvbios_rd32(bios, outp) == 0xffffffff)
221 break; /* seen on an NV17 with DCB v2.0 */ 219 break; /* seen on an NV17 with DCB v2.0 */
222 220
223 if (nv_ro08(bios, outp) == DCB_OUTPUT_UNUSED) 221 if (nvbios_rd08(bios, outp) == DCB_OUTPUT_UNUSED)
224 continue; 222 continue;
225 if (nv_ro08(bios, outp) == DCB_OUTPUT_EOL) 223 if (nvbios_rd08(bios, outp) == DCB_OUTPUT_EOL)
226 break; 224 break;
227 225
228 ret = exec(bios, data, idx, outp); 226 ret = exec(bios, data, idx, outp);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
index 262c410b7ee2..a5e92135cd77 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
@@ -33,17 +33,17 @@ nvbios_disp_table(struct nvkm_bios *bios,
33 33
34 if (!bit_entry(bios, 'U', &U)) { 34 if (!bit_entry(bios, 'U', &U)) {
35 if (U.version == 1) { 35 if (U.version == 1) {
36 u16 data = nv_ro16(bios, U.offset); 36 u16 data = nvbios_rd16(bios, U.offset);
37 if (data) { 37 if (data) {
38 *ver = nv_ro08(bios, data + 0x00); 38 *ver = nvbios_rd08(bios, data + 0x00);
39 switch (*ver) { 39 switch (*ver) {
40 case 0x20: 40 case 0x20:
41 case 0x21: 41 case 0x21:
42 case 0x22: 42 case 0x22:
43 *hdr = nv_ro08(bios, data + 0x01); 43 *hdr = nvbios_rd08(bios, data + 0x01);
44 *len = nv_ro08(bios, data + 0x02); 44 *len = nvbios_rd08(bios, data + 0x02);
45 *cnt = nv_ro08(bios, data + 0x03); 45 *cnt = nvbios_rd08(bios, data + 0x03);
46 *sub = nv_ro08(bios, data + 0x04); 46 *sub = nvbios_rd08(bios, data + 0x04);
47 return data; 47 return data;
48 default: 48 default:
49 break; 49 break;
@@ -72,7 +72,7 @@ nvbios_disp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len, u8 *sub,
72{ 72{
73 u16 data = nvbios_disp_entry(bios, idx, ver, len, sub); 73 u16 data = nvbios_disp_entry(bios, idx, ver, len, sub);
74 if (data && *len >= 2) { 74 if (data && *len >= 2) {
75 info->data = nv_ro16(bios, data + 0); 75 info->data = nvbios_rd16(bios, data + 0);
76 return data; 76 return data;
77 } 77 }
78 return 0x0000; 78 return 0x0000;
@@ -85,7 +85,7 @@ nvbios_outp_entry(struct nvkm_bios *bios, u8 idx,
85 struct nvbios_disp info; 85 struct nvbios_disp info;
86 u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info); 86 u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info);
87 if (data) { 87 if (data) {
88 *cnt = nv_ro08(bios, info.data + 0x05); 88 *cnt = nvbios_rd08(bios, info.data + 0x05);
89 *len = 0x06; 89 *len = 0x06;
90 data = info.data; 90 data = info.data;
91 } 91 }
@@ -98,15 +98,15 @@ nvbios_outp_parse(struct nvkm_bios *bios, u8 idx,
98{ 98{
99 u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len); 99 u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
100 if (data && *hdr >= 0x0a) { 100 if (data && *hdr >= 0x0a) {
101 info->type = nv_ro16(bios, data + 0x00); 101 info->type = nvbios_rd16(bios, data + 0x00);
102 info->mask = nv_ro32(bios, data + 0x02); 102 info->mask = nvbios_rd32(bios, data + 0x02);
103 if (*ver <= 0x20) /* match any link */ 103 if (*ver <= 0x20) /* match any link */
104 info->mask |= 0x00c0; 104 info->mask |= 0x00c0;
105 info->script[0] = nv_ro16(bios, data + 0x06); 105 info->script[0] = nvbios_rd16(bios, data + 0x06);
106 info->script[1] = nv_ro16(bios, data + 0x08); 106 info->script[1] = nvbios_rd16(bios, data + 0x08);
107 info->script[2] = 0x0000; 107 info->script[2] = 0x0000;
108 if (*hdr >= 0x0c) 108 if (*hdr >= 0x0c)
109 info->script[2] = nv_ro16(bios, data + 0x0a); 109 info->script[2] = nvbios_rd16(bios, data + 0x0a);
110 return data; 110 return data;
111 } 111 }
112 return 0x0000; 112 return 0x0000;
@@ -141,9 +141,9 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
141{ 141{
142 u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len); 142 u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
143 if (data) { 143 if (data) {
144 info->match = nv_ro16(bios, data + 0x00); 144 info->match = nvbios_rd16(bios, data + 0x00);
145 info->clkcmp[0] = nv_ro16(bios, data + 0x02); 145 info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
146 info->clkcmp[1] = nv_ro16(bios, data + 0x04); 146 info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
147 } 147 }
148 return data; 148 return data;
149} 149}
@@ -164,8 +164,8 @@ u16
164nvbios_oclk_match(struct nvkm_bios *bios, u16 cmp, u32 khz) 164nvbios_oclk_match(struct nvkm_bios *bios, u16 cmp, u32 khz)
165{ 165{
166 while (cmp) { 166 while (cmp) {
167 if (khz / 10 >= nv_ro16(bios, cmp + 0x00)) 167 if (khz / 10 >= nvbios_rd16(bios, cmp + 0x00))
168 return nv_ro16(bios, cmp + 0x02); 168 return nvbios_rd16(bios, cmp + 0x02);
169 cmp += 0x04; 169 cmp += 0x04;
170 } 170 }
171 return 0x0000; 171 return 0x0000;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index 95970faae6c8..05332476354a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -32,17 +32,17 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
32 32
33 if (!bit_entry(bios, 'd', &d)) { 33 if (!bit_entry(bios, 'd', &d)) {
34 if (d.version == 1 && d.length >= 2) { 34 if (d.version == 1 && d.length >= 2) {
35 u16 data = nv_ro16(bios, d.offset); 35 u16 data = nvbios_rd16(bios, d.offset);
36 if (data) { 36 if (data) {
37 *ver = nv_ro08(bios, data + 0x00); 37 *ver = nvbios_rd08(bios, data + 0x00);
38 switch (*ver) { 38 switch (*ver) {
39 case 0x21: 39 case 0x21:
40 case 0x30: 40 case 0x30:
41 case 0x40: 41 case 0x40:
42 case 0x41: 42 case 0x41:
43 *hdr = nv_ro08(bios, data + 0x01); 43 *hdr = nvbios_rd08(bios, data + 0x01);
44 *len = nv_ro08(bios, data + 0x02); 44 *len = nvbios_rd08(bios, data + 0x02);
45 *cnt = nv_ro08(bios, data + 0x03); 45 *cnt = nvbios_rd08(bios, data + 0x03);
46 return data; 46 return data;
47 default: 47 default:
48 break; 48 break;
@@ -60,17 +60,17 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
60{ 60{
61 u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len); 61 u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
62 if (data && idx < *cnt) { 62 if (data && idx < *cnt) {
63 u16 outp = nv_ro16(bios, data + *hdr + idx * *len); 63 u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len);
64 switch (*ver * !!outp) { 64 switch (*ver * !!outp) {
65 case 0x21: 65 case 0x21:
66 case 0x30: 66 case 0x30:
67 *hdr = nv_ro08(bios, data + 0x04); 67 *hdr = nvbios_rd08(bios, data + 0x04);
68 *len = nv_ro08(bios, data + 0x05); 68 *len = nvbios_rd08(bios, data + 0x05);
69 *cnt = nv_ro08(bios, outp + 0x04); 69 *cnt = nvbios_rd08(bios, outp + 0x04);
70 break; 70 break;
71 case 0x40: 71 case 0x40:
72 case 0x41: 72 case 0x41:
73 *hdr = nv_ro08(bios, data + 0x04); 73 *hdr = nvbios_rd08(bios, data + 0x04);
74 *cnt = 0; 74 *cnt = 0;
75 *len = 0; 75 *len = 0;
76 break; 76 break;
@@ -91,31 +91,31 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
91 u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len); 91 u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
92 memset(info, 0x00, sizeof(*info)); 92 memset(info, 0x00, sizeof(*info));
93 if (data && *ver) { 93 if (data && *ver) {
94 info->type = nv_ro16(bios, data + 0x00); 94 info->type = nvbios_rd16(bios, data + 0x00);
95 info->mask = nv_ro16(bios, data + 0x02); 95 info->mask = nvbios_rd16(bios, data + 0x02);
96 switch (*ver) { 96 switch (*ver) {
97 case 0x21: 97 case 0x21:
98 case 0x30: 98 case 0x30:
99 info->flags = nv_ro08(bios, data + 0x05); 99 info->flags = nvbios_rd08(bios, data + 0x05);
100 info->script[0] = nv_ro16(bios, data + 0x06); 100 info->script[0] = nvbios_rd16(bios, data + 0x06);
101 info->script[1] = nv_ro16(bios, data + 0x08); 101 info->script[1] = nvbios_rd16(bios, data + 0x08);
102 info->lnkcmp = nv_ro16(bios, data + 0x0a); 102 info->lnkcmp = nvbios_rd16(bios, data + 0x0a);
103 if (*len >= 0x0f) { 103 if (*len >= 0x0f) {
104 info->script[2] = nv_ro16(bios, data + 0x0c); 104 info->script[2] = nvbios_rd16(bios, data + 0x0c);
105 info->script[3] = nv_ro16(bios, data + 0x0e); 105 info->script[3] = nvbios_rd16(bios, data + 0x0e);
106 } 106 }
107 if (*len >= 0x11) 107 if (*len >= 0x11)
108 info->script[4] = nv_ro16(bios, data + 0x10); 108 info->script[4] = nvbios_rd16(bios, data + 0x10);
109 break; 109 break;
110 case 0x40: 110 case 0x40:
111 case 0x41: 111 case 0x41:
112 info->flags = nv_ro08(bios, data + 0x04); 112 info->flags = nvbios_rd08(bios, data + 0x04);
113 info->script[0] = nv_ro16(bios, data + 0x05); 113 info->script[0] = nvbios_rd16(bios, data + 0x05);
114 info->script[1] = nv_ro16(bios, data + 0x07); 114 info->script[1] = nvbios_rd16(bios, data + 0x07);
115 info->lnkcmp = nv_ro16(bios, data + 0x09); 115 info->lnkcmp = nvbios_rd16(bios, data + 0x09);
116 info->script[2] = nv_ro16(bios, data + 0x0b); 116 info->script[2] = nvbios_rd16(bios, data + 0x0b);
117 info->script[3] = nv_ro16(bios, data + 0x0d); 117 info->script[3] = nvbios_rd16(bios, data + 0x0d);
118 info->script[4] = nv_ro16(bios, data + 0x0f); 118 info->script[4] = nvbios_rd16(bios, data + 0x0f);
119 break; 119 break;
120 default: 120 default:
121 data = 0x0000; 121 data = 0x0000;
@@ -147,8 +147,9 @@ nvbios_dpcfg_entry(struct nvkm_bios *bios, u16 outp, u8 idx,
147 if (*ver >= 0x40) { 147 if (*ver >= 0x40) {
148 outp = nvbios_dp_table(bios, ver, hdr, cnt, len); 148 outp = nvbios_dp_table(bios, ver, hdr, cnt, len);
149 *hdr = *hdr + (*len * * cnt); 149 *hdr = *hdr + (*len * * cnt);
150 *len = nv_ro08(bios, outp + 0x06); 150 *len = nvbios_rd08(bios, outp + 0x06);
151 *cnt = nv_ro08(bios, outp + 0x07); 151 *cnt = nvbios_rd08(bios, outp + 0x07) *
152 nvbios_rd08(bios, outp + 0x05);
152 } 153 }
153 154
154 if (idx < *cnt) 155 if (idx < *cnt)
@@ -167,17 +168,17 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
167 if (data) { 168 if (data) {
168 switch (*ver) { 169 switch (*ver) {
169 case 0x21: 170 case 0x21:
170 info->dc = nv_ro08(bios, data + 0x02); 171 info->dc = nvbios_rd08(bios, data + 0x02);
171 info->pe = nv_ro08(bios, data + 0x03); 172 info->pe = nvbios_rd08(bios, data + 0x03);
172 info->tx_pu = nv_ro08(bios, data + 0x04); 173 info->tx_pu = nvbios_rd08(bios, data + 0x04);
173 break; 174 break;
174 case 0x30: 175 case 0x30:
175 case 0x40: 176 case 0x40:
176 case 0x41: 177 case 0x41:
177 info->pc = nv_ro08(bios, data + 0x00); 178 info->pc = nvbios_rd08(bios, data + 0x00);
178 info->dc = nv_ro08(bios, data + 0x01); 179 info->dc = nvbios_rd08(bios, data + 0x01);
179 info->pe = nv_ro08(bios, data + 0x02); 180 info->pe = nvbios_rd08(bios, data + 0x02);
180 info->tx_pu = nv_ro08(bios, data + 0x03) & 0x0f; 181 info->tx_pu = nvbios_rd08(bios, data + 0x03);
181 break; 182 break;
182 default: 183 default:
183 data = 0x0000; 184 data = 0x0000;
@@ -196,17 +197,15 @@ nvbios_dpcfg_match(struct nvkm_bios *bios, u16 outp, u8 pc, u8 vs, u8 pe,
196 u16 data; 197 u16 data;
197 198
198 if (*ver >= 0x30) { 199 if (*ver >= 0x30) {
199 /*XXX: there's a second set of these on at least 4.1, that
200 * i've witnessed nvidia using instead of the first
201 * on gm204. figure out what/why
202 */
203 const u8 vsoff[] = { 0, 4, 7, 9 }; 200 const u8 vsoff[] = { 0, 4, 7, 9 };
204 idx = (pc * 10) + vsoff[vs] + pe; 201 idx = (pc * 10) + vsoff[vs] + pe;
202 if (*ver >= 0x40 && *hdr >= 0x12)
203 idx += nvbios_rd08(bios, outp + 0x11) * 40;
205 } else { 204 } else {
206 while ((data = nvbios_dpcfg_entry(bios, outp, ++idx, 205 while ((data = nvbios_dpcfg_entry(bios, outp, ++idx,
207 ver, hdr, cnt, len))) { 206 ver, hdr, cnt, len))) {
208 if (nv_ro08(bios, data + 0x00) == vs && 207 if (nvbios_rd08(bios, data + 0x00) == vs &&
209 nv_ro08(bios, data + 0x01) == pe) 208 nvbios_rd08(bios, data + 0x01) == pe)
210 break; 209 break;
211 } 210 }
212 } 211 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
index a8503a1854c4..c9e6f6ff7c50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
@@ -35,14 +35,14 @@ extdev_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
35 if (!dcb || (dcb_ver != 0x30 && dcb_ver != 0x40)) 35 if (!dcb || (dcb_ver != 0x30 && dcb_ver != 0x40))
36 return 0x0000; 36 return 0x0000;
37 37
38 extdev = nv_ro16(bios, dcb + 18); 38 extdev = nvbios_rd16(bios, dcb + 18);
39 if (!extdev) 39 if (!extdev)
40 return 0x0000; 40 return 0x0000;
41 41
42 *ver = nv_ro08(bios, extdev + 0); 42 *ver = nvbios_rd08(bios, extdev + 0);
43 *hdr = nv_ro08(bios, extdev + 1); 43 *hdr = nvbios_rd08(bios, extdev + 1);
44 *cnt = nv_ro08(bios, extdev + 2); 44 *cnt = nvbios_rd08(bios, extdev + 2);
45 *len = nv_ro08(bios, extdev + 3); 45 *len = nvbios_rd08(bios, extdev + 3);
46 return extdev + *hdr; 46 return extdev + *hdr;
47} 47}
48 48
@@ -60,9 +60,9 @@ static void
60extdev_parse_entry(struct nvkm_bios *bios, u16 offset, 60extdev_parse_entry(struct nvkm_bios *bios, u16 offset,
61 struct nvbios_extdev_func *entry) 61 struct nvbios_extdev_func *entry)
62{ 62{
63 entry->type = nv_ro08(bios, offset + 0); 63 entry->type = nvbios_rd08(bios, offset + 0);
64 entry->addr = nv_ro08(bios, offset + 1); 64 entry->addr = nvbios_rd08(bios, offset + 1);
65 entry->bus = (nv_ro08(bios, offset + 2) >> 4) & 1; 65 entry->bus = (nvbios_rd08(bios, offset + 2) >> 4) & 1;
66} 66}
67 67
68int 68int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
index 8dba70d9d9a9..43006db6fd58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
@@ -33,15 +33,15 @@ nvbios_fan_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
33 33
34 if (!bit_entry(bios, 'P', &bit_P)) { 34 if (!bit_entry(bios, 'P', &bit_P)) {
35 if (bit_P.version == 2 && bit_P.length >= 0x5a) 35 if (bit_P.version == 2 && bit_P.length >= 0x5a)
36 fan = nv_ro16(bios, bit_P.offset + 0x58); 36 fan = nvbios_rd16(bios, bit_P.offset + 0x58);
37 37
38 if (fan) { 38 if (fan) {
39 *ver = nv_ro08(bios, fan + 0); 39 *ver = nvbios_rd08(bios, fan + 0);
40 switch (*ver) { 40 switch (*ver) {
41 case 0x10: 41 case 0x10:
42 *hdr = nv_ro08(bios, fan + 1); 42 *hdr = nvbios_rd08(bios, fan + 1);
43 *len = nv_ro08(bios, fan + 2); 43 *len = nvbios_rd08(bios, fan + 2);
44 *cnt = nv_ro08(bios, fan + 3); 44 *cnt = nvbios_rd08(bios, fan + 3);
45 return fan; 45 return fan;
46 default: 46 default:
47 break; 47 break;
@@ -69,7 +69,7 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
69 69
70 u16 data = nvbios_fan_entry(bios, 0, &ver, &hdr, &cnt, &len); 70 u16 data = nvbios_fan_entry(bios, 0, &ver, &hdr, &cnt, &len);
71 if (data) { 71 if (data) {
72 u8 type = nv_ro08(bios, data + 0x00); 72 u8 type = nvbios_rd08(bios, data + 0x00);
73 switch (type) { 73 switch (type) {
74 case 0: 74 case 0:
75 fan->type = NVBIOS_THERM_FAN_TOGGLE; 75 fan->type = NVBIOS_THERM_FAN_TOGGLE;
@@ -83,10 +83,10 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
83 fan->type = NVBIOS_THERM_FAN_UNK; 83 fan->type = NVBIOS_THERM_FAN_UNK;
84 } 84 }
85 85
86 fan->min_duty = nv_ro08(bios, data + 0x02); 86 fan->min_duty = nvbios_rd08(bios, data + 0x02);
87 fan->max_duty = nv_ro08(bios, data + 0x03); 87 fan->max_duty = nvbios_rd08(bios, data + 0x03);
88 88
89 fan->pwm_freq = nv_ro32(bios, data + 0x0b) & 0xffffff; 89 fan->pwm_freq = nvbios_rd32(bios, data + 0x0b) & 0xffffff;
90 } 90 }
91 91
92 return data; 92 return data;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
index 8ce154d88f51..2107b558437a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
@@ -33,22 +33,22 @@ dcb_gpio_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
33 u16 dcb = dcb_table(bios, ver, hdr, cnt, len); 33 u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
34 if (dcb) { 34 if (dcb) {
35 if (*ver >= 0x30 && *hdr >= 0x0c) 35 if (*ver >= 0x30 && *hdr >= 0x0c)
36 data = nv_ro16(bios, dcb + 0x0a); 36 data = nvbios_rd16(bios, dcb + 0x0a);
37 else 37 else
38 if (*ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13) 38 if (*ver >= 0x22 && nvbios_rd08(bios, dcb - 1) >= 0x13)
39 data = nv_ro16(bios, dcb - 0x0f); 39 data = nvbios_rd16(bios, dcb - 0x0f);
40 40
41 if (data) { 41 if (data) {
42 *ver = nv_ro08(bios, data + 0x00); 42 *ver = nvbios_rd08(bios, data + 0x00);
43 if (*ver < 0x30) { 43 if (*ver < 0x30) {
44 *hdr = 3; 44 *hdr = 3;
45 *cnt = nv_ro08(bios, data + 0x02); 45 *cnt = nvbios_rd08(bios, data + 0x02);
46 *len = nv_ro08(bios, data + 0x01); 46 *len = nvbios_rd08(bios, data + 0x01);
47 } else 47 } else
48 if (*ver <= 0x41) { 48 if (*ver <= 0x41) {
49 *hdr = nv_ro08(bios, data + 0x01); 49 *hdr = nvbios_rd08(bios, data + 0x01);
50 *cnt = nv_ro08(bios, data + 0x02); 50 *cnt = nvbios_rd08(bios, data + 0x02);
51 *len = nv_ro08(bios, data + 0x03); 51 *len = nvbios_rd08(bios, data + 0x03);
52 } else { 52 } else {
53 data = 0x0000; 53 data = 0x0000;
54 } 54 }
@@ -81,7 +81,7 @@ dcb_gpio_parse(struct nvkm_bios *bios, int idx, int ent, u8 *ver, u8 *len,
81 u16 data = dcb_gpio_entry(bios, idx, ent, ver, len); 81 u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
82 if (data) { 82 if (data) {
83 if (*ver < 0x40) { 83 if (*ver < 0x40) {
84 u16 info = nv_ro16(bios, data); 84 u16 info = nvbios_rd16(bios, data);
85 *gpio = (struct dcb_gpio_func) { 85 *gpio = (struct dcb_gpio_func) {
86 .line = (info & 0x001f) >> 0, 86 .line = (info & 0x001f) >> 0,
87 .func = (info & 0x07e0) >> 5, 87 .func = (info & 0x07e0) >> 5,
@@ -91,7 +91,7 @@ dcb_gpio_parse(struct nvkm_bios *bios, int idx, int ent, u8 *ver, u8 *len,
91 }; 91 };
92 } else 92 } else
93 if (*ver < 0x41) { 93 if (*ver < 0x41) {
94 u32 info = nv_ro32(bios, data); 94 u32 info = nvbios_rd32(bios, data);
95 *gpio = (struct dcb_gpio_func) { 95 *gpio = (struct dcb_gpio_func) {
96 .line = (info & 0x0000001f) >> 0, 96 .line = (info & 0x0000001f) >> 0,
97 .func = (info & 0x0000ff00) >> 8, 97 .func = (info & 0x0000ff00) >> 8,
@@ -100,8 +100,8 @@ dcb_gpio_parse(struct nvkm_bios *bios, int idx, int ent, u8 *ver, u8 *len,
100 .param = !!(info & 0x80000000), 100 .param = !!(info & 0x80000000),
101 }; 101 };
102 } else { 102 } else {
103 u32 info = nv_ro32(bios, data + 0); 103 u32 info = nvbios_rd32(bios, data + 0);
104 u8 info1 = nv_ro32(bios, data + 4); 104 u8 info1 = nvbios_rd32(bios, data + 4);
105 *gpio = (struct dcb_gpio_func) { 105 *gpio = (struct dcb_gpio_func) {
106 .line = (info & 0x0000003f) >> 0, 106 .line = (info & 0x0000003f) >> 0,
107 .func = (info & 0x0000ff00) >> 8, 107 .func = (info & 0x0000ff00) >> 8,
@@ -131,8 +131,8 @@ dcb_gpio_match(struct nvkm_bios *bios, int idx, u8 func, u8 line,
131 /* DCB 2.2, fixed TVDAC GPIO data */ 131 /* DCB 2.2, fixed TVDAC GPIO data */
132 if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) { 132 if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) {
133 if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) { 133 if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) {
134 u8 conf = nv_ro08(bios, data - 5); 134 u8 conf = nvbios_rd08(bios, data - 5);
135 u8 addr = nv_ro08(bios, data - 4); 135 u8 addr = nvbios_rd08(bios, data - 4);
136 if (conf & 0x01) { 136 if (conf & 0x01) {
137 *gpio = (struct dcb_gpio_func) { 137 *gpio = (struct dcb_gpio_func) {
138 .func = DCB_GPIO_TVDAC0, 138 .func = DCB_GPIO_TVDAC0,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
index c4e1f085ee10..0fc60be32727 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
@@ -32,21 +32,21 @@ dcb_i2c_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
32 u16 dcb = dcb_table(bios, ver, hdr, cnt, len); 32 u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
33 if (dcb) { 33 if (dcb) {
34 if (*ver >= 0x15) 34 if (*ver >= 0x15)
35 i2c = nv_ro16(bios, dcb + 2); 35 i2c = nvbios_rd16(bios, dcb + 2);
36 if (*ver >= 0x30) 36 if (*ver >= 0x30)
37 i2c = nv_ro16(bios, dcb + 4); 37 i2c = nvbios_rd16(bios, dcb + 4);
38 } 38 }
39 39
40 if (i2c && *ver >= 0x42) { 40 if (i2c && *ver >= 0x42) {
41 nv_warn(bios, "ccb %02x not supported\n", *ver); 41 nvkm_warn(&bios->subdev, "ccb %02x not supported\n", *ver);
42 return 0x0000; 42 return 0x0000;
43 } 43 }
44 44
45 if (i2c && *ver >= 0x30) { 45 if (i2c && *ver >= 0x30) {
46 *ver = nv_ro08(bios, i2c + 0); 46 *ver = nvbios_rd08(bios, i2c + 0);
47 *hdr = nv_ro08(bios, i2c + 1); 47 *hdr = nvbios_rd08(bios, i2c + 1);
48 *cnt = nv_ro08(bios, i2c + 2); 48 *cnt = nvbios_rd08(bios, i2c + 2);
49 *len = nv_ro08(bios, i2c + 3); 49 *len = nvbios_rd08(bios, i2c + 3);
50 } else { 50 } else {
51 *ver = *ver; /* use DCB version */ 51 *ver = *ver; /* use DCB version */
52 *hdr = 0; 52 *hdr = 0;
@@ -70,13 +70,14 @@ dcb_i2c_entry(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len)
70int 70int
71dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info) 71dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
72{ 72{
73 struct nvkm_subdev *subdev = &bios->subdev;
73 u8 ver, len; 74 u8 ver, len;
74 u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); 75 u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
75 if (ent) { 76 if (ent) {
76 if (ver >= 0x41) { 77 if (ver >= 0x41) {
77 u32 ent_value = nv_ro32(bios, ent); 78 u32 ent_value = nvbios_rd32(bios, ent);
78 u8 i2c_port = (ent_value >> 27) & 0x1f; 79 u8 i2c_port = (ent_value >> 0) & 0x1f;
79 u8 dpaux_port = (ent_value >> 22) & 0x1f; 80 u8 dpaux_port = (ent_value >> 5) & 0x1f;
80 /* value 0x1f means unused according to DCB 4.x spec */ 81 /* value 0x1f means unused according to DCB 4.x spec */
81 if (i2c_port == 0x1f && dpaux_port == 0x1f) 82 if (i2c_port == 0x1f && dpaux_port == 0x1f)
82 info->type = DCB_I2C_UNUSED; 83 info->type = DCB_I2C_UNUSED;
@@ -84,9 +85,9 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
84 info->type = DCB_I2C_PMGR; 85 info->type = DCB_I2C_PMGR;
85 } else 86 } else
86 if (ver >= 0x30) { 87 if (ver >= 0x30) {
87 info->type = nv_ro08(bios, ent + 0x03); 88 info->type = nvbios_rd08(bios, ent + 0x03);
88 } else { 89 } else {
89 info->type = nv_ro08(bios, ent + 0x03) & 0x07; 90 info->type = nvbios_rd08(bios, ent + 0x03) & 0x07;
90 if (info->type == 0x07) 91 if (info->type == 0x07)
91 info->type = DCB_I2C_UNUSED; 92 info->type = DCB_I2C_UNUSED;
92 } 93 }
@@ -98,27 +99,27 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
98 99
99 switch (info->type) { 100 switch (info->type) {
100 case DCB_I2C_NV04_BIT: 101 case DCB_I2C_NV04_BIT:
101 info->drive = nv_ro08(bios, ent + 0); 102 info->drive = nvbios_rd08(bios, ent + 0);
102 info->sense = nv_ro08(bios, ent + 1); 103 info->sense = nvbios_rd08(bios, ent + 1);
103 return 0; 104 return 0;
104 case DCB_I2C_NV4E_BIT: 105 case DCB_I2C_NV4E_BIT:
105 info->drive = nv_ro08(bios, ent + 1); 106 info->drive = nvbios_rd08(bios, ent + 1);
106 return 0; 107 return 0;
107 case DCB_I2C_NVIO_BIT: 108 case DCB_I2C_NVIO_BIT:
108 info->drive = nv_ro08(bios, ent + 0) & 0x0f; 109 info->drive = nvbios_rd08(bios, ent + 0) & 0x0f;
109 if (nv_ro08(bios, ent + 1) & 0x01) 110 if (nvbios_rd08(bios, ent + 1) & 0x01)
110 info->share = nv_ro08(bios, ent + 1) >> 1; 111 info->share = nvbios_rd08(bios, ent + 1) >> 1;
111 return 0; 112 return 0;
112 case DCB_I2C_NVIO_AUX: 113 case DCB_I2C_NVIO_AUX:
113 info->auxch = nv_ro08(bios, ent + 0) & 0x0f; 114 info->auxch = nvbios_rd08(bios, ent + 0) & 0x0f;
114 if (nv_ro08(bios, ent + 1) & 0x01) 115 if (nvbios_rd08(bios, ent + 1) & 0x01)
115 info->share = info->auxch; 116 info->share = info->auxch;
116 return 0; 117 return 0;
117 case DCB_I2C_PMGR: 118 case DCB_I2C_PMGR:
118 info->drive = (nv_ro16(bios, ent + 0) & 0x01f) >> 0; 119 info->drive = (nvbios_rd16(bios, ent + 0) & 0x01f) >> 0;
119 if (info->drive == 0x1f) 120 if (info->drive == 0x1f)
120 info->drive = DCB_I2C_UNUSED; 121 info->drive = DCB_I2C_UNUSED;
121 info->auxch = (nv_ro16(bios, ent + 0) & 0x3e0) >> 5; 122 info->auxch = (nvbios_rd16(bios, ent + 0) & 0x3e0) >> 5;
122 if (info->auxch == 0x1f) 123 if (info->auxch == 0x1f)
123 info->auxch = DCB_I2C_UNUSED; 124 info->auxch = DCB_I2C_UNUSED;
124 info->share = info->auxch; 125 info->share = info->auxch;
@@ -126,7 +127,7 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
126 case DCB_I2C_UNUSED: 127 case DCB_I2C_UNUSED:
127 return 0; 128 return 0;
128 default: 129 default:
129 nv_warn(bios, "unknown i2c type %d\n", info->type); 130 nvkm_warn(subdev, "unknown i2c type %d\n", info->type);
130 info->type = DCB_I2C_UNUSED; 131 info->type = DCB_I2C_UNUSED;
131 return 0; 132 return 0;
132 } 133 }
@@ -136,21 +137,21 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
136 /* BMP (from v4.0 has i2c info in the structure, it's in a 137 /* BMP (from v4.0 has i2c info in the structure, it's in a
137 * fixed location on earlier VBIOS 138 * fixed location on earlier VBIOS
138 */ 139 */
139 if (nv_ro08(bios, bios->bmp_offset + 5) < 4) 140 if (nvbios_rd08(bios, bios->bmp_offset + 5) < 4)
140 ent = 0x0048; 141 ent = 0x0048;
141 else 142 else
142 ent = 0x0036 + bios->bmp_offset; 143 ent = 0x0036 + bios->bmp_offset;
143 144
144 if (idx == 0) { 145 if (idx == 0) {
145 info->drive = nv_ro08(bios, ent + 4); 146 info->drive = nvbios_rd08(bios, ent + 4);
146 if (!info->drive) info->drive = 0x3f; 147 if (!info->drive) info->drive = 0x3f;
147 info->sense = nv_ro08(bios, ent + 5); 148 info->sense = nvbios_rd08(bios, ent + 5);
148 if (!info->sense) info->sense = 0x3e; 149 if (!info->sense) info->sense = 0x3e;
149 } else 150 } else
150 if (idx == 1) { 151 if (idx == 1) {
151 info->drive = nv_ro08(bios, ent + 6); 152 info->drive = nvbios_rd08(bios, ent + 6);
152 if (!info->drive) info->drive = 0x37; 153 if (!info->drive) info->drive = 0x37;
153 info->sense = nv_ro08(bios, ent + 7); 154 info->sense = nvbios_rd08(bios, ent + 7);
154 if (!info->sense) info->sense = 0x36; 155 if (!info->sense) info->sense = 0x36;
155 } 156 }
156 157
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
index 1815540a0e8b..74b14cf09308 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
@@ -29,20 +29,21 @@
29static bool 29static bool
30nvbios_imagen(struct nvkm_bios *bios, struct nvbios_image *image) 30nvbios_imagen(struct nvkm_bios *bios, struct nvbios_image *image)
31{ 31{
32 struct nvkm_subdev *subdev = &bios->subdev;
32 struct nvbios_pcirT pcir; 33 struct nvbios_pcirT pcir;
33 struct nvbios_npdeT npde; 34 struct nvbios_npdeT npde;
34 u8 ver; 35 u8 ver;
35 u16 hdr; 36 u16 hdr;
36 u32 data; 37 u32 data;
37 38
38 switch ((data = nv_ro16(bios, image->base + 0x00))) { 39 switch ((data = nvbios_rd16(bios, image->base + 0x00))) {
39 case 0xaa55: 40 case 0xaa55:
40 case 0xbb77: 41 case 0xbb77:
41 case 0x4e56: /* NV */ 42 case 0x4e56: /* NV */
42 break; 43 break;
43 default: 44 default:
44 nv_debug(bios, "%08x: ROM signature (%04x) unknown\n", 45 nvkm_debug(subdev, "%08x: ROM signature (%04x) unknown\n",
45 image->base, data); 46 image->base, data);
46 return false; 47 return false;
47 } 48 }
48 49
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index f4611e3f0971..65af31441e9c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -31,18 +31,18 @@
31#include <subdev/bios/init.h> 31#include <subdev/bios/init.h>
32#include <subdev/bios/ramcfg.h> 32#include <subdev/bios/ramcfg.h>
33 33
34#include <core/device.h>
35#include <subdev/devinit.h> 34#include <subdev/devinit.h>
36#include <subdev/gpio.h> 35#include <subdev/gpio.h>
37#include <subdev/i2c.h> 36#include <subdev/i2c.h>
38#include <subdev/vga.h> 37#include <subdev/vga.h>
39 38
40#define bioslog(lvl, fmt, args...) do { \ 39#define bioslog(lvl, fmt, args...) do { \
41 nv_printk(init->bios, lvl, "0x%04x[%c]: "fmt, init->offset, \ 40 nvkm_printk(init->subdev, lvl, info, "0x%04x[%c]: "fmt, \
42 init_exec(init) ? '0' + (init->nested - 1) : ' ', ##args); \ 41 init->offset, init_exec(init) ? \
42 '0' + (init->nested - 1) : ' ', ##args); \
43} while(0) 43} while(0)
44#define cont(fmt, args...) do { \ 44#define cont(fmt, args...) do { \
45 if (nv_subdev(init->bios)->debug >= NV_DBG_TRACE) \ 45 if (init->subdev->debug >= NV_DBG_TRACE) \
46 printk(fmt, ##args); \ 46 printk(fmt, ##args); \
47} while(0) 47} while(0)
48#define trace(fmt, args...) bioslog(TRACE, fmt, ##args) 48#define trace(fmt, args...) bioslog(TRACE, fmt, ##args)
@@ -141,7 +141,7 @@ init_conn(struct nvbios_init *init)
141static inline u32 141static inline u32
142init_nvreg(struct nvbios_init *init, u32 reg) 142init_nvreg(struct nvbios_init *init, u32 reg)
143{ 143{
144 struct nvkm_devinit *devinit = nvkm_devinit(init->bios); 144 struct nvkm_devinit *devinit = init->bios->subdev.device->devinit;
145 145
146 /* C51 (at least) sometimes has the lower bits set which the VBIOS 146 /* C51 (at least) sometimes has the lower bits set which the VBIOS
147 * interprets to mean that access needs to go through certain IO 147 * interprets to mean that access needs to go through certain IO
@@ -154,7 +154,7 @@ init_nvreg(struct nvbios_init *init, u32 reg)
154 /* GF8+ display scripts need register addresses mangled a bit to 154 /* GF8+ display scripts need register addresses mangled a bit to
155 * select a specific CRTC/OR 155 * select a specific CRTC/OR
156 */ 156 */
157 if (nv_device(init->bios)->card_type >= NV_50) { 157 if (init->bios->subdev.device->card_type >= NV_50) {
158 if (reg & 0x80000000) { 158 if (reg & 0x80000000) {
159 reg += init_crtc(init) * 0x800; 159 reg += init_crtc(init) * 0x800;
160 reg &= ~0x80000000; 160 reg &= ~0x80000000;
@@ -173,35 +173,36 @@ init_nvreg(struct nvbios_init *init, u32 reg)
173 if (reg & ~0x00fffffc) 173 if (reg & ~0x00fffffc)
174 warn("unknown bits in register 0x%08x\n", reg); 174 warn("unknown bits in register 0x%08x\n", reg);
175 175
176 if (devinit->mmio) 176 return nvkm_devinit_mmio(devinit, reg);
177 reg = devinit->mmio(devinit, reg);
178 return reg;
179} 177}
180 178
181static u32 179static u32
182init_rd32(struct nvbios_init *init, u32 reg) 180init_rd32(struct nvbios_init *init, u32 reg)
183{ 181{
182 struct nvkm_device *device = init->bios->subdev.device;
184 reg = init_nvreg(init, reg); 183 reg = init_nvreg(init, reg);
185 if (reg != ~0 && init_exec(init)) 184 if (reg != ~0 && init_exec(init))
186 return nv_rd32(init->subdev, reg); 185 return nvkm_rd32(device, reg);
187 return 0x00000000; 186 return 0x00000000;
188} 187}
189 188
190static void 189static void
191init_wr32(struct nvbios_init *init, u32 reg, u32 val) 190init_wr32(struct nvbios_init *init, u32 reg, u32 val)
192{ 191{
192 struct nvkm_device *device = init->bios->subdev.device;
193 reg = init_nvreg(init, reg); 193 reg = init_nvreg(init, reg);
194 if (reg != ~0 && init_exec(init)) 194 if (reg != ~0 && init_exec(init))
195 nv_wr32(init->subdev, reg, val); 195 nvkm_wr32(device, reg, val);
196} 196}
197 197
198static u32 198static u32
199init_mask(struct nvbios_init *init, u32 reg, u32 mask, u32 val) 199init_mask(struct nvbios_init *init, u32 reg, u32 mask, u32 val)
200{ 200{
201 struct nvkm_device *device = init->bios->subdev.device;
201 reg = init_nvreg(init, reg); 202 reg = init_nvreg(init, reg);
202 if (reg != ~0 && init_exec(init)) { 203 if (reg != ~0 && init_exec(init)) {
203 u32 tmp = nv_rd32(init->subdev, reg); 204 u32 tmp = nvkm_rd32(device, reg);
204 nv_wr32(init->subdev, reg, (tmp & ~mask) | val); 205 nvkm_wr32(device, reg, (tmp & ~mask) | val);
205 return tmp; 206 return tmp;
206 } 207 }
207 return 0x00000000; 208 return 0x00000000;
@@ -211,7 +212,7 @@ static u8
211init_rdport(struct nvbios_init *init, u16 port) 212init_rdport(struct nvbios_init *init, u16 port)
212{ 213{
213 if (init_exec(init)) 214 if (init_exec(init))
214 return nv_rdport(init->subdev, init->crtc, port); 215 return nvkm_rdport(init->subdev->device, init->crtc, port);
215 return 0x00; 216 return 0x00;
216} 217}
217 218
@@ -219,7 +220,7 @@ static void
219init_wrport(struct nvbios_init *init, u16 port, u8 value) 220init_wrport(struct nvbios_init *init, u16 port, u8 value)
220{ 221{
221 if (init_exec(init)) 222 if (init_exec(init))
222 nv_wrport(init->subdev, init->crtc, port, value); 223 nvkm_wrport(init->subdev->device, init->crtc, port, value);
223} 224}
224 225
225static u8 226static u8
@@ -228,7 +229,7 @@ init_rdvgai(struct nvbios_init *init, u16 port, u8 index)
228 struct nvkm_subdev *subdev = init->subdev; 229 struct nvkm_subdev *subdev = init->subdev;
229 if (init_exec(init)) { 230 if (init_exec(init)) {
230 int head = init->crtc < 0 ? 0 : init->crtc; 231 int head = init->crtc < 0 ? 0 : init->crtc;
231 return nv_rdvgai(subdev, head, port, index); 232 return nvkm_rdvgai(subdev->device, head, port, index);
232 } 233 }
233 return 0x00; 234 return 0x00;
234} 235}
@@ -236,80 +237,80 @@ init_rdvgai(struct nvbios_init *init, u16 port, u8 index)
236static void 237static void
237init_wrvgai(struct nvbios_init *init, u16 port, u8 index, u8 value) 238init_wrvgai(struct nvbios_init *init, u16 port, u8 index, u8 value)
238{ 239{
240 struct nvkm_device *device = init->subdev->device;
241
239 /* force head 0 for updates to cr44, it only exists on first head */ 242 /* force head 0 for updates to cr44, it only exists on first head */
240 if (nv_device(init->subdev)->card_type < NV_50) { 243 if (device->card_type < NV_50) {
241 if (port == 0x03d4 && index == 0x44) 244 if (port == 0x03d4 && index == 0x44)
242 init->crtc = 0; 245 init->crtc = 0;
243 } 246 }
244 247
245 if (init_exec(init)) { 248 if (init_exec(init)) {
246 int head = init->crtc < 0 ? 0 : init->crtc; 249 int head = init->crtc < 0 ? 0 : init->crtc;
247 nv_wrvgai(init->subdev, head, port, index, value); 250 nvkm_wrvgai(device, head, port, index, value);
248 } 251 }
249 252
250 /* select head 1 if cr44 write selected it */ 253 /* select head 1 if cr44 write selected it */
251 if (nv_device(init->subdev)->card_type < NV_50) { 254 if (device->card_type < NV_50) {
252 if (port == 0x03d4 && index == 0x44 && value == 3) 255 if (port == 0x03d4 && index == 0x44 && value == 3)
253 init->crtc = 1; 256 init->crtc = 1;
254 } 257 }
255} 258}
256 259
257static struct nvkm_i2c_port * 260static struct i2c_adapter *
258init_i2c(struct nvbios_init *init, int index) 261init_i2c(struct nvbios_init *init, int index)
259{ 262{
260 struct nvkm_i2c *i2c = nvkm_i2c(init->bios); 263 struct nvkm_i2c *i2c = init->bios->subdev.device->i2c;
264 struct nvkm_i2c_bus *bus;
261 265
262 if (index == 0xff) { 266 if (index == 0xff) {
263 index = NV_I2C_DEFAULT(0); 267 index = NVKM_I2C_BUS_PRI;
264 if (init->outp && init->outp->i2c_upper_default) 268 if (init->outp && init->outp->i2c_upper_default)
265 index = NV_I2C_DEFAULT(1); 269 index = NVKM_I2C_BUS_SEC;
266 } else
267 if (index < 0) {
268 if (!init->outp) {
269 if (init_exec(init))
270 error("script needs output for i2c\n");
271 return NULL;
272 }
273
274 if (index == -2 && init->outp->location) {
275 index = NV_I2C_TYPE_EXTAUX(init->outp->extdev);
276 return i2c->find_type(i2c, index);
277 }
278
279 index = init->outp->i2c_index;
280 if (init->outp->type == DCB_OUTPUT_DP)
281 index += NV_I2C_AUX(0);
282 } 270 }
283 271
284 return i2c->find(i2c, index); 272 bus = nvkm_i2c_bus_find(i2c, index);
273 return bus ? &bus->i2c : NULL;
285} 274}
286 275
287static int 276static int
288init_rdi2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg) 277init_rdi2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg)
289{ 278{
290 struct nvkm_i2c_port *port = init_i2c(init, index); 279 struct i2c_adapter *adap = init_i2c(init, index);
291 if (port && init_exec(init)) 280 if (adap && init_exec(init))
292 return nv_rdi2cr(port, addr, reg); 281 return nvkm_rdi2cr(adap, addr, reg);
293 return -ENODEV; 282 return -ENODEV;
294} 283}
295 284
296static int 285static int
297init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val) 286init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
298{ 287{
299 struct nvkm_i2c_port *port = init_i2c(init, index); 288 struct i2c_adapter *adap = init_i2c(init, index);
300 if (port && init_exec(init)) 289 if (adap && init_exec(init))
301 return nv_wri2cr(port, addr, reg, val); 290 return nvkm_wri2cr(adap, addr, reg, val);
302 return -ENODEV; 291 return -ENODEV;
303} 292}
304 293
294static struct nvkm_i2c_aux *
295init_aux(struct nvbios_init *init)
296{
297 struct nvkm_i2c *i2c = init->bios->subdev.device->i2c;
298 if (!init->outp) {
299 if (init_exec(init))
300 error("script needs output for aux\n");
301 return NULL;
302 }
303 return nvkm_i2c_aux_find(i2c, init->outp->i2c_index);
304}
305
305static u8 306static u8
306init_rdauxr(struct nvbios_init *init, u32 addr) 307init_rdauxr(struct nvbios_init *init, u32 addr)
307{ 308{
308 struct nvkm_i2c_port *port = init_i2c(init, -2); 309 struct nvkm_i2c_aux *aux = init_aux(init);
309 u8 data; 310 u8 data;
310 311
311 if (port && init_exec(init)) { 312 if (aux && init_exec(init)) {
312 int ret = nv_rdaux(port, addr, &data, 1); 313 int ret = nvkm_rdaux(aux, addr, &data, 1);
313 if (ret == 0) 314 if (ret == 0)
314 return data; 315 return data;
315 trace("auxch read failed with %d\n", ret); 316 trace("auxch read failed with %d\n", ret);
@@ -321,9 +322,9 @@ init_rdauxr(struct nvbios_init *init, u32 addr)
321static int 322static int
322init_wrauxr(struct nvbios_init *init, u32 addr, u8 data) 323init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
323{ 324{
324 struct nvkm_i2c_port *port = init_i2c(init, -2); 325 struct nvkm_i2c_aux *aux = init_aux(init);
325 if (port && init_exec(init)) { 326 if (aux && init_exec(init)) {
326 int ret = nv_wraux(port, addr, &data, 1); 327 int ret = nvkm_wraux(aux, addr, &data, 1);
327 if (ret) 328 if (ret)
328 trace("auxch write failed with %d\n", ret); 329 trace("auxch write failed with %d\n", ret);
329 return ret; 330 return ret;
@@ -334,9 +335,9 @@ init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
334static void 335static void
335init_prog_pll(struct nvbios_init *init, u32 id, u32 freq) 336init_prog_pll(struct nvbios_init *init, u32 id, u32 freq)
336{ 337{
337 struct nvkm_devinit *devinit = nvkm_devinit(init->bios); 338 struct nvkm_devinit *devinit = init->bios->subdev.device->devinit;
338 if (devinit->pll_set && init_exec(init)) { 339 if (init_exec(init)) {
339 int ret = devinit->pll_set(devinit, id, freq); 340 int ret = nvkm_devinit_pll_set(devinit, id, freq);
340 if (ret) 341 if (ret)
341 warn("failed to prog pll 0x%08x to %dkHz\n", id, freq); 342 warn("failed to prog pll 0x%08x to %dkHz\n", id, freq);
342 } 343 }
@@ -371,7 +372,7 @@ init_table_(struct nvbios_init *init, u16 offset, const char *name)
371 u16 len, data = init_table(bios, &len); 372 u16 len, data = init_table(bios, &len);
372 if (data) { 373 if (data) {
373 if (len >= offset + 2) { 374 if (len >= offset + 2) {
374 data = nv_ro16(bios, data + offset); 375 data = nvbios_rd16(bios, data + offset);
375 if (data) 376 if (data)
376 return data; 377 return data;
377 378
@@ -407,12 +408,12 @@ init_script(struct nvkm_bios *bios, int index)
407 return 0x0000; 408 return 0x0000;
408 409
409 data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18); 410 data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
410 return nv_ro16(bios, data + (index * 2)); 411 return nvbios_rd16(bios, data + (index * 2));
411 } 412 }
412 413
413 data = init_script_table(&init); 414 data = init_script_table(&init);
414 if (data) 415 if (data)
415 return nv_ro16(bios, data + (index * 2)); 416 return nvbios_rd16(bios, data + (index * 2));
416 417
417 return 0x0000; 418 return 0x0000;
418} 419}
@@ -422,7 +423,7 @@ init_unknown_script(struct nvkm_bios *bios)
422{ 423{
423 u16 len, data = init_table(bios, &len); 424 u16 len, data = init_table(bios, &len);
424 if (data && len >= 16) 425 if (data && len >= 16)
425 return nv_ro16(bios, data + 14); 426 return nvbios_rd16(bios, data + 14);
426 return 0x0000; 427 return 0x0000;
427} 428}
428 429
@@ -454,9 +455,9 @@ init_xlat_(struct nvbios_init *init, u8 index, u8 offset)
454 struct nvkm_bios *bios = init->bios; 455 struct nvkm_bios *bios = init->bios;
455 u16 table = init_xlat_table(init); 456 u16 table = init_xlat_table(init);
456 if (table) { 457 if (table) {
457 u16 data = nv_ro16(bios, table + (index * 2)); 458 u16 data = nvbios_rd16(bios, table + (index * 2));
458 if (data) 459 if (data)
459 return nv_ro08(bios, data + offset); 460 return nvbios_rd08(bios, data + offset);
460 warn("xlat table pointer %d invalid\n", index); 461 warn("xlat table pointer %d invalid\n", index);
461 } 462 }
462 return 0x00; 463 return 0x00;
@@ -472,9 +473,9 @@ init_condition_met(struct nvbios_init *init, u8 cond)
472 struct nvkm_bios *bios = init->bios; 473 struct nvkm_bios *bios = init->bios;
473 u16 table = init_condition_table(init); 474 u16 table = init_condition_table(init);
474 if (table) { 475 if (table) {
475 u32 reg = nv_ro32(bios, table + (cond * 12) + 0); 476 u32 reg = nvbios_rd32(bios, table + (cond * 12) + 0);
476 u32 msk = nv_ro32(bios, table + (cond * 12) + 4); 477 u32 msk = nvbios_rd32(bios, table + (cond * 12) + 4);
477 u32 val = nv_ro32(bios, table + (cond * 12) + 8); 478 u32 val = nvbios_rd32(bios, table + (cond * 12) + 8);
478 trace("\t[0x%02x] (R[0x%06x] & 0x%08x) == 0x%08x\n", 479 trace("\t[0x%02x] (R[0x%06x] & 0x%08x) == 0x%08x\n",
479 cond, reg, msk, val); 480 cond, reg, msk, val);
480 return (init_rd32(init, reg) & msk) == val; 481 return (init_rd32(init, reg) & msk) == val;
@@ -488,10 +489,10 @@ init_io_condition_met(struct nvbios_init *init, u8 cond)
488 struct nvkm_bios *bios = init->bios; 489 struct nvkm_bios *bios = init->bios;
489 u16 table = init_io_condition_table(init); 490 u16 table = init_io_condition_table(init);
490 if (table) { 491 if (table) {
491 u16 port = nv_ro16(bios, table + (cond * 5) + 0); 492 u16 port = nvbios_rd16(bios, table + (cond * 5) + 0);
492 u8 index = nv_ro08(bios, table + (cond * 5) + 2); 493 u8 index = nvbios_rd08(bios, table + (cond * 5) + 2);
493 u8 mask = nv_ro08(bios, table + (cond * 5) + 3); 494 u8 mask = nvbios_rd08(bios, table + (cond * 5) + 3);
494 u8 value = nv_ro08(bios, table + (cond * 5) + 4); 495 u8 value = nvbios_rd08(bios, table + (cond * 5) + 4);
495 trace("\t[0x%02x] (0x%04x[0x%02x] & 0x%02x) == 0x%02x\n", 496 trace("\t[0x%02x] (0x%04x[0x%02x] & 0x%02x) == 0x%02x\n",
496 cond, port, index, mask, value); 497 cond, port, index, mask, value);
497 return (init_rdvgai(init, port, index) & mask) == value; 498 return (init_rdvgai(init, port, index) & mask) == value;
@@ -505,15 +506,15 @@ init_io_flag_condition_met(struct nvbios_init *init, u8 cond)
505 struct nvkm_bios *bios = init->bios; 506 struct nvkm_bios *bios = init->bios;
506 u16 table = init_io_flag_condition_table(init); 507 u16 table = init_io_flag_condition_table(init);
507 if (table) { 508 if (table) {
508 u16 port = nv_ro16(bios, table + (cond * 9) + 0); 509 u16 port = nvbios_rd16(bios, table + (cond * 9) + 0);
509 u8 index = nv_ro08(bios, table + (cond * 9) + 2); 510 u8 index = nvbios_rd08(bios, table + (cond * 9) + 2);
510 u8 mask = nv_ro08(bios, table + (cond * 9) + 3); 511 u8 mask = nvbios_rd08(bios, table + (cond * 9) + 3);
511 u8 shift = nv_ro08(bios, table + (cond * 9) + 4); 512 u8 shift = nvbios_rd08(bios, table + (cond * 9) + 4);
512 u16 data = nv_ro16(bios, table + (cond * 9) + 5); 513 u16 data = nvbios_rd16(bios, table + (cond * 9) + 5);
513 u8 dmask = nv_ro08(bios, table + (cond * 9) + 7); 514 u8 dmask = nvbios_rd08(bios, table + (cond * 9) + 7);
514 u8 value = nv_ro08(bios, table + (cond * 9) + 8); 515 u8 value = nvbios_rd08(bios, table + (cond * 9) + 8);
515 u8 ioval = (init_rdvgai(init, port, index) & mask) >> shift; 516 u8 ioval = (init_rdvgai(init, port, index) & mask) >> shift;
516 return (nv_ro08(bios, data + ioval) & dmask) == value; 517 return (nvbios_rd08(bios, data + ioval) & dmask) == value;
517 } 518 }
518 return false; 519 return false;
519} 520}
@@ -573,7 +574,7 @@ init_tmds_reg(struct nvbios_init *init, u8 tmds)
573static void 574static void
574init_reserved(struct nvbios_init *init) 575init_reserved(struct nvbios_init *init)
575{ 576{
576 u8 opcode = nv_ro08(init->bios, init->offset); 577 u8 opcode = nvbios_rd08(init->bios, init->offset);
577 u8 length, i; 578 u8 length, i;
578 579
579 switch (opcode) { 580 switch (opcode) {
@@ -587,7 +588,7 @@ init_reserved(struct nvbios_init *init)
587 588
588 trace("RESERVED 0x%02x\t", opcode); 589 trace("RESERVED 0x%02x\t", opcode);
589 for (i = 1; i < length; i++) 590 for (i = 1; i < length; i++)
590 cont(" 0x%02x", nv_ro08(init->bios, init->offset + i)); 591 cont(" 0x%02x", nvbios_rd08(init->bios, init->offset + i));
591 cont("\n"); 592 cont("\n");
592 init->offset += length; 593 init->offset += length;
593} 594}
@@ -611,12 +612,12 @@ static void
611init_io_restrict_prog(struct nvbios_init *init) 612init_io_restrict_prog(struct nvbios_init *init)
612{ 613{
613 struct nvkm_bios *bios = init->bios; 614 struct nvkm_bios *bios = init->bios;
614 u16 port = nv_ro16(bios, init->offset + 1); 615 u16 port = nvbios_rd16(bios, init->offset + 1);
615 u8 index = nv_ro08(bios, init->offset + 3); 616 u8 index = nvbios_rd08(bios, init->offset + 3);
616 u8 mask = nv_ro08(bios, init->offset + 4); 617 u8 mask = nvbios_rd08(bios, init->offset + 4);
617 u8 shift = nv_ro08(bios, init->offset + 5); 618 u8 shift = nvbios_rd08(bios, init->offset + 5);
618 u8 count = nv_ro08(bios, init->offset + 6); 619 u8 count = nvbios_rd08(bios, init->offset + 6);
619 u32 reg = nv_ro32(bios, init->offset + 7); 620 u32 reg = nvbios_rd32(bios, init->offset + 7);
620 u8 conf, i; 621 u8 conf, i;
621 622
622 trace("IO_RESTRICT_PROG\tR[0x%06x] = " 623 trace("IO_RESTRICT_PROG\tR[0x%06x] = "
@@ -626,7 +627,7 @@ init_io_restrict_prog(struct nvbios_init *init)
626 627
627 conf = (init_rdvgai(init, port, index) & mask) >> shift; 628 conf = (init_rdvgai(init, port, index) & mask) >> shift;
628 for (i = 0; i < count; i++) { 629 for (i = 0; i < count; i++) {
629 u32 data = nv_ro32(bios, init->offset); 630 u32 data = nvbios_rd32(bios, init->offset);
630 631
631 if (i == conf) { 632 if (i == conf) {
632 trace("\t0x%08x *\n", data); 633 trace("\t0x%08x *\n", data);
@@ -648,7 +649,7 @@ static void
648init_repeat(struct nvbios_init *init) 649init_repeat(struct nvbios_init *init)
649{ 650{
650 struct nvkm_bios *bios = init->bios; 651 struct nvkm_bios *bios = init->bios;
651 u8 count = nv_ro08(bios, init->offset + 1); 652 u8 count = nvbios_rd08(bios, init->offset + 1);
652 u16 repeat = init->repeat; 653 u16 repeat = init->repeat;
653 654
654 trace("REPEAT\t0x%02x\n", count); 655 trace("REPEAT\t0x%02x\n", count);
@@ -674,13 +675,13 @@ static void
674init_io_restrict_pll(struct nvbios_init *init) 675init_io_restrict_pll(struct nvbios_init *init)
675{ 676{
676 struct nvkm_bios *bios = init->bios; 677 struct nvkm_bios *bios = init->bios;
677 u16 port = nv_ro16(bios, init->offset + 1); 678 u16 port = nvbios_rd16(bios, init->offset + 1);
678 u8 index = nv_ro08(bios, init->offset + 3); 679 u8 index = nvbios_rd08(bios, init->offset + 3);
679 u8 mask = nv_ro08(bios, init->offset + 4); 680 u8 mask = nvbios_rd08(bios, init->offset + 4);
680 u8 shift = nv_ro08(bios, init->offset + 5); 681 u8 shift = nvbios_rd08(bios, init->offset + 5);
681 s8 iofc = nv_ro08(bios, init->offset + 6); 682 s8 iofc = nvbios_rd08(bios, init->offset + 6);
682 u8 count = nv_ro08(bios, init->offset + 7); 683 u8 count = nvbios_rd08(bios, init->offset + 7);
683 u32 reg = nv_ro32(bios, init->offset + 8); 684 u32 reg = nvbios_rd32(bios, init->offset + 8);
684 u8 conf, i; 685 u8 conf, i;
685 686
686 trace("IO_RESTRICT_PLL\tR[0x%06x] =PLL= " 687 trace("IO_RESTRICT_PLL\tR[0x%06x] =PLL= "
@@ -690,7 +691,7 @@ init_io_restrict_pll(struct nvbios_init *init)
690 691
691 conf = (init_rdvgai(init, port, index) & mask) >> shift; 692 conf = (init_rdvgai(init, port, index) & mask) >> shift;
692 for (i = 0; i < count; i++) { 693 for (i = 0; i < count; i++) {
693 u32 freq = nv_ro16(bios, init->offset) * 10; 694 u32 freq = nvbios_rd16(bios, init->offset) * 10;
694 695
695 if (i == conf) { 696 if (i == conf) {
696 trace("\t%dkHz *\n", freq); 697 trace("\t%dkHz *\n", freq);
@@ -730,12 +731,12 @@ static void
730init_copy(struct nvbios_init *init) 731init_copy(struct nvbios_init *init)
731{ 732{
732 struct nvkm_bios *bios = init->bios; 733 struct nvkm_bios *bios = init->bios;
733 u32 reg = nv_ro32(bios, init->offset + 1); 734 u32 reg = nvbios_rd32(bios, init->offset + 1);
734 u8 shift = nv_ro08(bios, init->offset + 5); 735 u8 shift = nvbios_rd08(bios, init->offset + 5);
735 u8 smask = nv_ro08(bios, init->offset + 6); 736 u8 smask = nvbios_rd08(bios, init->offset + 6);
736 u16 port = nv_ro16(bios, init->offset + 7); 737 u16 port = nvbios_rd16(bios, init->offset + 7);
737 u8 index = nv_ro08(bios, init->offset + 9); 738 u8 index = nvbios_rd08(bios, init->offset + 9);
738 u8 mask = nv_ro08(bios, init->offset + 10); 739 u8 mask = nvbios_rd08(bios, init->offset + 10);
739 u8 data; 740 u8 data;
740 741
741 trace("COPY\t0x%04x[0x%02x] &= 0x%02x |= " 742 trace("COPY\t0x%04x[0x%02x] &= 0x%02x |= "
@@ -769,7 +770,7 @@ static void
769init_io_flag_condition(struct nvbios_init *init) 770init_io_flag_condition(struct nvbios_init *init)
770{ 771{
771 struct nvkm_bios *bios = init->bios; 772 struct nvkm_bios *bios = init->bios;
772 u8 cond = nv_ro08(bios, init->offset + 1); 773 u8 cond = nvbios_rd08(bios, init->offset + 1);
773 774
774 trace("IO_FLAG_CONDITION\t0x%02x\n", cond); 775 trace("IO_FLAG_CONDITION\t0x%02x\n", cond);
775 init->offset += 2; 776 init->offset += 2;
@@ -787,8 +788,8 @@ init_dp_condition(struct nvbios_init *init)
787{ 788{
788 struct nvkm_bios *bios = init->bios; 789 struct nvkm_bios *bios = init->bios;
789 struct nvbios_dpout info; 790 struct nvbios_dpout info;
790 u8 cond = nv_ro08(bios, init->offset + 1); 791 u8 cond = nvbios_rd08(bios, init->offset + 1);
791 u8 unkn = nv_ro08(bios, init->offset + 2); 792 u8 unkn = nvbios_rd08(bios, init->offset + 2);
792 u8 ver, hdr, cnt, len; 793 u8 ver, hdr, cnt, len;
793 u16 data; 794 u16 data;
794 795
@@ -834,7 +835,7 @@ static void
834init_io_mask_or(struct nvbios_init *init) 835init_io_mask_or(struct nvbios_init *init)
835{ 836{
836 struct nvkm_bios *bios = init->bios; 837 struct nvkm_bios *bios = init->bios;
837 u8 index = nv_ro08(bios, init->offset + 1); 838 u8 index = nvbios_rd08(bios, init->offset + 1);
838 u8 or = init_or(init); 839 u8 or = init_or(init);
839 u8 data; 840 u8 data;
840 841
@@ -853,7 +854,7 @@ static void
853init_io_or(struct nvbios_init *init) 854init_io_or(struct nvbios_init *init)
854{ 855{
855 struct nvkm_bios *bios = init->bios; 856 struct nvkm_bios *bios = init->bios;
856 u8 index = nv_ro08(bios, init->offset + 1); 857 u8 index = nvbios_rd08(bios, init->offset + 1);
857 u8 or = init_or(init); 858 u8 or = init_or(init);
858 u8 data; 859 u8 data;
859 860
@@ -872,8 +873,8 @@ static void
872init_andn_reg(struct nvbios_init *init) 873init_andn_reg(struct nvbios_init *init)
873{ 874{
874 struct nvkm_bios *bios = init->bios; 875 struct nvkm_bios *bios = init->bios;
875 u32 reg = nv_ro32(bios, init->offset + 1); 876 u32 reg = nvbios_rd32(bios, init->offset + 1);
876 u32 mask = nv_ro32(bios, init->offset + 5); 877 u32 mask = nvbios_rd32(bios, init->offset + 5);
877 878
878 trace("ANDN_REG\tR[0x%06x] &= ~0x%08x\n", reg, mask); 879 trace("ANDN_REG\tR[0x%06x] &= ~0x%08x\n", reg, mask);
879 init->offset += 9; 880 init->offset += 9;
@@ -889,8 +890,8 @@ static void
889init_or_reg(struct nvbios_init *init) 890init_or_reg(struct nvbios_init *init)
890{ 891{
891 struct nvkm_bios *bios = init->bios; 892 struct nvkm_bios *bios = init->bios;
892 u32 reg = nv_ro32(bios, init->offset + 1); 893 u32 reg = nvbios_rd32(bios, init->offset + 1);
893 u32 mask = nv_ro32(bios, init->offset + 5); 894 u32 mask = nvbios_rd32(bios, init->offset + 5);
894 895
895 trace("OR_REG\tR[0x%06x] |= 0x%08x\n", reg, mask); 896 trace("OR_REG\tR[0x%06x] |= 0x%08x\n", reg, mask);
896 init->offset += 9; 897 init->offset += 9;
@@ -906,19 +907,19 @@ static void
906init_idx_addr_latched(struct nvbios_init *init) 907init_idx_addr_latched(struct nvbios_init *init)
907{ 908{
908 struct nvkm_bios *bios = init->bios; 909 struct nvkm_bios *bios = init->bios;
909 u32 creg = nv_ro32(bios, init->offset + 1); 910 u32 creg = nvbios_rd32(bios, init->offset + 1);
910 u32 dreg = nv_ro32(bios, init->offset + 5); 911 u32 dreg = nvbios_rd32(bios, init->offset + 5);
911 u32 mask = nv_ro32(bios, init->offset + 9); 912 u32 mask = nvbios_rd32(bios, init->offset + 9);
912 u32 data = nv_ro32(bios, init->offset + 13); 913 u32 data = nvbios_rd32(bios, init->offset + 13);
913 u8 count = nv_ro08(bios, init->offset + 17); 914 u8 count = nvbios_rd08(bios, init->offset + 17);
914 915
915 trace("INDEX_ADDRESS_LATCHED\tR[0x%06x] : R[0x%06x]\n", creg, dreg); 916 trace("INDEX_ADDRESS_LATCHED\tR[0x%06x] : R[0x%06x]\n", creg, dreg);
916 trace("\tCTRL &= 0x%08x |= 0x%08x\n", mask, data); 917 trace("\tCTRL &= 0x%08x |= 0x%08x\n", mask, data);
917 init->offset += 18; 918 init->offset += 18;
918 919
919 while (count--) { 920 while (count--) {
920 u8 iaddr = nv_ro08(bios, init->offset + 0); 921 u8 iaddr = nvbios_rd08(bios, init->offset + 0);
921 u8 idata = nv_ro08(bios, init->offset + 1); 922 u8 idata = nvbios_rd08(bios, init->offset + 1);
922 923
923 trace("\t[0x%02x] = 0x%02x\n", iaddr, idata); 924 trace("\t[0x%02x] = 0x%02x\n", iaddr, idata);
924 init->offset += 2; 925 init->offset += 2;
@@ -936,12 +937,12 @@ static void
936init_io_restrict_pll2(struct nvbios_init *init) 937init_io_restrict_pll2(struct nvbios_init *init)
937{ 938{
938 struct nvkm_bios *bios = init->bios; 939 struct nvkm_bios *bios = init->bios;
939 u16 port = nv_ro16(bios, init->offset + 1); 940 u16 port = nvbios_rd16(bios, init->offset + 1);
940 u8 index = nv_ro08(bios, init->offset + 3); 941 u8 index = nvbios_rd08(bios, init->offset + 3);
941 u8 mask = nv_ro08(bios, init->offset + 4); 942 u8 mask = nvbios_rd08(bios, init->offset + 4);
942 u8 shift = nv_ro08(bios, init->offset + 5); 943 u8 shift = nvbios_rd08(bios, init->offset + 5);
943 u8 count = nv_ro08(bios, init->offset + 6); 944 u8 count = nvbios_rd08(bios, init->offset + 6);
944 u32 reg = nv_ro32(bios, init->offset + 7); 945 u32 reg = nvbios_rd32(bios, init->offset + 7);
945 u8 conf, i; 946 u8 conf, i;
946 947
947 trace("IO_RESTRICT_PLL2\t" 948 trace("IO_RESTRICT_PLL2\t"
@@ -951,7 +952,7 @@ init_io_restrict_pll2(struct nvbios_init *init)
951 952
952 conf = (init_rdvgai(init, port, index) & mask) >> shift; 953 conf = (init_rdvgai(init, port, index) & mask) >> shift;
953 for (i = 0; i < count; i++) { 954 for (i = 0; i < count; i++) {
954 u32 freq = nv_ro32(bios, init->offset); 955 u32 freq = nvbios_rd32(bios, init->offset);
955 if (i == conf) { 956 if (i == conf) {
956 trace("\t%dkHz *\n", freq); 957 trace("\t%dkHz *\n", freq);
957 init_prog_pll(init, reg, freq); 958 init_prog_pll(init, reg, freq);
@@ -971,8 +972,8 @@ static void
971init_pll2(struct nvbios_init *init) 972init_pll2(struct nvbios_init *init)
972{ 973{
973 struct nvkm_bios *bios = init->bios; 974 struct nvkm_bios *bios = init->bios;
974 u32 reg = nv_ro32(bios, init->offset + 1); 975 u32 reg = nvbios_rd32(bios, init->offset + 1);
975 u32 freq = nv_ro32(bios, init->offset + 5); 976 u32 freq = nvbios_rd32(bios, init->offset + 5);
976 977
977 trace("PLL2\tR[0x%06x] =PLL= %dkHz\n", reg, freq); 978 trace("PLL2\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
978 init->offset += 9; 979 init->offset += 9;
@@ -988,17 +989,17 @@ static void
988init_i2c_byte(struct nvbios_init *init) 989init_i2c_byte(struct nvbios_init *init)
989{ 990{
990 struct nvkm_bios *bios = init->bios; 991 struct nvkm_bios *bios = init->bios;
991 u8 index = nv_ro08(bios, init->offset + 1); 992 u8 index = nvbios_rd08(bios, init->offset + 1);
992 u8 addr = nv_ro08(bios, init->offset + 2) >> 1; 993 u8 addr = nvbios_rd08(bios, init->offset + 2) >> 1;
993 u8 count = nv_ro08(bios, init->offset + 3); 994 u8 count = nvbios_rd08(bios, init->offset + 3);
994 995
995 trace("I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr); 996 trace("I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
996 init->offset += 4; 997 init->offset += 4;
997 998
998 while (count--) { 999 while (count--) {
999 u8 reg = nv_ro08(bios, init->offset + 0); 1000 u8 reg = nvbios_rd08(bios, init->offset + 0);
1000 u8 mask = nv_ro08(bios, init->offset + 1); 1001 u8 mask = nvbios_rd08(bios, init->offset + 1);
1001 u8 data = nv_ro08(bios, init->offset + 2); 1002 u8 data = nvbios_rd08(bios, init->offset + 2);
1002 int val; 1003 int val;
1003 1004
1004 trace("\t[0x%02x] &= 0x%02x |= 0x%02x\n", reg, mask, data); 1005 trace("\t[0x%02x] &= 0x%02x |= 0x%02x\n", reg, mask, data);
@@ -1019,16 +1020,16 @@ static void
1019init_zm_i2c_byte(struct nvbios_init *init) 1020init_zm_i2c_byte(struct nvbios_init *init)
1020{ 1021{
1021 struct nvkm_bios *bios = init->bios; 1022 struct nvkm_bios *bios = init->bios;
1022 u8 index = nv_ro08(bios, init->offset + 1); 1023 u8 index = nvbios_rd08(bios, init->offset + 1);
1023 u8 addr = nv_ro08(bios, init->offset + 2) >> 1; 1024 u8 addr = nvbios_rd08(bios, init->offset + 2) >> 1;
1024 u8 count = nv_ro08(bios, init->offset + 3); 1025 u8 count = nvbios_rd08(bios, init->offset + 3);
1025 1026
1026 trace("ZM_I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr); 1027 trace("ZM_I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
1027 init->offset += 4; 1028 init->offset += 4;
1028 1029
1029 while (count--) { 1030 while (count--) {
1030 u8 reg = nv_ro08(bios, init->offset + 0); 1031 u8 reg = nvbios_rd08(bios, init->offset + 0);
1031 u8 data = nv_ro08(bios, init->offset + 1); 1032 u8 data = nvbios_rd08(bios, init->offset + 1);
1032 1033
1033 trace("\t[0x%02x] = 0x%02x\n", reg, data); 1034 trace("\t[0x%02x] = 0x%02x\n", reg, data);
1034 init->offset += 2; 1035 init->offset += 2;
@@ -1045,28 +1046,28 @@ static void
1045init_zm_i2c(struct nvbios_init *init) 1046init_zm_i2c(struct nvbios_init *init)
1046{ 1047{
1047 struct nvkm_bios *bios = init->bios; 1048 struct nvkm_bios *bios = init->bios;
1048 u8 index = nv_ro08(bios, init->offset + 1); 1049 u8 index = nvbios_rd08(bios, init->offset + 1);
1049 u8 addr = nv_ro08(bios, init->offset + 2) >> 1; 1050 u8 addr = nvbios_rd08(bios, init->offset + 2) >> 1;
1050 u8 count = nv_ro08(bios, init->offset + 3); 1051 u8 count = nvbios_rd08(bios, init->offset + 3);
1051 u8 data[256], i; 1052 u8 data[256], i;
1052 1053
1053 trace("ZM_I2C\tI2C[0x%02x][0x%02x]\n", index, addr); 1054 trace("ZM_I2C\tI2C[0x%02x][0x%02x]\n", index, addr);
1054 init->offset += 4; 1055 init->offset += 4;
1055 1056
1056 for (i = 0; i < count; i++) { 1057 for (i = 0; i < count; i++) {
1057 data[i] = nv_ro08(bios, init->offset); 1058 data[i] = nvbios_rd08(bios, init->offset);
1058 trace("\t0x%02x\n", data[i]); 1059 trace("\t0x%02x\n", data[i]);
1059 init->offset++; 1060 init->offset++;
1060 } 1061 }
1061 1062
1062 if (init_exec(init)) { 1063 if (init_exec(init)) {
1063 struct nvkm_i2c_port *port = init_i2c(init, index); 1064 struct i2c_adapter *adap = init_i2c(init, index);
1064 struct i2c_msg msg = { 1065 struct i2c_msg msg = {
1065 .addr = addr, .flags = 0, .len = count, .buf = data, 1066 .addr = addr, .flags = 0, .len = count, .buf = data,
1066 }; 1067 };
1067 int ret; 1068 int ret;
1068 1069
1069 if (port && (ret = i2c_transfer(&port->adapter, &msg, 1)) != 1) 1070 if (adap && (ret = i2c_transfer(adap, &msg, 1)) != 1)
1070 warn("i2c wr failed, %d\n", ret); 1071 warn("i2c wr failed, %d\n", ret);
1071 } 1072 }
1072} 1073}
@@ -1079,10 +1080,10 @@ static void
1079init_tmds(struct nvbios_init *init) 1080init_tmds(struct nvbios_init *init)
1080{ 1081{
1081 struct nvkm_bios *bios = init->bios; 1082 struct nvkm_bios *bios = init->bios;
1082 u8 tmds = nv_ro08(bios, init->offset + 1); 1083 u8 tmds = nvbios_rd08(bios, init->offset + 1);
1083 u8 addr = nv_ro08(bios, init->offset + 2); 1084 u8 addr = nvbios_rd08(bios, init->offset + 2);
1084 u8 mask = nv_ro08(bios, init->offset + 3); 1085 u8 mask = nvbios_rd08(bios, init->offset + 3);
1085 u8 data = nv_ro08(bios, init->offset + 4); 1086 u8 data = nvbios_rd08(bios, init->offset + 4);
1086 u32 reg = init_tmds_reg(init, tmds); 1087 u32 reg = init_tmds_reg(init, tmds);
1087 1088
1088 trace("TMDS\tT[0x%02x][0x%02x] &= 0x%02x |= 0x%02x\n", 1089 trace("TMDS\tT[0x%02x][0x%02x] &= 0x%02x |= 0x%02x\n",
@@ -1105,16 +1106,16 @@ static void
1105init_zm_tmds_group(struct nvbios_init *init) 1106init_zm_tmds_group(struct nvbios_init *init)
1106{ 1107{
1107 struct nvkm_bios *bios = init->bios; 1108 struct nvkm_bios *bios = init->bios;
1108 u8 tmds = nv_ro08(bios, init->offset + 1); 1109 u8 tmds = nvbios_rd08(bios, init->offset + 1);
1109 u8 count = nv_ro08(bios, init->offset + 2); 1110 u8 count = nvbios_rd08(bios, init->offset + 2);
1110 u32 reg = init_tmds_reg(init, tmds); 1111 u32 reg = init_tmds_reg(init, tmds);
1111 1112
1112 trace("TMDS_ZM_GROUP\tT[0x%02x]\n", tmds); 1113 trace("TMDS_ZM_GROUP\tT[0x%02x]\n", tmds);
1113 init->offset += 3; 1114 init->offset += 3;
1114 1115
1115 while (count--) { 1116 while (count--) {
1116 u8 addr = nv_ro08(bios, init->offset + 0); 1117 u8 addr = nvbios_rd08(bios, init->offset + 0);
1117 u8 data = nv_ro08(bios, init->offset + 1); 1118 u8 data = nvbios_rd08(bios, init->offset + 1);
1118 1119
1119 trace("\t[0x%02x] = 0x%02x\n", addr, data); 1120 trace("\t[0x%02x] = 0x%02x\n", addr, data);
1120 init->offset += 2; 1121 init->offset += 2;
@@ -1132,10 +1133,10 @@ static void
1132init_cr_idx_adr_latch(struct nvbios_init *init) 1133init_cr_idx_adr_latch(struct nvbios_init *init)
1133{ 1134{
1134 struct nvkm_bios *bios = init->bios; 1135 struct nvkm_bios *bios = init->bios;
1135 u8 addr0 = nv_ro08(bios, init->offset + 1); 1136 u8 addr0 = nvbios_rd08(bios, init->offset + 1);
1136 u8 addr1 = nv_ro08(bios, init->offset + 2); 1137 u8 addr1 = nvbios_rd08(bios, init->offset + 2);
1137 u8 base = nv_ro08(bios, init->offset + 3); 1138 u8 base = nvbios_rd08(bios, init->offset + 3);
1138 u8 count = nv_ro08(bios, init->offset + 4); 1139 u8 count = nvbios_rd08(bios, init->offset + 4);
1139 u8 save0; 1140 u8 save0;
1140 1141
1141 trace("CR_INDEX_ADDR C[%02x] C[%02x]\n", addr0, addr1); 1142 trace("CR_INDEX_ADDR C[%02x] C[%02x]\n", addr0, addr1);
@@ -1143,7 +1144,7 @@ init_cr_idx_adr_latch(struct nvbios_init *init)
1143 1144
1144 save0 = init_rdvgai(init, 0x03d4, addr0); 1145 save0 = init_rdvgai(init, 0x03d4, addr0);
1145 while (count--) { 1146 while (count--) {
1146 u8 data = nv_ro08(bios, init->offset); 1147 u8 data = nvbios_rd08(bios, init->offset);
1147 1148
1148 trace("\t\t[0x%02x] = 0x%02x\n", base, data); 1149 trace("\t\t[0x%02x] = 0x%02x\n", base, data);
1149 init->offset += 1; 1150 init->offset += 1;
@@ -1162,9 +1163,9 @@ static void
1162init_cr(struct nvbios_init *init) 1163init_cr(struct nvbios_init *init)
1163{ 1164{
1164 struct nvkm_bios *bios = init->bios; 1165 struct nvkm_bios *bios = init->bios;
1165 u8 addr = nv_ro08(bios, init->offset + 1); 1166 u8 addr = nvbios_rd08(bios, init->offset + 1);
1166 u8 mask = nv_ro08(bios, init->offset + 2); 1167 u8 mask = nvbios_rd08(bios, init->offset + 2);
1167 u8 data = nv_ro08(bios, init->offset + 3); 1168 u8 data = nvbios_rd08(bios, init->offset + 3);
1168 u8 val; 1169 u8 val;
1169 1170
1170 trace("CR\t\tC[0x%02x] &= 0x%02x |= 0x%02x\n", addr, mask, data); 1171 trace("CR\t\tC[0x%02x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
@@ -1182,8 +1183,8 @@ static void
1182init_zm_cr(struct nvbios_init *init) 1183init_zm_cr(struct nvbios_init *init)
1183{ 1184{
1184 struct nvkm_bios *bios = init->bios; 1185 struct nvkm_bios *bios = init->bios;
1185 u8 addr = nv_ro08(bios, init->offset + 1); 1186 u8 addr = nvbios_rd08(bios, init->offset + 1);
1186 u8 data = nv_ro08(bios, init->offset + 2); 1187 u8 data = nvbios_rd08(bios, init->offset + 2);
1187 1188
1188 trace("ZM_CR\tC[0x%02x] = 0x%02x\n", addr, data); 1189 trace("ZM_CR\tC[0x%02x] = 0x%02x\n", addr, data);
1189 init->offset += 3; 1190 init->offset += 3;
@@ -1199,14 +1200,14 @@ static void
1199init_zm_cr_group(struct nvbios_init *init) 1200init_zm_cr_group(struct nvbios_init *init)
1200{ 1201{
1201 struct nvkm_bios *bios = init->bios; 1202 struct nvkm_bios *bios = init->bios;
1202 u8 count = nv_ro08(bios, init->offset + 1); 1203 u8 count = nvbios_rd08(bios, init->offset + 1);
1203 1204
1204 trace("ZM_CR_GROUP\n"); 1205 trace("ZM_CR_GROUP\n");
1205 init->offset += 2; 1206 init->offset += 2;
1206 1207
1207 while (count--) { 1208 while (count--) {
1208 u8 addr = nv_ro08(bios, init->offset + 0); 1209 u8 addr = nvbios_rd08(bios, init->offset + 0);
1209 u8 data = nv_ro08(bios, init->offset + 1); 1210 u8 data = nvbios_rd08(bios, init->offset + 1);
1210 1211
1211 trace("\t\tC[0x%02x] = 0x%02x\n", addr, data); 1212 trace("\t\tC[0x%02x] = 0x%02x\n", addr, data);
1212 init->offset += 2; 1213 init->offset += 2;
@@ -1223,8 +1224,8 @@ static void
1223init_condition_time(struct nvbios_init *init) 1224init_condition_time(struct nvbios_init *init)
1224{ 1225{
1225 struct nvkm_bios *bios = init->bios; 1226 struct nvkm_bios *bios = init->bios;
1226 u8 cond = nv_ro08(bios, init->offset + 1); 1227 u8 cond = nvbios_rd08(bios, init->offset + 1);
1227 u8 retry = nv_ro08(bios, init->offset + 2); 1228 u8 retry = nvbios_rd08(bios, init->offset + 2);
1228 u8 wait = min((u16)retry * 50, 100); 1229 u8 wait = min((u16)retry * 50, 100);
1229 1230
1230 trace("CONDITION_TIME\t0x%02x 0x%02x\n", cond, retry); 1231 trace("CONDITION_TIME\t0x%02x 0x%02x\n", cond, retry);
@@ -1250,7 +1251,7 @@ static void
1250init_ltime(struct nvbios_init *init) 1251init_ltime(struct nvbios_init *init)
1251{ 1252{
1252 struct nvkm_bios *bios = init->bios; 1253 struct nvkm_bios *bios = init->bios;
1253 u16 msec = nv_ro16(bios, init->offset + 1); 1254 u16 msec = nvbios_rd16(bios, init->offset + 1);
1254 1255
1255 trace("LTIME\t0x%04x\n", msec); 1256 trace("LTIME\t0x%04x\n", msec);
1256 init->offset += 3; 1257 init->offset += 3;
@@ -1267,14 +1268,14 @@ static void
1267init_zm_reg_sequence(struct nvbios_init *init) 1268init_zm_reg_sequence(struct nvbios_init *init)
1268{ 1269{
1269 struct nvkm_bios *bios = init->bios; 1270 struct nvkm_bios *bios = init->bios;
1270 u32 base = nv_ro32(bios, init->offset + 1); 1271 u32 base = nvbios_rd32(bios, init->offset + 1);
1271 u8 count = nv_ro08(bios, init->offset + 5); 1272 u8 count = nvbios_rd08(bios, init->offset + 5);
1272 1273
1273 trace("ZM_REG_SEQUENCE\t0x%02x\n", count); 1274 trace("ZM_REG_SEQUENCE\t0x%02x\n", count);
1274 init->offset += 6; 1275 init->offset += 6;
1275 1276
1276 while (count--) { 1277 while (count--) {
1277 u32 data = nv_ro32(bios, init->offset); 1278 u32 data = nvbios_rd32(bios, init->offset);
1278 1279
1279 trace("\t\tR[0x%06x] = 0x%08x\n", base, data); 1280 trace("\t\tR[0x%06x] = 0x%08x\n", base, data);
1280 init->offset += 4; 1281 init->offset += 4;
@@ -1292,9 +1293,9 @@ static void
1292init_pll_indirect(struct nvbios_init *init) 1293init_pll_indirect(struct nvbios_init *init)
1293{ 1294{
1294 struct nvkm_bios *bios = init->bios; 1295 struct nvkm_bios *bios = init->bios;
1295 u32 reg = nv_ro32(bios, init->offset + 1); 1296 u32 reg = nvbios_rd32(bios, init->offset + 1);
1296 u16 addr = nv_ro16(bios, init->offset + 5); 1297 u16 addr = nvbios_rd16(bios, init->offset + 5);
1297 u32 freq = (u32)nv_ro16(bios, addr) * 1000; 1298 u32 freq = (u32)nvbios_rd16(bios, addr) * 1000;
1298 1299
1299 trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n", 1300 trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
1300 reg, addr, freq); 1301 reg, addr, freq);
@@ -1311,9 +1312,9 @@ static void
1311init_zm_reg_indirect(struct nvbios_init *init) 1312init_zm_reg_indirect(struct nvbios_init *init)
1312{ 1313{
1313 struct nvkm_bios *bios = init->bios; 1314 struct nvkm_bios *bios = init->bios;
1314 u32 reg = nv_ro32(bios, init->offset + 1); 1315 u32 reg = nvbios_rd32(bios, init->offset + 1);
1315 u16 addr = nv_ro16(bios, init->offset + 5); 1316 u16 addr = nvbios_rd16(bios, init->offset + 5);
1316 u32 data = nv_ro32(bios, addr); 1317 u32 data = nvbios_rd32(bios, addr);
1317 1318
1318 trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n", 1319 trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
1319 reg, addr, data); 1320 reg, addr, data);
@@ -1330,7 +1331,7 @@ static void
1330init_sub_direct(struct nvbios_init *init) 1331init_sub_direct(struct nvbios_init *init)
1331{ 1332{
1332 struct nvkm_bios *bios = init->bios; 1333 struct nvkm_bios *bios = init->bios;
1333 u16 addr = nv_ro16(bios, init->offset + 1); 1334 u16 addr = nvbios_rd16(bios, init->offset + 1);
1334 u16 save; 1335 u16 save;
1335 1336
1336 trace("SUB_DIRECT\t0x%04x\n", addr); 1337 trace("SUB_DIRECT\t0x%04x\n", addr);
@@ -1356,7 +1357,7 @@ static void
1356init_jump(struct nvbios_init *init) 1357init_jump(struct nvbios_init *init)
1357{ 1358{
1358 struct nvkm_bios *bios = init->bios; 1359 struct nvkm_bios *bios = init->bios;
1359 u16 offset = nv_ro16(bios, init->offset + 1); 1360 u16 offset = nvbios_rd16(bios, init->offset + 1);
1360 1361
1361 trace("JUMP\t0x%04x\n", offset); 1362 trace("JUMP\t0x%04x\n", offset);
1362 1363
@@ -1374,11 +1375,11 @@ static void
1374init_i2c_if(struct nvbios_init *init) 1375init_i2c_if(struct nvbios_init *init)
1375{ 1376{
1376 struct nvkm_bios *bios = init->bios; 1377 struct nvkm_bios *bios = init->bios;
1377 u8 index = nv_ro08(bios, init->offset + 1); 1378 u8 index = nvbios_rd08(bios, init->offset + 1);
1378 u8 addr = nv_ro08(bios, init->offset + 2); 1379 u8 addr = nvbios_rd08(bios, init->offset + 2);
1379 u8 reg = nv_ro08(bios, init->offset + 3); 1380 u8 reg = nvbios_rd08(bios, init->offset + 3);
1380 u8 mask = nv_ro08(bios, init->offset + 4); 1381 u8 mask = nvbios_rd08(bios, init->offset + 4);
1381 u8 data = nv_ro08(bios, init->offset + 5); 1382 u8 data = nvbios_rd08(bios, init->offset + 5);
1382 u8 value; 1383 u8 value;
1383 1384
1384 trace("I2C_IF\tI2C[0x%02x][0x%02x][0x%02x] & 0x%02x == 0x%02x\n", 1385 trace("I2C_IF\tI2C[0x%02x][0x%02x][0x%02x] & 0x%02x == 0x%02x\n",
@@ -1401,12 +1402,12 @@ static void
1401init_copy_nv_reg(struct nvbios_init *init) 1402init_copy_nv_reg(struct nvbios_init *init)
1402{ 1403{
1403 struct nvkm_bios *bios = init->bios; 1404 struct nvkm_bios *bios = init->bios;
1404 u32 sreg = nv_ro32(bios, init->offset + 1); 1405 u32 sreg = nvbios_rd32(bios, init->offset + 1);
1405 u8 shift = nv_ro08(bios, init->offset + 5); 1406 u8 shift = nvbios_rd08(bios, init->offset + 5);
1406 u32 smask = nv_ro32(bios, init->offset + 6); 1407 u32 smask = nvbios_rd32(bios, init->offset + 6);
1407 u32 sxor = nv_ro32(bios, init->offset + 10); 1408 u32 sxor = nvbios_rd32(bios, init->offset + 10);
1408 u32 dreg = nv_ro32(bios, init->offset + 14); 1409 u32 dreg = nvbios_rd32(bios, init->offset + 14);
1409 u32 dmask = nv_ro32(bios, init->offset + 18); 1410 u32 dmask = nvbios_rd32(bios, init->offset + 18);
1410 u32 data; 1411 u32 data;
1411 1412
1412 trace("COPY_NV_REG\tR[0x%06x] &= 0x%08x |= " 1413 trace("COPY_NV_REG\tR[0x%06x] &= 0x%08x |= "
@@ -1427,9 +1428,9 @@ static void
1427init_zm_index_io(struct nvbios_init *init) 1428init_zm_index_io(struct nvbios_init *init)
1428{ 1429{
1429 struct nvkm_bios *bios = init->bios; 1430 struct nvkm_bios *bios = init->bios;
1430 u16 port = nv_ro16(bios, init->offset + 1); 1431 u16 port = nvbios_rd16(bios, init->offset + 1);
1431 u8 index = nv_ro08(bios, init->offset + 3); 1432 u8 index = nvbios_rd08(bios, init->offset + 3);
1432 u8 data = nv_ro08(bios, init->offset + 4); 1433 u8 data = nvbios_rd08(bios, init->offset + 4);
1433 1434
1434 trace("ZM_INDEX_IO\tI[0x%04x][0x%02x] = 0x%02x\n", port, index, data); 1435 trace("ZM_INDEX_IO\tI[0x%04x][0x%02x] = 0x%02x\n", port, index, data);
1435 init->offset += 5; 1436 init->offset += 5;
@@ -1444,14 +1445,14 @@ init_zm_index_io(struct nvbios_init *init)
1444static void 1445static void
1445init_compute_mem(struct nvbios_init *init) 1446init_compute_mem(struct nvbios_init *init)
1446{ 1447{
1447 struct nvkm_devinit *devinit = nvkm_devinit(init->bios); 1448 struct nvkm_devinit *devinit = init->bios->subdev.device->devinit;
1448 1449
1449 trace("COMPUTE_MEM\n"); 1450 trace("COMPUTE_MEM\n");
1450 init->offset += 1; 1451 init->offset += 1;
1451 1452
1452 init_exec_force(init, true); 1453 init_exec_force(init, true);
1453 if (init_exec(init) && devinit->meminit) 1454 if (init_exec(init))
1454 devinit->meminit(devinit); 1455 nvkm_devinit_meminit(devinit);
1455 init_exec_force(init, false); 1456 init_exec_force(init, false);
1456} 1457}
1457 1458
@@ -1463,9 +1464,9 @@ static void
1463init_reset(struct nvbios_init *init) 1464init_reset(struct nvbios_init *init)
1464{ 1465{
1465 struct nvkm_bios *bios = init->bios; 1466 struct nvkm_bios *bios = init->bios;
1466 u32 reg = nv_ro32(bios, init->offset + 1); 1467 u32 reg = nvbios_rd32(bios, init->offset + 1);
1467 u32 data1 = nv_ro32(bios, init->offset + 5); 1468 u32 data1 = nvbios_rd32(bios, init->offset + 5);
1468 u32 data2 = nv_ro32(bios, init->offset + 9); 1469 u32 data2 = nvbios_rd32(bios, init->offset + 9);
1469 u32 savepci19; 1470 u32 savepci19;
1470 1471
1471 trace("RESET\tR[0x%08x] = 0x%08x, 0x%08x", reg, data1, data2); 1472 trace("RESET\tR[0x%08x] = 0x%08x, 0x%08x", reg, data1, data2);
@@ -1513,14 +1514,14 @@ init_configure_mem(struct nvbios_init *init)
1513 1514
1514 mdata = init_configure_mem_clk(init); 1515 mdata = init_configure_mem_clk(init);
1515 sdata = bmp_sdr_seq_table(bios); 1516 sdata = bmp_sdr_seq_table(bios);
1516 if (nv_ro08(bios, mdata) & 0x01) 1517 if (nvbios_rd08(bios, mdata) & 0x01)
1517 sdata = bmp_ddr_seq_table(bios); 1518 sdata = bmp_ddr_seq_table(bios);
1518 mdata += 6; /* skip to data */ 1519 mdata += 6; /* skip to data */
1519 1520
1520 data = init_rdvgai(init, 0x03c4, 0x01); 1521 data = init_rdvgai(init, 0x03c4, 0x01);
1521 init_wrvgai(init, 0x03c4, 0x01, data | 0x20); 1522 init_wrvgai(init, 0x03c4, 0x01, data | 0x20);
1522 1523
1523 for (; (addr = nv_ro32(bios, sdata)) != 0xffffffff; sdata += 4) { 1524 for (; (addr = nvbios_rd32(bios, sdata)) != 0xffffffff; sdata += 4) {
1524 switch (addr) { 1525 switch (addr) {
1525 case 0x10021c: /* CKE_NORMAL */ 1526 case 0x10021c: /* CKE_NORMAL */
1526 case 0x1002d0: /* CMD_REFRESH */ 1527 case 0x1002d0: /* CMD_REFRESH */
@@ -1528,7 +1529,7 @@ init_configure_mem(struct nvbios_init *init)
1528 data = 0x00000001; 1529 data = 0x00000001;
1529 break; 1530 break;
1530 default: 1531 default:
1531 data = nv_ro32(bios, mdata); 1532 data = nvbios_rd32(bios, mdata);
1532 mdata += 4; 1533 mdata += 4;
1533 if (data == 0xffffffff) 1534 if (data == 0xffffffff)
1534 continue; 1535 continue;
@@ -1563,12 +1564,12 @@ init_configure_clk(struct nvbios_init *init)
1563 mdata = init_configure_mem_clk(init); 1564 mdata = init_configure_mem_clk(init);
1564 1565
1565 /* NVPLL */ 1566 /* NVPLL */
1566 clock = nv_ro16(bios, mdata + 4) * 10; 1567 clock = nvbios_rd16(bios, mdata + 4) * 10;
1567 init_prog_pll(init, 0x680500, clock); 1568 init_prog_pll(init, 0x680500, clock);
1568 1569
1569 /* MPLL */ 1570 /* MPLL */
1570 clock = nv_ro16(bios, mdata + 2) * 10; 1571 clock = nvbios_rd16(bios, mdata + 2) * 10;
1571 if (nv_ro08(bios, mdata) & 0x01) 1572 if (nvbios_rd08(bios, mdata) & 0x01)
1572 clock *= 2; 1573 clock *= 2;
1573 init_prog_pll(init, 0x680504, clock); 1574 init_prog_pll(init, 0x680504, clock);
1574 1575
@@ -1609,9 +1610,9 @@ static void
1609init_io(struct nvbios_init *init) 1610init_io(struct nvbios_init *init)
1610{ 1611{
1611 struct nvkm_bios *bios = init->bios; 1612 struct nvkm_bios *bios = init->bios;
1612 u16 port = nv_ro16(bios, init->offset + 1); 1613 u16 port = nvbios_rd16(bios, init->offset + 1);
1613 u8 mask = nv_ro16(bios, init->offset + 3); 1614 u8 mask = nvbios_rd16(bios, init->offset + 3);
1614 u8 data = nv_ro16(bios, init->offset + 4); 1615 u8 data = nvbios_rd16(bios, init->offset + 4);
1615 u8 value; 1616 u8 value;
1616 1617
1617 trace("IO\t\tI[0x%04x] &= 0x%02x |= 0x%02x\n", port, mask, data); 1618 trace("IO\t\tI[0x%04x] &= 0x%02x |= 0x%02x\n", port, mask, data);
@@ -1621,7 +1622,7 @@ init_io(struct nvbios_init *init)
1621 * needed some day.. it's almost certainly wrong, but, it also 1622 * needed some day.. it's almost certainly wrong, but, it also
1622 * somehow makes things work... 1623 * somehow makes things work...
1623 */ 1624 */
1624 if (nv_device(init->bios)->card_type >= NV_50 && 1625 if (bios->subdev.device->card_type >= NV_50 &&
1625 port == 0x03c3 && data == 0x01) { 1626 port == 0x03c3 && data == 0x01) {
1626 init_mask(init, 0x614100, 0xf0800000, 0x00800000); 1627 init_mask(init, 0x614100, 0xf0800000, 0x00800000);
1627 init_mask(init, 0x00e18c, 0x00020000, 0x00020000); 1628 init_mask(init, 0x00e18c, 0x00020000, 0x00020000);
@@ -1649,7 +1650,7 @@ static void
1649init_sub(struct nvbios_init *init) 1650init_sub(struct nvbios_init *init)
1650{ 1651{
1651 struct nvkm_bios *bios = init->bios; 1652 struct nvkm_bios *bios = init->bios;
1652 u8 index = nv_ro08(bios, init->offset + 1); 1653 u8 index = nvbios_rd08(bios, init->offset + 1);
1653 u16 addr, save; 1654 u16 addr, save;
1654 1655
1655 trace("SUB\t0x%02x\n", index); 1656 trace("SUB\t0x%02x\n", index);
@@ -1676,8 +1677,8 @@ static void
1676init_ram_condition(struct nvbios_init *init) 1677init_ram_condition(struct nvbios_init *init)
1677{ 1678{
1678 struct nvkm_bios *bios = init->bios; 1679 struct nvkm_bios *bios = init->bios;
1679 u8 mask = nv_ro08(bios, init->offset + 1); 1680 u8 mask = nvbios_rd08(bios, init->offset + 1);
1680 u8 value = nv_ro08(bios, init->offset + 2); 1681 u8 value = nvbios_rd08(bios, init->offset + 2);
1681 1682
1682 trace("RAM_CONDITION\t" 1683 trace("RAM_CONDITION\t"
1683 "(R[0x100000] & 0x%02x) == 0x%02x\n", mask, value); 1684 "(R[0x100000] & 0x%02x) == 0x%02x\n", mask, value);
@@ -1695,9 +1696,9 @@ static void
1695init_nv_reg(struct nvbios_init *init) 1696init_nv_reg(struct nvbios_init *init)
1696{ 1697{
1697 struct nvkm_bios *bios = init->bios; 1698 struct nvkm_bios *bios = init->bios;
1698 u32 reg = nv_ro32(bios, init->offset + 1); 1699 u32 reg = nvbios_rd32(bios, init->offset + 1);
1699 u32 mask = nv_ro32(bios, init->offset + 5); 1700 u32 mask = nvbios_rd32(bios, init->offset + 5);
1700 u32 data = nv_ro32(bios, init->offset + 9); 1701 u32 data = nvbios_rd32(bios, init->offset + 9);
1701 1702
1702 trace("NV_REG\tR[0x%06x] &= 0x%08x |= 0x%08x\n", reg, mask, data); 1703 trace("NV_REG\tR[0x%06x] &= 0x%08x |= 0x%08x\n", reg, mask, data);
1703 init->offset += 13; 1704 init->offset += 13;
@@ -1713,15 +1714,15 @@ static void
1713init_macro(struct nvbios_init *init) 1714init_macro(struct nvbios_init *init)
1714{ 1715{
1715 struct nvkm_bios *bios = init->bios; 1716 struct nvkm_bios *bios = init->bios;
1716 u8 macro = nv_ro08(bios, init->offset + 1); 1717 u8 macro = nvbios_rd08(bios, init->offset + 1);
1717 u16 table; 1718 u16 table;
1718 1719
1719 trace("MACRO\t0x%02x\n", macro); 1720 trace("MACRO\t0x%02x\n", macro);
1720 1721
1721 table = init_macro_table(init); 1722 table = init_macro_table(init);
1722 if (table) { 1723 if (table) {
1723 u32 addr = nv_ro32(bios, table + (macro * 8) + 0); 1724 u32 addr = nvbios_rd32(bios, table + (macro * 8) + 0);
1724 u32 data = nv_ro32(bios, table + (macro * 8) + 4); 1725 u32 data = nvbios_rd32(bios, table + (macro * 8) + 4);
1725 trace("\t\tR[0x%06x] = 0x%08x\n", addr, data); 1726 trace("\t\tR[0x%06x] = 0x%08x\n", addr, data);
1726 init_wr32(init, addr, data); 1727 init_wr32(init, addr, data);
1727 } 1728 }
@@ -1742,6 +1743,24 @@ init_resume(struct nvbios_init *init)
1742} 1743}
1743 1744
1744/** 1745/**
1746 * INIT_STRAP_CONDITION - opcode 0x73
1747 *
1748 */
1749static void
1750init_strap_condition(struct nvbios_init *init)
1751{
1752 struct nvkm_bios *bios = init->bios;
1753 u32 mask = nvbios_rd32(bios, init->offset + 1);
1754 u32 value = nvbios_rd32(bios, init->offset + 5);
1755
1756 trace("STRAP_CONDITION\t(R[0x101000] & 0x%08x) == 0x%08x\n", mask, value);
1757 init->offset += 9;
1758
1759 if ((init_rd32(init, 0x101000) & mask) != value)
1760 init_exec_set(init, false);
1761}
1762
1763/**
1745 * INIT_TIME - opcode 0x74 1764 * INIT_TIME - opcode 0x74
1746 * 1765 *
1747 */ 1766 */
@@ -1749,7 +1768,7 @@ static void
1749init_time(struct nvbios_init *init) 1768init_time(struct nvbios_init *init)
1750{ 1769{
1751 struct nvkm_bios *bios = init->bios; 1770 struct nvkm_bios *bios = init->bios;
1752 u16 usec = nv_ro16(bios, init->offset + 1); 1771 u16 usec = nvbios_rd16(bios, init->offset + 1);
1753 1772
1754 trace("TIME\t0x%04x\n", usec); 1773 trace("TIME\t0x%04x\n", usec);
1755 init->offset += 3; 1774 init->offset += 3;
@@ -1770,7 +1789,7 @@ static void
1770init_condition(struct nvbios_init *init) 1789init_condition(struct nvbios_init *init)
1771{ 1790{
1772 struct nvkm_bios *bios = init->bios; 1791 struct nvkm_bios *bios = init->bios;
1773 u8 cond = nv_ro08(bios, init->offset + 1); 1792 u8 cond = nvbios_rd08(bios, init->offset + 1);
1774 1793
1775 trace("CONDITION\t0x%02x\n", cond); 1794 trace("CONDITION\t0x%02x\n", cond);
1776 init->offset += 2; 1795 init->offset += 2;
@@ -1787,7 +1806,7 @@ static void
1787init_io_condition(struct nvbios_init *init) 1806init_io_condition(struct nvbios_init *init)
1788{ 1807{
1789 struct nvkm_bios *bios = init->bios; 1808 struct nvkm_bios *bios = init->bios;
1790 u8 cond = nv_ro08(bios, init->offset + 1); 1809 u8 cond = nvbios_rd08(bios, init->offset + 1);
1791 1810
1792 trace("IO_CONDITION\t0x%02x\n", cond); 1811 trace("IO_CONDITION\t0x%02x\n", cond);
1793 init->offset += 2; 1812 init->offset += 2;
@@ -1797,6 +1816,23 @@ init_io_condition(struct nvbios_init *init)
1797} 1816}
1798 1817
1799/** 1818/**
1819 * INIT_ZM_REG16 - opcode 0x77
1820 *
1821 */
1822static void
1823init_zm_reg16(struct nvbios_init *init)
1824{
1825 struct nvkm_bios *bios = init->bios;
1826 u32 addr = nvbios_rd32(bios, init->offset + 1);
1827 u16 data = nvbios_rd16(bios, init->offset + 5);
1828
1829 trace("ZM_REG\tR[0x%06x] = 0x%04x\n", addr, data);
1830 init->offset += 7;
1831
1832 init_wr32(init, addr, data);
1833}
1834
1835/**
1800 * INIT_INDEX_IO - opcode 0x78 1836 * INIT_INDEX_IO - opcode 0x78
1801 * 1837 *
1802 */ 1838 */
@@ -1804,10 +1840,10 @@ static void
1804init_index_io(struct nvbios_init *init) 1840init_index_io(struct nvbios_init *init)
1805{ 1841{
1806 struct nvkm_bios *bios = init->bios; 1842 struct nvkm_bios *bios = init->bios;
1807 u16 port = nv_ro16(bios, init->offset + 1); 1843 u16 port = nvbios_rd16(bios, init->offset + 1);
1808 u8 index = nv_ro16(bios, init->offset + 3); 1844 u8 index = nvbios_rd16(bios, init->offset + 3);
1809 u8 mask = nv_ro08(bios, init->offset + 4); 1845 u8 mask = nvbios_rd08(bios, init->offset + 4);
1810 u8 data = nv_ro08(bios, init->offset + 5); 1846 u8 data = nvbios_rd08(bios, init->offset + 5);
1811 u8 value; 1847 u8 value;
1812 1848
1813 trace("INDEX_IO\tI[0x%04x][0x%02x] &= 0x%02x |= 0x%02x\n", 1849 trace("INDEX_IO\tI[0x%04x][0x%02x] &= 0x%02x |= 0x%02x\n",
@@ -1826,8 +1862,8 @@ static void
1826init_pll(struct nvbios_init *init) 1862init_pll(struct nvbios_init *init)
1827{ 1863{
1828 struct nvkm_bios *bios = init->bios; 1864 struct nvkm_bios *bios = init->bios;
1829 u32 reg = nv_ro32(bios, init->offset + 1); 1865 u32 reg = nvbios_rd32(bios, init->offset + 1);
1830 u32 freq = nv_ro16(bios, init->offset + 5) * 10; 1866 u32 freq = nvbios_rd16(bios, init->offset + 5) * 10;
1831 1867
1832 trace("PLL\tR[0x%06x] =PLL= %dkHz\n", reg, freq); 1868 trace("PLL\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
1833 init->offset += 7; 1869 init->offset += 7;
@@ -1843,8 +1879,8 @@ static void
1843init_zm_reg(struct nvbios_init *init) 1879init_zm_reg(struct nvbios_init *init)
1844{ 1880{
1845 struct nvkm_bios *bios = init->bios; 1881 struct nvkm_bios *bios = init->bios;
1846 u32 addr = nv_ro32(bios, init->offset + 1); 1882 u32 addr = nvbios_rd32(bios, init->offset + 1);
1847 u32 data = nv_ro32(bios, init->offset + 5); 1883 u32 data = nvbios_rd32(bios, init->offset + 5);
1848 1884
1849 trace("ZM_REG\tR[0x%06x] = 0x%08x\n", addr, data); 1885 trace("ZM_REG\tR[0x%06x] = 0x%08x\n", addr, data);
1850 init->offset += 9; 1886 init->offset += 9;
@@ -1863,7 +1899,7 @@ static void
1863init_ram_restrict_pll(struct nvbios_init *init) 1899init_ram_restrict_pll(struct nvbios_init *init)
1864{ 1900{
1865 struct nvkm_bios *bios = init->bios; 1901 struct nvkm_bios *bios = init->bios;
1866 u8 type = nv_ro08(bios, init->offset + 1); 1902 u8 type = nvbios_rd08(bios, init->offset + 1);
1867 u8 count = init_ram_restrict_group_count(init); 1903 u8 count = init_ram_restrict_group_count(init);
1868 u8 strap = init_ram_restrict(init); 1904 u8 strap = init_ram_restrict(init);
1869 u8 cconf; 1905 u8 cconf;
@@ -1872,7 +1908,7 @@ init_ram_restrict_pll(struct nvbios_init *init)
1872 init->offset += 2; 1908 init->offset += 2;
1873 1909
1874 for (cconf = 0; cconf < count; cconf++) { 1910 for (cconf = 0; cconf < count; cconf++) {
1875 u32 freq = nv_ro32(bios, init->offset); 1911 u32 freq = nvbios_rd32(bios, init->offset);
1876 1912
1877 if (cconf == strap) { 1913 if (cconf == strap) {
1878 trace("%dkHz *\n", freq); 1914 trace("%dkHz *\n", freq);
@@ -1892,13 +1928,13 @@ init_ram_restrict_pll(struct nvbios_init *init)
1892static void 1928static void
1893init_gpio(struct nvbios_init *init) 1929init_gpio(struct nvbios_init *init)
1894{ 1930{
1895 struct nvkm_gpio *gpio = nvkm_gpio(init->bios); 1931 struct nvkm_gpio *gpio = init->bios->subdev.device->gpio;
1896 1932
1897 trace("GPIO\n"); 1933 trace("GPIO\n");
1898 init->offset += 1; 1934 init->offset += 1;
1899 1935
1900 if (init_exec(init) && gpio && gpio->reset) 1936 if (init_exec(init))
1901 gpio->reset(gpio, DCB_GPIO_UNUSED); 1937 nvkm_gpio_reset(gpio, DCB_GPIO_UNUSED);
1902} 1938}
1903 1939
1904/** 1940/**
@@ -1909,9 +1945,9 @@ static void
1909init_ram_restrict_zm_reg_group(struct nvbios_init *init) 1945init_ram_restrict_zm_reg_group(struct nvbios_init *init)
1910{ 1946{
1911 struct nvkm_bios *bios = init->bios; 1947 struct nvkm_bios *bios = init->bios;
1912 u32 addr = nv_ro32(bios, init->offset + 1); 1948 u32 addr = nvbios_rd32(bios, init->offset + 1);
1913 u8 incr = nv_ro08(bios, init->offset + 5); 1949 u8 incr = nvbios_rd08(bios, init->offset + 5);
1914 u8 num = nv_ro08(bios, init->offset + 6); 1950 u8 num = nvbios_rd08(bios, init->offset + 6);
1915 u8 count = init_ram_restrict_group_count(init); 1951 u8 count = init_ram_restrict_group_count(init);
1916 u8 index = init_ram_restrict(init); 1952 u8 index = init_ram_restrict(init);
1917 u8 i, j; 1953 u8 i, j;
@@ -1923,7 +1959,7 @@ init_ram_restrict_zm_reg_group(struct nvbios_init *init)
1923 for (i = 0; i < num; i++) { 1959 for (i = 0; i < num; i++) {
1924 trace("\tR[0x%06x] = {\n", addr); 1960 trace("\tR[0x%06x] = {\n", addr);
1925 for (j = 0; j < count; j++) { 1961 for (j = 0; j < count; j++) {
1926 u32 data = nv_ro32(bios, init->offset); 1962 u32 data = nvbios_rd32(bios, init->offset);
1927 1963
1928 if (j == index) { 1964 if (j == index) {
1929 trace("\t\t0x%08x *\n", data); 1965 trace("\t\t0x%08x *\n", data);
@@ -1947,8 +1983,8 @@ static void
1947init_copy_zm_reg(struct nvbios_init *init) 1983init_copy_zm_reg(struct nvbios_init *init)
1948{ 1984{
1949 struct nvkm_bios *bios = init->bios; 1985 struct nvkm_bios *bios = init->bios;
1950 u32 sreg = nv_ro32(bios, init->offset + 1); 1986 u32 sreg = nvbios_rd32(bios, init->offset + 1);
1951 u32 dreg = nv_ro32(bios, init->offset + 5); 1987 u32 dreg = nvbios_rd32(bios, init->offset + 5);
1952 1988
1953 trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", dreg, sreg); 1989 trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", dreg, sreg);
1954 init->offset += 9; 1990 init->offset += 9;
@@ -1964,14 +2000,14 @@ static void
1964init_zm_reg_group(struct nvbios_init *init) 2000init_zm_reg_group(struct nvbios_init *init)
1965{ 2001{
1966 struct nvkm_bios *bios = init->bios; 2002 struct nvkm_bios *bios = init->bios;
1967 u32 addr = nv_ro32(bios, init->offset + 1); 2003 u32 addr = nvbios_rd32(bios, init->offset + 1);
1968 u8 count = nv_ro08(bios, init->offset + 5); 2004 u8 count = nvbios_rd08(bios, init->offset + 5);
1969 2005
1970 trace("ZM_REG_GROUP\tR[0x%06x] =\n", addr); 2006 trace("ZM_REG_GROUP\tR[0x%06x] =\n", addr);
1971 init->offset += 6; 2007 init->offset += 6;
1972 2008
1973 while (count--) { 2009 while (count--) {
1974 u32 data = nv_ro32(bios, init->offset); 2010 u32 data = nvbios_rd32(bios, init->offset);
1975 trace("\t0x%08x\n", data); 2011 trace("\t0x%08x\n", data);
1976 init_wr32(init, addr, data); 2012 init_wr32(init, addr, data);
1977 init->offset += 4; 2013 init->offset += 4;
@@ -1986,13 +2022,13 @@ static void
1986init_xlat(struct nvbios_init *init) 2022init_xlat(struct nvbios_init *init)
1987{ 2023{
1988 struct nvkm_bios *bios = init->bios; 2024 struct nvkm_bios *bios = init->bios;
1989 u32 saddr = nv_ro32(bios, init->offset + 1); 2025 u32 saddr = nvbios_rd32(bios, init->offset + 1);
1990 u8 sshift = nv_ro08(bios, init->offset + 5); 2026 u8 sshift = nvbios_rd08(bios, init->offset + 5);
1991 u8 smask = nv_ro08(bios, init->offset + 6); 2027 u8 smask = nvbios_rd08(bios, init->offset + 6);
1992 u8 index = nv_ro08(bios, init->offset + 7); 2028 u8 index = nvbios_rd08(bios, init->offset + 7);
1993 u32 daddr = nv_ro32(bios, init->offset + 8); 2029 u32 daddr = nvbios_rd32(bios, init->offset + 8);
1994 u32 dmask = nv_ro32(bios, init->offset + 12); 2030 u32 dmask = nvbios_rd32(bios, init->offset + 12);
1995 u8 shift = nv_ro08(bios, init->offset + 16); 2031 u8 shift = nvbios_rd08(bios, init->offset + 16);
1996 u32 data; 2032 u32 data;
1997 2033
1998 trace("INIT_XLAT\tR[0x%06x] &= 0x%08x |= " 2034 trace("INIT_XLAT\tR[0x%06x] &= 0x%08x |= "
@@ -2014,9 +2050,9 @@ static void
2014init_zm_mask_add(struct nvbios_init *init) 2050init_zm_mask_add(struct nvbios_init *init)
2015{ 2051{
2016 struct nvkm_bios *bios = init->bios; 2052 struct nvkm_bios *bios = init->bios;
2017 u32 addr = nv_ro32(bios, init->offset + 1); 2053 u32 addr = nvbios_rd32(bios, init->offset + 1);
2018 u32 mask = nv_ro32(bios, init->offset + 5); 2054 u32 mask = nvbios_rd32(bios, init->offset + 5);
2019 u32 add = nv_ro32(bios, init->offset + 9); 2055 u32 add = nvbios_rd32(bios, init->offset + 9);
2020 u32 data; 2056 u32 data;
2021 2057
2022 trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add); 2058 trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add);
@@ -2035,15 +2071,15 @@ static void
2035init_auxch(struct nvbios_init *init) 2071init_auxch(struct nvbios_init *init)
2036{ 2072{
2037 struct nvkm_bios *bios = init->bios; 2073 struct nvkm_bios *bios = init->bios;
2038 u32 addr = nv_ro32(bios, init->offset + 1); 2074 u32 addr = nvbios_rd32(bios, init->offset + 1);
2039 u8 count = nv_ro08(bios, init->offset + 5); 2075 u8 count = nvbios_rd08(bios, init->offset + 5);
2040 2076
2041 trace("AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count); 2077 trace("AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
2042 init->offset += 6; 2078 init->offset += 6;
2043 2079
2044 while (count--) { 2080 while (count--) {
2045 u8 mask = nv_ro08(bios, init->offset + 0); 2081 u8 mask = nvbios_rd08(bios, init->offset + 0);
2046 u8 data = nv_ro08(bios, init->offset + 1); 2082 u8 data = nvbios_rd08(bios, init->offset + 1);
2047 trace("\tAUX[0x%08x] &= 0x%02x |= 0x%02x\n", addr, mask, data); 2083 trace("\tAUX[0x%08x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
2048 mask = init_rdauxr(init, addr) & mask; 2084 mask = init_rdauxr(init, addr) & mask;
2049 init_wrauxr(init, addr, mask | data); 2085 init_wrauxr(init, addr, mask | data);
@@ -2059,14 +2095,14 @@ static void
2059init_zm_auxch(struct nvbios_init *init) 2095init_zm_auxch(struct nvbios_init *init)
2060{ 2096{
2061 struct nvkm_bios *bios = init->bios; 2097 struct nvkm_bios *bios = init->bios;
2062 u32 addr = nv_ro32(bios, init->offset + 1); 2098 u32 addr = nvbios_rd32(bios, init->offset + 1);
2063 u8 count = nv_ro08(bios, init->offset + 5); 2099 u8 count = nvbios_rd08(bios, init->offset + 5);
2064 2100
2065 trace("ZM_AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count); 2101 trace("ZM_AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
2066 init->offset += 6; 2102 init->offset += 6;
2067 2103
2068 while (count--) { 2104 while (count--) {
2069 u8 data = nv_ro08(bios, init->offset + 0); 2105 u8 data = nvbios_rd08(bios, init->offset + 0);
2070 trace("\tAUX[0x%08x] = 0x%02x\n", addr, data); 2106 trace("\tAUX[0x%08x] = 0x%02x\n", addr, data);
2071 init_wrauxr(init, addr, data); 2107 init_wrauxr(init, addr, data);
2072 init->offset += 1; 2108 init->offset += 1;
@@ -2081,21 +2117,21 @@ static void
2081init_i2c_long_if(struct nvbios_init *init) 2117init_i2c_long_if(struct nvbios_init *init)
2082{ 2118{
2083 struct nvkm_bios *bios = init->bios; 2119 struct nvkm_bios *bios = init->bios;
2084 u8 index = nv_ro08(bios, init->offset + 1); 2120 u8 index = nvbios_rd08(bios, init->offset + 1);
2085 u8 addr = nv_ro08(bios, init->offset + 2) >> 1; 2121 u8 addr = nvbios_rd08(bios, init->offset + 2) >> 1;
2086 u8 reglo = nv_ro08(bios, init->offset + 3); 2122 u8 reglo = nvbios_rd08(bios, init->offset + 3);
2087 u8 reghi = nv_ro08(bios, init->offset + 4); 2123 u8 reghi = nvbios_rd08(bios, init->offset + 4);
2088 u8 mask = nv_ro08(bios, init->offset + 5); 2124 u8 mask = nvbios_rd08(bios, init->offset + 5);
2089 u8 data = nv_ro08(bios, init->offset + 6); 2125 u8 data = nvbios_rd08(bios, init->offset + 6);
2090 struct nvkm_i2c_port *port; 2126 struct i2c_adapter *adap;
2091 2127
2092 trace("I2C_LONG_IF\t" 2128 trace("I2C_LONG_IF\t"
2093 "I2C[0x%02x][0x%02x][0x%02x%02x] & 0x%02x == 0x%02x\n", 2129 "I2C[0x%02x][0x%02x][0x%02x%02x] & 0x%02x == 0x%02x\n",
2094 index, addr, reglo, reghi, mask, data); 2130 index, addr, reglo, reghi, mask, data);
2095 init->offset += 7; 2131 init->offset += 7;
2096 2132
2097 port = init_i2c(init, index); 2133 adap = init_i2c(init, index);
2098 if (port) { 2134 if (adap) {
2099 u8 i[2] = { reghi, reglo }; 2135 u8 i[2] = { reghi, reglo };
2100 u8 o[1] = {}; 2136 u8 o[1] = {};
2101 struct i2c_msg msg[] = { 2137 struct i2c_msg msg[] = {
@@ -2104,7 +2140,7 @@ init_i2c_long_if(struct nvbios_init *init)
2104 }; 2140 };
2105 int ret; 2141 int ret;
2106 2142
2107 ret = i2c_transfer(&port->adapter, msg, 2); 2143 ret = i2c_transfer(adap, msg, 2);
2108 if (ret == 2 && ((o[0] & mask) == data)) 2144 if (ret == 2 && ((o[0] & mask) == data))
2109 return; 2145 return;
2110 } 2146 }
@@ -2120,9 +2156,9 @@ static void
2120init_gpio_ne(struct nvbios_init *init) 2156init_gpio_ne(struct nvbios_init *init)
2121{ 2157{
2122 struct nvkm_bios *bios = init->bios; 2158 struct nvkm_bios *bios = init->bios;
2123 struct nvkm_gpio *gpio = nvkm_gpio(bios); 2159 struct nvkm_gpio *gpio = bios->subdev.device->gpio;
2124 struct dcb_gpio_func func; 2160 struct dcb_gpio_func func;
2125 u8 count = nv_ro08(bios, init->offset + 1); 2161 u8 count = nvbios_rd08(bios, init->offset + 1);
2126 u8 idx = 0, ver, len; 2162 u8 idx = 0, ver, len;
2127 u16 data, i; 2163 u16 data, i;
2128 2164
@@ -2130,21 +2166,21 @@ init_gpio_ne(struct nvbios_init *init)
2130 init->offset += 2; 2166 init->offset += 2;
2131 2167
2132 for (i = init->offset; i < init->offset + count; i++) 2168 for (i = init->offset; i < init->offset + count; i++)
2133 cont("0x%02x ", nv_ro08(bios, i)); 2169 cont("0x%02x ", nvbios_rd08(bios, i));
2134 cont("\n"); 2170 cont("\n");
2135 2171
2136 while ((data = dcb_gpio_parse(bios, 0, idx++, &ver, &len, &func))) { 2172 while ((data = dcb_gpio_parse(bios, 0, idx++, &ver, &len, &func))) {
2137 if (func.func != DCB_GPIO_UNUSED) { 2173 if (func.func != DCB_GPIO_UNUSED) {
2138 for (i = init->offset; i < init->offset + count; i++) { 2174 for (i = init->offset; i < init->offset + count; i++) {
2139 if (func.func == nv_ro08(bios, i)) 2175 if (func.func == nvbios_rd08(bios, i))
2140 break; 2176 break;
2141 } 2177 }
2142 2178
2143 trace("\tFUNC[0x%02x]", func.func); 2179 trace("\tFUNC[0x%02x]", func.func);
2144 if (i == (init->offset + count)) { 2180 if (i == (init->offset + count)) {
2145 cont(" *"); 2181 cont(" *");
2146 if (init_exec(init) && gpio && gpio->reset) 2182 if (init_exec(init))
2147 gpio->reset(gpio, func.func); 2183 nvkm_gpio_reset(gpio, func.func);
2148 } 2184 }
2149 cont("\n"); 2185 cont("\n");
2150 } 2186 }
@@ -2202,9 +2238,11 @@ static struct nvbios_init_opcode {
2202 [0x6f] = { init_macro }, 2238 [0x6f] = { init_macro },
2203 [0x71] = { init_done }, 2239 [0x71] = { init_done },
2204 [0x72] = { init_resume }, 2240 [0x72] = { init_resume },
2241 [0x73] = { init_strap_condition },
2205 [0x74] = { init_time }, 2242 [0x74] = { init_time },
2206 [0x75] = { init_condition }, 2243 [0x75] = { init_condition },
2207 [0x76] = { init_io_condition }, 2244 [0x76] = { init_io_condition },
2245 [0x77] = { init_zm_reg16 },
2208 [0x78] = { init_index_io }, 2246 [0x78] = { init_index_io },
2209 [0x79] = { init_pll }, 2247 [0x79] = { init_pll },
2210 [0x7a] = { init_zm_reg }, 2248 [0x7a] = { init_zm_reg },
@@ -2232,7 +2270,7 @@ nvbios_exec(struct nvbios_init *init)
2232{ 2270{
2233 init->nested++; 2271 init->nested++;
2234 while (init->offset) { 2272 while (init->offset) {
2235 u8 opcode = nv_ro08(init->bios, init->offset); 2273 u8 opcode = nvbios_rd08(init->bios, init->offset);
2236 if (opcode >= init_opcode_nr || !init_opcode[opcode].exec) { 2274 if (opcode >= init_opcode_nr || !init_opcode[opcode].exec) {
2237 error("unknown opcode 0x%02x\n", opcode); 2275 error("unknown opcode 0x%02x\n", opcode);
2238 return -EINVAL; 2276 return -EINVAL;
@@ -2247,13 +2285,13 @@ nvbios_exec(struct nvbios_init *init)
2247int 2285int
2248nvbios_init(struct nvkm_subdev *subdev, bool execute) 2286nvbios_init(struct nvkm_subdev *subdev, bool execute)
2249{ 2287{
2250 struct nvkm_bios *bios = nvkm_bios(subdev); 2288 struct nvkm_bios *bios = subdev->device->bios;
2251 int ret = 0; 2289 int ret = 0;
2252 int i = -1; 2290 int i = -1;
2253 u16 data; 2291 u16 data;
2254 2292
2255 if (execute) 2293 if (execute)
2256 nv_info(bios, "running init tables\n"); 2294 nvkm_debug(subdev, "running init tables\n");
2257 while (!ret && (data = (init_script(bios, ++i)))) { 2295 while (!ret && (data = (init_script(bios, ++i)))) {
2258 struct nvbios_init init = { 2296 struct nvbios_init init = {
2259 .subdev = subdev, 2297 .subdev = subdev,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
index c4087df4f85e..3ddf0939ded3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
@@ -28,17 +28,18 @@
28u16 28u16
29mxm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr) 29mxm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr)
30{ 30{
31 struct nvkm_subdev *subdev = &bios->subdev;
31 struct bit_entry x; 32 struct bit_entry x;
32 33
33 if (bit_entry(bios, 'x', &x)) { 34 if (bit_entry(bios, 'x', &x)) {
34 nv_debug(bios, "BIT 'x' table not present\n"); 35 nvkm_debug(subdev, "BIT 'x' table not present\n");
35 return 0x0000; 36 return 0x0000;
36 } 37 }
37 38
38 *ver = x.version; 39 *ver = x.version;
39 *hdr = x.length; 40 *hdr = x.length;
40 if (*ver != 1 || *hdr < 3) { 41 if (*ver != 1 || *hdr < 3) {
41 nv_warn(bios, "BIT 'x' table %d/%d unknown\n", *ver, *hdr); 42 nvkm_warn(subdev, "BIT 'x' table %d/%d unknown\n", *ver, *hdr);
42 return 0x0000; 43 return 0x0000;
43 } 44 }
44 45
@@ -73,23 +74,24 @@ static u8 g98_sor_map[16] = {
73u8 74u8
74mxm_sor_map(struct nvkm_bios *bios, u8 conn) 75mxm_sor_map(struct nvkm_bios *bios, u8 conn)
75{ 76{
77 struct nvkm_subdev *subdev = &bios->subdev;
76 u8 ver, hdr; 78 u8 ver, hdr;
77 u16 mxm = mxm_table(bios, &ver, &hdr); 79 u16 mxm = mxm_table(bios, &ver, &hdr);
78 if (mxm && hdr >= 6) { 80 if (mxm && hdr >= 6) {
79 u16 map = nv_ro16(bios, mxm + 4); 81 u16 map = nvbios_rd16(bios, mxm + 4);
80 if (map) { 82 if (map) {
81 ver = nv_ro08(bios, map); 83 ver = nvbios_rd08(bios, map);
82 if (ver == 0x10) { 84 if (ver == 0x10) {
83 if (conn < nv_ro08(bios, map + 3)) { 85 if (conn < nvbios_rd08(bios, map + 3)) {
84 map += nv_ro08(bios, map + 1); 86 map += nvbios_rd08(bios, map + 1);
85 map += conn; 87 map += conn;
86 return nv_ro08(bios, map); 88 return nvbios_rd08(bios, map);
87 } 89 }
88 90
89 return 0x00; 91 return 0x00;
90 } 92 }
91 93
92 nv_warn(bios, "unknown sor map v%02x\n", ver); 94 nvkm_warn(subdev, "unknown sor map v%02x\n", ver);
93 } 95 }
94 } 96 }
95 97
@@ -102,30 +104,31 @@ mxm_sor_map(struct nvkm_bios *bios, u8 conn)
102 if (bios->version.chip == 0x98) 104 if (bios->version.chip == 0x98)
103 return g98_sor_map[conn]; 105 return g98_sor_map[conn];
104 106
105 nv_warn(bios, "missing sor map\n"); 107 nvkm_warn(subdev, "missing sor map\n");
106 return 0x00; 108 return 0x00;
107} 109}
108 110
109u8 111u8
110mxm_ddc_map(struct nvkm_bios *bios, u8 port) 112mxm_ddc_map(struct nvkm_bios *bios, u8 port)
111{ 113{
114 struct nvkm_subdev *subdev = &bios->subdev;
112 u8 ver, hdr; 115 u8 ver, hdr;
113 u16 mxm = mxm_table(bios, &ver, &hdr); 116 u16 mxm = mxm_table(bios, &ver, &hdr);
114 if (mxm && hdr >= 8) { 117 if (mxm && hdr >= 8) {
115 u16 map = nv_ro16(bios, mxm + 6); 118 u16 map = nvbios_rd16(bios, mxm + 6);
116 if (map) { 119 if (map) {
117 ver = nv_ro08(bios, map); 120 ver = nvbios_rd08(bios, map);
118 if (ver == 0x10) { 121 if (ver == 0x10) {
119 if (port < nv_ro08(bios, map + 3)) { 122 if (port < nvbios_rd08(bios, map + 3)) {
120 map += nv_ro08(bios, map + 1); 123 map += nvbios_rd08(bios, map + 1);
121 map += port; 124 map += port;
122 return nv_ro08(bios, map); 125 return nvbios_rd08(bios, map);
123 } 126 }
124 127
125 return 0x00; 128 return 0x00;
126 } 129 }
127 130
128 nv_warn(bios, "unknown ddc map v%02x\n", ver); 131 nvkm_warn(subdev, "unknown ddc map v%02x\n", ver);
129 } 132 }
130 } 133 }
131 134
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
index fd7dd718b2bf..955df29635c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
@@ -32,12 +32,13 @@ nvbios_npdeTe(struct nvkm_bios *bios, u32 base)
32 u8 ver; u16 hdr; 32 u8 ver; u16 hdr;
33 u32 data = nvbios_pcirTp(bios, base, &ver, &hdr, &pcir); 33 u32 data = nvbios_pcirTp(bios, base, &ver, &hdr, &pcir);
34 if (data = (data + hdr + 0x0f) & ~0x0f, data) { 34 if (data = (data + hdr + 0x0f) & ~0x0f, data) {
35 switch (nv_ro32(bios, data + 0x00)) { 35 switch (nvbios_rd32(bios, data + 0x00)) {
36 case 0x4544504e: /* NPDE */ 36 case 0x4544504e: /* NPDE */
37 break; 37 break;
38 default: 38 default:
39 nv_debug(bios, "%08x: NPDE signature (%08x) unknown\n", 39 nvkm_debug(&bios->subdev,
40 data, nv_ro32(bios, data + 0x00)); 40 "%08x: NPDE signature (%08x) unknown\n",
41 data, nvbios_rd32(bios, data + 0x00));
41 data = 0; 42 data = 0;
42 break; 43 break;
43 } 44 }
@@ -51,8 +52,8 @@ nvbios_npdeTp(struct nvkm_bios *bios, u32 base, struct nvbios_npdeT *info)
51 u32 data = nvbios_npdeTe(bios, base); 52 u32 data = nvbios_npdeTe(bios, base);
52 memset(info, 0x00, sizeof(*info)); 53 memset(info, 0x00, sizeof(*info));
53 if (data) { 54 if (data) {
54 info->image_size = nv_ro16(bios, data + 0x08) * 512; 55 info->image_size = nvbios_rd16(bios, data + 0x08) * 512;
55 info->last = nv_ro08(bios, data + 0x0a) & 0x80; 56 info->last = nvbios_rd08(bios, data + 0x0a) & 0x80;
56 } 57 }
57 return data; 58 return data;
58} 59}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
index df5978753ae8..67cb3aeb2da7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
@@ -27,19 +27,20 @@
27u32 27u32
28nvbios_pcirTe(struct nvkm_bios *bios, u32 base, u8 *ver, u16 *hdr) 28nvbios_pcirTe(struct nvkm_bios *bios, u32 base, u8 *ver, u16 *hdr)
29{ 29{
30 u32 data = nv_ro16(bios, base + 0x18); 30 u32 data = nvbios_rd16(bios, base + 0x18);
31 if (data) { 31 if (data) {
32 data += base; 32 data += base;
33 switch (nv_ro32(bios, data + 0x00)) { 33 switch (nvbios_rd32(bios, data + 0x00)) {
34 case 0x52494350: /* PCIR */ 34 case 0x52494350: /* PCIR */
35 case 0x53494752: /* RGIS */ 35 case 0x53494752: /* RGIS */
36 case 0x5344504e: /* NPDS */ 36 case 0x5344504e: /* NPDS */
37 *hdr = nv_ro16(bios, data + 0x0a); 37 *hdr = nvbios_rd16(bios, data + 0x0a);
38 *ver = nv_ro08(bios, data + 0x0c); 38 *ver = nvbios_rd08(bios, data + 0x0c);
39 break; 39 break;
40 default: 40 default:
41 nv_debug(bios, "%08x: PCIR signature (%08x) unknown\n", 41 nvkm_debug(&bios->subdev,
42 data, nv_ro32(bios, data + 0x00)); 42 "%08x: PCIR signature (%08x) unknown\n",
43 data, nvbios_rd32(bios, data + 0x00));
43 data = 0; 44 data = 0;
44 break; 45 break;
45 } 46 }
@@ -54,15 +55,15 @@ nvbios_pcirTp(struct nvkm_bios *bios, u32 base, u8 *ver, u16 *hdr,
54 u32 data = nvbios_pcirTe(bios, base, ver, hdr); 55 u32 data = nvbios_pcirTe(bios, base, ver, hdr);
55 memset(info, 0x00, sizeof(*info)); 56 memset(info, 0x00, sizeof(*info));
56 if (data) { 57 if (data) {
57 info->vendor_id = nv_ro16(bios, data + 0x04); 58 info->vendor_id = nvbios_rd16(bios, data + 0x04);
58 info->device_id = nv_ro16(bios, data + 0x06); 59 info->device_id = nvbios_rd16(bios, data + 0x06);
59 info->class_code[0] = nv_ro08(bios, data + 0x0d); 60 info->class_code[0] = nvbios_rd08(bios, data + 0x0d);
60 info->class_code[1] = nv_ro08(bios, data + 0x0e); 61 info->class_code[1] = nvbios_rd08(bios, data + 0x0e);
61 info->class_code[2] = nv_ro08(bios, data + 0x0f); 62 info->class_code[2] = nvbios_rd08(bios, data + 0x0f);
62 info->image_size = nv_ro16(bios, data + 0x10) * 512; 63 info->image_size = nvbios_rd16(bios, data + 0x10) * 512;
63 info->image_rev = nv_ro16(bios, data + 0x12); 64 info->image_rev = nvbios_rd16(bios, data + 0x12);
64 info->image_type = nv_ro08(bios, data + 0x14); 65 info->image_type = nvbios_rd08(bios, data + 0x14);
65 info->last = nv_ro08(bios, data + 0x15) & 0x80; 66 info->last = nvbios_rd08(bios, data + 0x15) & 0x80;
66 } 67 }
67 return data; 68 return data;
68} 69}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
index 382ae9cdbf58..aa7e33b42b30 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
@@ -25,8 +25,6 @@
25#include <subdev/bios/bit.h> 25#include <subdev/bios/bit.h>
26#include <subdev/bios/perf.h> 26#include <subdev/bios/perf.h>
27 27
28#include <core/device.h>
29
30u16 28u16
31nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, 29nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
32 u8 *cnt, u8 *len, u8 *snr, u8 *ssz) 30 u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
@@ -36,22 +34,22 @@ nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
36 34
37 if (!bit_entry(bios, 'P', &bit_P)) { 35 if (!bit_entry(bios, 'P', &bit_P)) {
38 if (bit_P.version <= 2) { 36 if (bit_P.version <= 2) {
39 perf = nv_ro16(bios, bit_P.offset + 0); 37 perf = nvbios_rd16(bios, bit_P.offset + 0);
40 if (perf) { 38 if (perf) {
41 *ver = nv_ro08(bios, perf + 0); 39 *ver = nvbios_rd08(bios, perf + 0);
42 *hdr = nv_ro08(bios, perf + 1); 40 *hdr = nvbios_rd08(bios, perf + 1);
43 if (*ver >= 0x40 && *ver < 0x41) { 41 if (*ver >= 0x40 && *ver < 0x41) {
44 *cnt = nv_ro08(bios, perf + 5); 42 *cnt = nvbios_rd08(bios, perf + 5);
45 *len = nv_ro08(bios, perf + 2); 43 *len = nvbios_rd08(bios, perf + 2);
46 *snr = nv_ro08(bios, perf + 4); 44 *snr = nvbios_rd08(bios, perf + 4);
47 *ssz = nv_ro08(bios, perf + 3); 45 *ssz = nvbios_rd08(bios, perf + 3);
48 return perf; 46 return perf;
49 } else 47 } else
50 if (*ver >= 0x20 && *ver < 0x40) { 48 if (*ver >= 0x20 && *ver < 0x40) {
51 *cnt = nv_ro08(bios, perf + 2); 49 *cnt = nvbios_rd08(bios, perf + 2);
52 *len = nv_ro08(bios, perf + 3); 50 *len = nvbios_rd08(bios, perf + 3);
53 *snr = nv_ro08(bios, perf + 4); 51 *snr = nvbios_rd08(bios, perf + 4);
54 *ssz = nv_ro08(bios, perf + 5); 52 *ssz = nvbios_rd08(bios, perf + 5);
55 return perf; 53 return perf;
56 } 54 }
57 } 55 }
@@ -59,13 +57,13 @@ nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
59 } 57 }
60 58
61 if (bios->bmp_offset) { 59 if (bios->bmp_offset) {
62 if (nv_ro08(bios, bios->bmp_offset + 6) >= 0x25) { 60 if (nvbios_rd08(bios, bios->bmp_offset + 6) >= 0x25) {
63 perf = nv_ro16(bios, bios->bmp_offset + 0x94); 61 perf = nvbios_rd16(bios, bios->bmp_offset + 0x94);
64 if (perf) { 62 if (perf) {
65 *hdr = nv_ro08(bios, perf + 0); 63 *hdr = nvbios_rd08(bios, perf + 0);
66 *ver = nv_ro08(bios, perf + 1); 64 *ver = nvbios_rd08(bios, perf + 1);
67 *cnt = nv_ro08(bios, perf + 2); 65 *cnt = nvbios_rd08(bios, perf + 2);
68 *len = nv_ro08(bios, perf + 3); 66 *len = nvbios_rd08(bios, perf + 3);
69 *snr = 0; 67 *snr = 0;
70 *ssz = 0; 68 *ssz = 0;
71 return perf; 69 return perf;
@@ -98,55 +96,55 @@ nvbios_perfEp(struct nvkm_bios *bios, int idx,
98{ 96{
99 u16 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len); 97 u16 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
100 memset(info, 0x00, sizeof(*info)); 98 memset(info, 0x00, sizeof(*info));
101 info->pstate = nv_ro08(bios, perf + 0x00); 99 info->pstate = nvbios_rd08(bios, perf + 0x00);
102 switch (!!perf * *ver) { 100 switch (!!perf * *ver) {
103 case 0x12: 101 case 0x12:
104 case 0x13: 102 case 0x13:
105 case 0x14: 103 case 0x14:
106 info->core = nv_ro32(bios, perf + 0x01) * 10; 104 info->core = nvbios_rd32(bios, perf + 0x01) * 10;
107 info->memory = nv_ro32(bios, perf + 0x05) * 20; 105 info->memory = nvbios_rd32(bios, perf + 0x05) * 20;
108 info->fanspeed = nv_ro08(bios, perf + 0x37); 106 info->fanspeed = nvbios_rd08(bios, perf + 0x37);
109 if (*hdr > 0x38) 107 if (*hdr > 0x38)
110 info->voltage = nv_ro08(bios, perf + 0x38); 108 info->voltage = nvbios_rd08(bios, perf + 0x38);
111 break; 109 break;
112 case 0x21: 110 case 0x21:
113 case 0x23: 111 case 0x23:
114 case 0x24: 112 case 0x24:
115 info->fanspeed = nv_ro08(bios, perf + 0x04); 113 info->fanspeed = nvbios_rd08(bios, perf + 0x04);
116 info->voltage = nv_ro08(bios, perf + 0x05); 114 info->voltage = nvbios_rd08(bios, perf + 0x05);
117 info->shader = nv_ro16(bios, perf + 0x06) * 1000; 115 info->shader = nvbios_rd16(bios, perf + 0x06) * 1000;
118 info->core = info->shader + (signed char) 116 info->core = info->shader + (signed char)
119 nv_ro08(bios, perf + 0x08) * 1000; 117 nvbios_rd08(bios, perf + 0x08) * 1000;
120 switch (nv_device(bios)->chipset) { 118 switch (bios->subdev.device->chipset) {
121 case 0x49: 119 case 0x49:
122 case 0x4b: 120 case 0x4b:
123 info->memory = nv_ro16(bios, perf + 0x0b) * 1000; 121 info->memory = nvbios_rd16(bios, perf + 0x0b) * 1000;
124 break; 122 break;
125 default: 123 default:
126 info->memory = nv_ro16(bios, perf + 0x0b) * 2000; 124 info->memory = nvbios_rd16(bios, perf + 0x0b) * 2000;
127 break; 125 break;
128 } 126 }
129 break; 127 break;
130 case 0x25: 128 case 0x25:
131 info->fanspeed = nv_ro08(bios, perf + 0x04); 129 info->fanspeed = nvbios_rd08(bios, perf + 0x04);
132 info->voltage = nv_ro08(bios, perf + 0x05); 130 info->voltage = nvbios_rd08(bios, perf + 0x05);
133 info->core = nv_ro16(bios, perf + 0x06) * 1000; 131 info->core = nvbios_rd16(bios, perf + 0x06) * 1000;
134 info->shader = nv_ro16(bios, perf + 0x0a) * 1000; 132 info->shader = nvbios_rd16(bios, perf + 0x0a) * 1000;
135 info->memory = nv_ro16(bios, perf + 0x0c) * 1000; 133 info->memory = nvbios_rd16(bios, perf + 0x0c) * 1000;
136 break; 134 break;
137 case 0x30: 135 case 0x30:
138 info->script = nv_ro16(bios, perf + 0x02); 136 info->script = nvbios_rd16(bios, perf + 0x02);
139 case 0x35: 137 case 0x35:
140 info->fanspeed = nv_ro08(bios, perf + 0x06); 138 info->fanspeed = nvbios_rd08(bios, perf + 0x06);
141 info->voltage = nv_ro08(bios, perf + 0x07); 139 info->voltage = nvbios_rd08(bios, perf + 0x07);
142 info->core = nv_ro16(bios, perf + 0x08) * 1000; 140 info->core = nvbios_rd16(bios, perf + 0x08) * 1000;
143 info->shader = nv_ro16(bios, perf + 0x0a) * 1000; 141 info->shader = nvbios_rd16(bios, perf + 0x0a) * 1000;
144 info->memory = nv_ro16(bios, perf + 0x0c) * 1000; 142 info->memory = nvbios_rd16(bios, perf + 0x0c) * 1000;
145 info->vdec = nv_ro16(bios, perf + 0x10) * 1000; 143 info->vdec = nvbios_rd16(bios, perf + 0x10) * 1000;
146 info->disp = nv_ro16(bios, perf + 0x14) * 1000; 144 info->disp = nvbios_rd16(bios, perf + 0x14) * 1000;
147 break; 145 break;
148 case 0x40: 146 case 0x40:
149 info->voltage = nv_ro08(bios, perf + 0x02); 147 info->voltage = nvbios_rd08(bios, perf + 0x02);
150 break; 148 break;
151 default: 149 default:
152 return 0x0000; 150 return 0x0000;
@@ -175,7 +173,7 @@ nvbios_perfSp(struct nvkm_bios *bios, u32 perfE, int idx,
175 memset(info, 0x00, sizeof(*info)); 173 memset(info, 0x00, sizeof(*info));
176 switch (!!data * *ver) { 174 switch (!!data * *ver) {
177 case 0x40: 175 case 0x40:
178 info->v40.freq = (nv_ro16(bios, data + 0x00) & 0x3fff) * 1000; 176 info->v40.freq = (nvbios_rd16(bios, data + 0x00) & 0x3fff) * 1000;
179 break; 177 break;
180 default: 178 default:
181 break; 179 break;
@@ -193,7 +191,7 @@ nvbios_perf_fan_parse(struct nvkm_bios *bios,
193 return -ENODEV; 191 return -ENODEV;
194 192
195 if (ver >= 0x20 && ver < 0x40 && hdr > 6) 193 if (ver >= 0x20 && ver < 0x40 && hdr > 6)
196 fan->pwm_divisor = nv_ro16(bios, perf + 6); 194 fan->pwm_divisor = nvbios_rd16(bios, perf + 6);
197 else 195 else
198 fan->pwm_divisor = 0; 196 fan->pwm_divisor = 0;
199 197
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
index ebd402e19dbf..125ec2ed6c2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
@@ -27,7 +27,6 @@
27#include <subdev/bios/pll.h> 27#include <subdev/bios/pll.h>
28#include <subdev/vga.h> 28#include <subdev/vga.h>
29 29
30#include <core/device.h>
31 30
32struct pll_mapping { 31struct pll_mapping {
33 u8 type; 32 u8 type;
@@ -84,20 +83,20 @@ pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
84 struct bit_entry bit_C; 83 struct bit_entry bit_C;
85 84
86 if (!bit_entry(bios, 'C', &bit_C) && bit_C.length >= 10) { 85 if (!bit_entry(bios, 'C', &bit_C) && bit_C.length >= 10) {
87 u16 data = nv_ro16(bios, bit_C.offset + 8); 86 u16 data = nvbios_rd16(bios, bit_C.offset + 8);
88 if (data) { 87 if (data) {
89 *ver = nv_ro08(bios, data + 0); 88 *ver = nvbios_rd08(bios, data + 0);
90 *hdr = nv_ro08(bios, data + 1); 89 *hdr = nvbios_rd08(bios, data + 1);
91 *len = nv_ro08(bios, data + 2); 90 *len = nvbios_rd08(bios, data + 2);
92 *cnt = nv_ro08(bios, data + 3); 91 *cnt = nvbios_rd08(bios, data + 3);
93 return data; 92 return data;
94 } 93 }
95 } 94 }
96 95
97 if (bmp_version(bios) >= 0x0524) { 96 if (bmp_version(bios) >= 0x0524) {
98 u16 data = nv_ro16(bios, bios->bmp_offset + 142); 97 u16 data = nvbios_rd16(bios, bios->bmp_offset + 142);
99 if (data) { 98 if (data) {
100 *ver = nv_ro08(bios, data + 0); 99 *ver = nvbios_rd08(bios, data + 0);
101 *hdr = 1; 100 *hdr = 1;
102 *cnt = 1; 101 *cnt = 1;
103 *len = 0x18; 102 *len = 0x18;
@@ -112,7 +111,8 @@ pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
112static struct pll_mapping * 111static struct pll_mapping *
113pll_map(struct nvkm_bios *bios) 112pll_map(struct nvkm_bios *bios)
114{ 113{
115 switch (nv_device(bios)->card_type) { 114 struct nvkm_device *device = bios->subdev.device;
115 switch (device->card_type) {
116 case NV_04: 116 case NV_04:
117 case NV_10: 117 case NV_10:
118 case NV_11: 118 case NV_11:
@@ -123,12 +123,12 @@ pll_map(struct nvkm_bios *bios)
123 case NV_40: 123 case NV_40:
124 return nv40_pll_mapping; 124 return nv40_pll_mapping;
125 case NV_50: 125 case NV_50:
126 if (nv_device(bios)->chipset == 0x50) 126 if (device->chipset == 0x50)
127 return nv50_pll_mapping; 127 return nv50_pll_mapping;
128 else 128 else
129 if (nv_device(bios)->chipset < 0xa3 || 129 if (device->chipset < 0xa3 ||
130 nv_device(bios)->chipset == 0xaa || 130 device->chipset == 0xaa ||
131 nv_device(bios)->chipset == 0xac) 131 device->chipset == 0xac)
132 return g84_pll_mapping; 132 return g84_pll_mapping;
133 default: 133 default:
134 return NULL; 134 return NULL;
@@ -146,8 +146,8 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
146 if (data && *ver >= 0x30) { 146 if (data && *ver >= 0x30) {
147 data += hdr; 147 data += hdr;
148 while (cnt--) { 148 while (cnt--) {
149 if (nv_ro32(bios, data + 3) == reg) { 149 if (nvbios_rd32(bios, data + 3) == reg) {
150 *type = nv_ro08(bios, data + 0); 150 *type = nvbios_rd08(bios, data + 0);
151 return data; 151 return data;
152 } 152 }
153 data += *len; 153 data += *len;
@@ -161,7 +161,7 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
161 u16 addr = (data += hdr); 161 u16 addr = (data += hdr);
162 *type = map->type; 162 *type = map->type;
163 while (cnt--) { 163 while (cnt--) {
164 if (nv_ro32(bios, data) == map->reg) 164 if (nvbios_rd32(bios, data) == map->reg)
165 return data; 165 return data;
166 data += *len; 166 data += *len;
167 } 167 }
@@ -188,8 +188,8 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
188 if (data && *ver >= 0x30) { 188 if (data && *ver >= 0x30) {
189 data += hdr; 189 data += hdr;
190 while (cnt--) { 190 while (cnt--) {
191 if (nv_ro08(bios, data + 0) == type) { 191 if (nvbios_rd08(bios, data + 0) == type) {
192 *reg = nv_ro32(bios, data + 3); 192 *reg = nvbios_rd32(bios, data + 3);
193 return data; 193 return data;
194 } 194 }
195 data += *len; 195 data += *len;
@@ -203,7 +203,7 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
203 u16 addr = (data += hdr); 203 u16 addr = (data += hdr);
204 *reg = map->reg; 204 *reg = map->reg;
205 while (cnt--) { 205 while (cnt--) {
206 if (nv_ro32(bios, data) == map->reg) 206 if (nvbios_rd32(bios, data) == map->reg)
207 return data; 207 return data;
208 data += *len; 208 data += *len;
209 } 209 }
@@ -222,6 +222,8 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
222int 222int
223nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info) 223nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
224{ 224{
225 struct nvkm_subdev *subdev = &bios->subdev;
226 struct nvkm_device *device = subdev->device;
225 u8 ver, len; 227 u8 ver, len;
226 u32 reg = type; 228 u32 reg = type;
227 u16 data; 229 u16 data;
@@ -245,12 +247,12 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
245 break; 247 break;
246 case 0x10: 248 case 0x10:
247 case 0x11: 249 case 0x11:
248 info->vco1.min_freq = nv_ro32(bios, data + 0); 250 info->vco1.min_freq = nvbios_rd32(bios, data + 0);
249 info->vco1.max_freq = nv_ro32(bios, data + 4); 251 info->vco1.max_freq = nvbios_rd32(bios, data + 4);
250 info->vco2.min_freq = nv_ro32(bios, data + 8); 252 info->vco2.min_freq = nvbios_rd32(bios, data + 8);
251 info->vco2.max_freq = nv_ro32(bios, data + 12); 253 info->vco2.max_freq = nvbios_rd32(bios, data + 12);
252 info->vco1.min_inputfreq = nv_ro32(bios, data + 16); 254 info->vco1.min_inputfreq = nvbios_rd32(bios, data + 16);
253 info->vco2.min_inputfreq = nv_ro32(bios, data + 20); 255 info->vco2.min_inputfreq = nvbios_rd32(bios, data + 20);
254 info->vco1.max_inputfreq = INT_MAX; 256 info->vco1.max_inputfreq = INT_MAX;
255 info->vco2.max_inputfreq = INT_MAX; 257 info->vco2.max_inputfreq = INT_MAX;
256 258
@@ -291,82 +293,82 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
291 break; 293 break;
292 case 0x20: 294 case 0x20:
293 case 0x21: 295 case 0x21:
294 info->vco1.min_freq = nv_ro16(bios, data + 4) * 1000; 296 info->vco1.min_freq = nvbios_rd16(bios, data + 4) * 1000;
295 info->vco1.max_freq = nv_ro16(bios, data + 6) * 1000; 297 info->vco1.max_freq = nvbios_rd16(bios, data + 6) * 1000;
296 info->vco2.min_freq = nv_ro16(bios, data + 8) * 1000; 298 info->vco2.min_freq = nvbios_rd16(bios, data + 8) * 1000;
297 info->vco2.max_freq = nv_ro16(bios, data + 10) * 1000; 299 info->vco2.max_freq = nvbios_rd16(bios, data + 10) * 1000;
298 info->vco1.min_inputfreq = nv_ro16(bios, data + 12) * 1000; 300 info->vco1.min_inputfreq = nvbios_rd16(bios, data + 12) * 1000;
299 info->vco2.min_inputfreq = nv_ro16(bios, data + 14) * 1000; 301 info->vco2.min_inputfreq = nvbios_rd16(bios, data + 14) * 1000;
300 info->vco1.max_inputfreq = nv_ro16(bios, data + 16) * 1000; 302 info->vco1.max_inputfreq = nvbios_rd16(bios, data + 16) * 1000;
301 info->vco2.max_inputfreq = nv_ro16(bios, data + 18) * 1000; 303 info->vco2.max_inputfreq = nvbios_rd16(bios, data + 18) * 1000;
302 info->vco1.min_n = nv_ro08(bios, data + 20); 304 info->vco1.min_n = nvbios_rd08(bios, data + 20);
303 info->vco1.max_n = nv_ro08(bios, data + 21); 305 info->vco1.max_n = nvbios_rd08(bios, data + 21);
304 info->vco1.min_m = nv_ro08(bios, data + 22); 306 info->vco1.min_m = nvbios_rd08(bios, data + 22);
305 info->vco1.max_m = nv_ro08(bios, data + 23); 307 info->vco1.max_m = nvbios_rd08(bios, data + 23);
306 info->vco2.min_n = nv_ro08(bios, data + 24); 308 info->vco2.min_n = nvbios_rd08(bios, data + 24);
307 info->vco2.max_n = nv_ro08(bios, data + 25); 309 info->vco2.max_n = nvbios_rd08(bios, data + 25);
308 info->vco2.min_m = nv_ro08(bios, data + 26); 310 info->vco2.min_m = nvbios_rd08(bios, data + 26);
309 info->vco2.max_m = nv_ro08(bios, data + 27); 311 info->vco2.max_m = nvbios_rd08(bios, data + 27);
310 312
311 info->max_p = nv_ro08(bios, data + 29); 313 info->max_p = nvbios_rd08(bios, data + 29);
312 info->max_p_usable = info->max_p; 314 info->max_p_usable = info->max_p;
313 if (bios->version.chip < 0x60) 315 if (bios->version.chip < 0x60)
314 info->max_p_usable = 0x6; 316 info->max_p_usable = 0x6;
315 info->bias_p = nv_ro08(bios, data + 30); 317 info->bias_p = nvbios_rd08(bios, data + 30);
316 318
317 if (len > 0x22) 319 if (len > 0x22)
318 info->refclk = nv_ro32(bios, data + 31); 320 info->refclk = nvbios_rd32(bios, data + 31);
319 break; 321 break;
320 case 0x30: 322 case 0x30:
321 data = nv_ro16(bios, data + 1); 323 data = nvbios_rd16(bios, data + 1);
322 324
323 info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000; 325 info->vco1.min_freq = nvbios_rd16(bios, data + 0) * 1000;
324 info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000; 326 info->vco1.max_freq = nvbios_rd16(bios, data + 2) * 1000;
325 info->vco2.min_freq = nv_ro16(bios, data + 4) * 1000; 327 info->vco2.min_freq = nvbios_rd16(bios, data + 4) * 1000;
326 info->vco2.max_freq = nv_ro16(bios, data + 6) * 1000; 328 info->vco2.max_freq = nvbios_rd16(bios, data + 6) * 1000;
327 info->vco1.min_inputfreq = nv_ro16(bios, data + 8) * 1000; 329 info->vco1.min_inputfreq = nvbios_rd16(bios, data + 8) * 1000;
328 info->vco2.min_inputfreq = nv_ro16(bios, data + 10) * 1000; 330 info->vco2.min_inputfreq = nvbios_rd16(bios, data + 10) * 1000;
329 info->vco1.max_inputfreq = nv_ro16(bios, data + 12) * 1000; 331 info->vco1.max_inputfreq = nvbios_rd16(bios, data + 12) * 1000;
330 info->vco2.max_inputfreq = nv_ro16(bios, data + 14) * 1000; 332 info->vco2.max_inputfreq = nvbios_rd16(bios, data + 14) * 1000;
331 info->vco1.min_n = nv_ro08(bios, data + 16); 333 info->vco1.min_n = nvbios_rd08(bios, data + 16);
332 info->vco1.max_n = nv_ro08(bios, data + 17); 334 info->vco1.max_n = nvbios_rd08(bios, data + 17);
333 info->vco1.min_m = nv_ro08(bios, data + 18); 335 info->vco1.min_m = nvbios_rd08(bios, data + 18);
334 info->vco1.max_m = nv_ro08(bios, data + 19); 336 info->vco1.max_m = nvbios_rd08(bios, data + 19);
335 info->vco2.min_n = nv_ro08(bios, data + 20); 337 info->vco2.min_n = nvbios_rd08(bios, data + 20);
336 info->vco2.max_n = nv_ro08(bios, data + 21); 338 info->vco2.max_n = nvbios_rd08(bios, data + 21);
337 info->vco2.min_m = nv_ro08(bios, data + 22); 339 info->vco2.min_m = nvbios_rd08(bios, data + 22);
338 info->vco2.max_m = nv_ro08(bios, data + 23); 340 info->vco2.max_m = nvbios_rd08(bios, data + 23);
339 info->max_p_usable = info->max_p = nv_ro08(bios, data + 25); 341 info->max_p_usable = info->max_p = nvbios_rd08(bios, data + 25);
340 info->bias_p = nv_ro08(bios, data + 27); 342 info->bias_p = nvbios_rd08(bios, data + 27);
341 info->refclk = nv_ro32(bios, data + 28); 343 info->refclk = nvbios_rd32(bios, data + 28);
342 break; 344 break;
343 case 0x40: 345 case 0x40:
344 info->refclk = nv_ro16(bios, data + 9) * 1000; 346 info->refclk = nvbios_rd16(bios, data + 9) * 1000;
345 data = nv_ro16(bios, data + 1); 347 data = nvbios_rd16(bios, data + 1);
346 348
347 info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000; 349 info->vco1.min_freq = nvbios_rd16(bios, data + 0) * 1000;
348 info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000; 350 info->vco1.max_freq = nvbios_rd16(bios, data + 2) * 1000;
349 info->vco1.min_inputfreq = nv_ro16(bios, data + 4) * 1000; 351 info->vco1.min_inputfreq = nvbios_rd16(bios, data + 4) * 1000;
350 info->vco1.max_inputfreq = nv_ro16(bios, data + 6) * 1000; 352 info->vco1.max_inputfreq = nvbios_rd16(bios, data + 6) * 1000;
351 info->vco1.min_m = nv_ro08(bios, data + 8); 353 info->vco1.min_m = nvbios_rd08(bios, data + 8);
352 info->vco1.max_m = nv_ro08(bios, data + 9); 354 info->vco1.max_m = nvbios_rd08(bios, data + 9);
353 info->vco1.min_n = nv_ro08(bios, data + 10); 355 info->vco1.min_n = nvbios_rd08(bios, data + 10);
354 info->vco1.max_n = nv_ro08(bios, data + 11); 356 info->vco1.max_n = nvbios_rd08(bios, data + 11);
355 info->min_p = nv_ro08(bios, data + 12); 357 info->min_p = nvbios_rd08(bios, data + 12);
356 info->max_p = nv_ro08(bios, data + 13); 358 info->max_p = nvbios_rd08(bios, data + 13);
357 break; 359 break;
358 default: 360 default:
359 nv_error(bios, "unknown pll limits version 0x%02x\n", ver); 361 nvkm_error(subdev, "unknown pll limits version 0x%02x\n", ver);
360 return -EINVAL; 362 return -EINVAL;
361 } 363 }
362 364
363 if (!info->refclk) { 365 if (!info->refclk) {
364 info->refclk = nv_device(bios)->crystal; 366 info->refclk = device->crystal;
365 if (bios->version.chip == 0x51) { 367 if (bios->version.chip == 0x51) {
366 u32 sel_clk = nv_rd32(bios, 0x680524); 368 u32 sel_clk = nvkm_rd32(device, 0x680524);
367 if ((info->reg == 0x680508 && sel_clk & 0x20) || 369 if ((info->reg == 0x680508 && sel_clk & 0x20) ||
368 (info->reg == 0x680520 && sel_clk & 0x80)) { 370 (info->reg == 0x680520 && sel_clk & 0x80)) {
369 if (nv_rdvgac(bios, 0, 0x27) < 0xa3) 371 if (nvkm_rdvgac(device, 0, 0x27) < 0xa3)
370 info->refclk = 200000; 372 info->refclk = 200000;
371 else 373 else
372 info->refclk = 25000; 374 info->refclk = 25000;
@@ -380,8 +382,8 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
380 * with an empty limit table (seen on nv18) 382 * with an empty limit table (seen on nv18)
381 */ 383 */
382 if (!info->vco1.max_freq) { 384 if (!info->vco1.max_freq) {
383 info->vco1.max_freq = nv_ro32(bios, bios->bmp_offset + 67); 385 info->vco1.max_freq = nvbios_rd32(bios, bios->bmp_offset + 67);
384 info->vco1.min_freq = nv_ro32(bios, bios->bmp_offset + 71); 386 info->vco1.min_freq = nvbios_rd32(bios, bios->bmp_offset + 71);
385 if (bmp_version(bios) < 0x0506) { 387 if (bmp_version(bios) < 0x0506) {
386 info->vco1.max_freq = 256000; 388 info->vco1.max_freq = 256000;
387 info->vco1.min_freq = 128000; 389 info->vco1.min_freq = 128000;
@@ -393,7 +395,7 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
393 info->vco1.max_n = 0xff; 395 info->vco1.max_n = 0xff;
394 info->vco1.min_m = 0x1; 396 info->vco1.min_m = 0x1;
395 397
396 if (nv_device(bios)->crystal == 13500) { 398 if (device->crystal == 13500) {
397 /* nv05 does this, nv11 doesn't, nv10 unknown */ 399 /* nv05 does this, nv11 doesn't, nv10 unknown */
398 if (bios->version.chip < 0x11) 400 if (bios->version.chip < 0x11)
399 info->vco1.min_m = 0x7; 401 info->vco1.min_m = 0x7;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
index 20c5ce0cd573..441ec451b788 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
@@ -49,12 +49,12 @@ nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
49 49
50 if (!bit_entry(bios, 'p', &bit_p)) { 50 if (!bit_entry(bios, 'p', &bit_p)) {
51 if (bit_p.version == 2 && bit_p.length >= 4) 51 if (bit_p.version == 2 && bit_p.length >= 4)
52 data = nv_ro32(bios, bit_p.offset + 0x00); 52 data = nvbios_rd32(bios, bit_p.offset + 0x00);
53 if ((data = weirdo_pointer(bios, data))) { 53 if ((data = weirdo_pointer(bios, data))) {
54 *ver = nv_ro08(bios, data + 0x00); /* maybe? */ 54 *ver = nvbios_rd08(bios, data + 0x00); /* maybe? */
55 *hdr = nv_ro08(bios, data + 0x01); 55 *hdr = nvbios_rd08(bios, data + 0x01);
56 *len = nv_ro08(bios, data + 0x02); 56 *len = nvbios_rd08(bios, data + 0x02);
57 *cnt = nv_ro08(bios, data + 0x03); 57 *cnt = nvbios_rd08(bios, data + 0x03);
58 } 58 }
59 } 59 }
60 60
@@ -95,8 +95,8 @@ nvbios_pmuEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
95 memset(info, 0x00, sizeof(*info)); 95 memset(info, 0x00, sizeof(*info));
96 switch (!!data * *ver) { 96 switch (!!data * *ver) {
97 default: 97 default:
98 info->type = nv_ro08(bios, data + 0x00); 98 info->type = nvbios_rd08(bios, data + 0x00);
99 info->data = nv_ro32(bios, data + 0x02); 99 info->data = nvbios_rd32(bios, data + 0x02);
100 break; 100 break;
101 } 101 }
102 return data; 102 return data;
@@ -112,21 +112,21 @@ nvbios_pmuRm(struct nvkm_bios *bios, u8 type, struct nvbios_pmuR *info)
112 while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) { 112 while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
113 if ( pmuE.type == type && 113 if ( pmuE.type == type &&
114 (data = weirdo_pointer(bios, pmuE.data))) { 114 (data = weirdo_pointer(bios, pmuE.data))) {
115 info->init_addr_pmu = nv_ro32(bios, data + 0x08); 115 info->init_addr_pmu = nvbios_rd32(bios, data + 0x08);
116 info->args_addr_pmu = nv_ro32(bios, data + 0x0c); 116 info->args_addr_pmu = nvbios_rd32(bios, data + 0x0c);
117 info->boot_addr = data + 0x30; 117 info->boot_addr = data + 0x30;
118 info->boot_addr_pmu = nv_ro32(bios, data + 0x10) + 118 info->boot_addr_pmu = nvbios_rd32(bios, data + 0x10) +
119 nv_ro32(bios, data + 0x18); 119 nvbios_rd32(bios, data + 0x18);
120 info->boot_size = nv_ro32(bios, data + 0x1c) - 120 info->boot_size = nvbios_rd32(bios, data + 0x1c) -
121 nv_ro32(bios, data + 0x18); 121 nvbios_rd32(bios, data + 0x18);
122 info->code_addr = info->boot_addr + info->boot_size; 122 info->code_addr = info->boot_addr + info->boot_size;
123 info->code_addr_pmu = info->boot_addr_pmu + 123 info->code_addr_pmu = info->boot_addr_pmu +
124 info->boot_size; 124 info->boot_size;
125 info->code_size = nv_ro32(bios, data + 0x20); 125 info->code_size = nvbios_rd32(bios, data + 0x20);
126 info->data_addr = data + 0x30 + 126 info->data_addr = data + 0x30 +
127 nv_ro32(bios, data + 0x24); 127 nvbios_rd32(bios, data + 0x24);
128 info->data_addr_pmu = nv_ro32(bios, data + 0x28); 128 info->data_addr_pmu = nvbios_rd32(bios, data + 0x28);
129 info->data_size = nv_ro32(bios, data + 0x2c); 129 info->data_size = nvbios_rd32(bios, data + 0x2c);
130 return true; 130 return true;
131 } 131 }
132 } 132 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
index 95e4fa1531d6..e0ec2a6b7b79 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
@@ -1,5 +1,6 @@
1#ifndef __NVKM_BIOS_PRIV_H__ 1#ifndef __NVKM_BIOS_PRIV_H__
2#define __NVKM_BIOS_PRIV_H__ 2#define __NVKM_BIOS_PRIV_H__
3#define nvkm_bios(p) container_of((p), struct nvkm_bios, subdev)
3#include <subdev/bios.h> 4#include <subdev/bios.h>
4 5
5struct nvbios_source { 6struct nvbios_source {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
index a17b221119b2..d5222af10b96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
@@ -29,7 +29,7 @@
29static u8 29static u8
30nvbios_ramcfg_strap(struct nvkm_subdev *subdev) 30nvbios_ramcfg_strap(struct nvkm_subdev *subdev)
31{ 31{
32 return (nv_rd32(subdev, 0x101000) & 0x0000003c) >> 2; 32 return (nvkm_rd32(subdev->device, 0x101000) & 0x0000003c) >> 2;
33} 33}
34 34
35u8 35u8
@@ -39,9 +39,9 @@ nvbios_ramcfg_count(struct nvkm_bios *bios)
39 39
40 if (!bit_entry(bios, 'M', &bit_M)) { 40 if (!bit_entry(bios, 'M', &bit_M)) {
41 if (bit_M.version == 1 && bit_M.length >= 5) 41 if (bit_M.version == 1 && bit_M.length >= 5)
42 return nv_ro08(bios, bit_M.offset + 2); 42 return nvbios_rd08(bios, bit_M.offset + 2);
43 if (bit_M.version == 2 && bit_M.length >= 3) 43 if (bit_M.version == 2 && bit_M.length >= 3)
44 return nv_ro08(bios, bit_M.offset + 0); 44 return nvbios_rd08(bios, bit_M.offset + 0);
45 } 45 }
46 46
47 return 0x00; 47 return 0x00;
@@ -50,7 +50,7 @@ nvbios_ramcfg_count(struct nvkm_bios *bios)
50u8 50u8
51nvbios_ramcfg_index(struct nvkm_subdev *subdev) 51nvbios_ramcfg_index(struct nvkm_subdev *subdev)
52{ 52{
53 struct nvkm_bios *bios = nvkm_bios(subdev); 53 struct nvkm_bios *bios = subdev->device->bios;
54 u8 strap = nvbios_ramcfg_strap(subdev); 54 u8 strap = nvbios_ramcfg_strap(subdev);
55 u32 xlat = 0x00000000; 55 u32 xlat = 0x00000000;
56 struct bit_entry bit_M; 56 struct bit_entry bit_M;
@@ -59,7 +59,7 @@ nvbios_ramcfg_index(struct nvkm_subdev *subdev)
59 59
60 if (!bit_entry(bios, 'M', &bit_M)) { 60 if (!bit_entry(bios, 'M', &bit_M)) {
61 if (bit_M.version == 1 && bit_M.length >= 5) 61 if (bit_M.version == 1 && bit_M.length >= 5)
62 xlat = nv_ro16(bios, bit_M.offset + 3); 62 xlat = nvbios_rd16(bios, bit_M.offset + 3);
63 if (bit_M.version == 2 && bit_M.length >= 3) { 63 if (bit_M.version == 2 && bit_M.length >= 3) {
64 /*XXX: is M ever shorter than this? 64 /*XXX: is M ever shorter than this?
65 * if not - what is xlat used for now? 65 * if not - what is xlat used for now?
@@ -68,11 +68,11 @@ nvbios_ramcfg_index(struct nvkm_subdev *subdev)
68 if (bit_M.length >= 7 && 68 if (bit_M.length >= 7 &&
69 nvbios_M0203Em(bios, strap, &ver, &hdr, &M0203E)) 69 nvbios_M0203Em(bios, strap, &ver, &hdr, &M0203E))
70 return M0203E.group; 70 return M0203E.group;
71 xlat = nv_ro16(bios, bit_M.offset + 1); 71 xlat = nvbios_rd16(bios, bit_M.offset + 1);
72 } 72 }
73 } 73 }
74 74
75 if (xlat) 75 if (xlat)
76 strap = nv_ro08(bios, xlat + strap); 76 strap = nvbios_rd08(bios, xlat + strap);
77 return strap; 77 return strap;
78} 78}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
index 8b17bb4b220c..f0e1fc74a52e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
@@ -34,18 +34,18 @@ nvbios_rammapTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
34 34
35 if (!bit_entry(bios, 'P', &bit_P)) { 35 if (!bit_entry(bios, 'P', &bit_P)) {
36 if (bit_P.version == 2) 36 if (bit_P.version == 2)
37 rammap = nv_ro16(bios, bit_P.offset + 4); 37 rammap = nvbios_rd16(bios, bit_P.offset + 4);
38 38
39 if (rammap) { 39 if (rammap) {
40 *ver = nv_ro08(bios, rammap + 0); 40 *ver = nvbios_rd08(bios, rammap + 0);
41 switch (*ver) { 41 switch (*ver) {
42 case 0x10: 42 case 0x10:
43 case 0x11: 43 case 0x11:
44 *hdr = nv_ro08(bios, rammap + 1); 44 *hdr = nvbios_rd08(bios, rammap + 1);
45 *cnt = nv_ro08(bios, rammap + 5); 45 *cnt = nvbios_rd08(bios, rammap + 5);
46 *len = nv_ro08(bios, rammap + 2); 46 *len = nvbios_rd08(bios, rammap + 2);
47 *snr = nv_ro08(bios, rammap + 4); 47 *snr = nvbios_rd08(bios, rammap + 4);
48 *ssz = nv_ro08(bios, rammap + 3); 48 *ssz = nvbios_rd08(bios, rammap + 3);
49 return rammap; 49 return rammap;
50 default: 50 default:
51 break; 51 break;
@@ -72,6 +72,21 @@ nvbios_rammapEe(struct nvkm_bios *bios, int idx,
72 return 0x0000; 72 return 0x0000;
73} 73}
74 74
75/* Pretend a performance mode is also a rammap entry, helps coalesce entries
76 * later on */
77u32
78nvbios_rammapEp_from_perf(struct nvkm_bios *bios, u32 data, u8 size,
79 struct nvbios_ramcfg *p)
80{
81 memset(p, 0x00, sizeof(*p));
82
83 p->rammap_00_16_20 = (nvbios_rd08(bios, data + 0x16) & 0x20) >> 5;
84 p->rammap_00_16_40 = (nvbios_rd08(bios, data + 0x16) & 0x40) >> 6;
85 p->rammap_00_17_02 = (nvbios_rd08(bios, data + 0x17) & 0x02) >> 1;
86
87 return data;
88}
89
75u32 90u32
76nvbios_rammapEp(struct nvkm_bios *bios, int idx, 91nvbios_rammapEp(struct nvkm_bios *bios, int idx,
77 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p) 92 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p)
@@ -82,18 +97,18 @@ nvbios_rammapEp(struct nvkm_bios *bios, int idx,
82 p->rammap_hdr = *hdr; 97 p->rammap_hdr = *hdr;
83 switch (!!data * *ver) { 98 switch (!!data * *ver) {
84 case 0x10: 99 case 0x10:
85 p->rammap_min = nv_ro16(bios, data + 0x00); 100 p->rammap_min = nvbios_rd16(bios, data + 0x00);
86 p->rammap_max = nv_ro16(bios, data + 0x02); 101 p->rammap_max = nvbios_rd16(bios, data + 0x02);
87 p->rammap_10_04_02 = (nv_ro08(bios, data + 0x04) & 0x02) >> 1; 102 p->rammap_10_04_02 = (nvbios_rd08(bios, data + 0x04) & 0x02) >> 1;
88 p->rammap_10_04_08 = (nv_ro08(bios, data + 0x04) & 0x08) >> 3; 103 p->rammap_10_04_08 = (nvbios_rd08(bios, data + 0x04) & 0x08) >> 3;
89 break; 104 break;
90 case 0x11: 105 case 0x11:
91 p->rammap_min = nv_ro16(bios, data + 0x00); 106 p->rammap_min = nvbios_rd16(bios, data + 0x00);
92 p->rammap_max = nv_ro16(bios, data + 0x02); 107 p->rammap_max = nvbios_rd16(bios, data + 0x02);
93 p->rammap_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0; 108 p->rammap_11_08_01 = (nvbios_rd08(bios, data + 0x08) & 0x01) >> 0;
94 p->rammap_11_08_0c = (nv_ro08(bios, data + 0x08) & 0x0c) >> 2; 109 p->rammap_11_08_0c = (nvbios_rd08(bios, data + 0x08) & 0x0c) >> 2;
95 p->rammap_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4; 110 p->rammap_11_08_10 = (nvbios_rd08(bios, data + 0x08) & 0x10) >> 4;
96 temp = nv_ro32(bios, data + 0x09); 111 temp = nvbios_rd32(bios, data + 0x09);
97 p->rammap_11_09_01ff = (temp & 0x000001ff) >> 0; 112 p->rammap_11_09_01ff = (temp & 0x000001ff) >> 0;
98 p->rammap_11_0a_03fe = (temp & 0x0003fe00) >> 9; 113 p->rammap_11_0a_03fe = (temp & 0x0003fe00) >> 9;
99 p->rammap_11_0a_0400 = (temp & 0x00040000) >> 18; 114 p->rammap_11_0a_0400 = (temp & 0x00040000) >> 18;
@@ -102,10 +117,10 @@ nvbios_rammapEp(struct nvkm_bios *bios, int idx,
102 p->rammap_11_0b_0200 = (temp & 0x02000000) >> 25; 117 p->rammap_11_0b_0200 = (temp & 0x02000000) >> 25;
103 p->rammap_11_0b_0400 = (temp & 0x04000000) >> 26; 118 p->rammap_11_0b_0400 = (temp & 0x04000000) >> 26;
104 p->rammap_11_0b_0800 = (temp & 0x08000000) >> 27; 119 p->rammap_11_0b_0800 = (temp & 0x08000000) >> 27;
105 p->rammap_11_0d = nv_ro08(bios, data + 0x0d); 120 p->rammap_11_0d = nvbios_rd08(bios, data + 0x0d);
106 p->rammap_11_0e = nv_ro08(bios, data + 0x0e); 121 p->rammap_11_0e = nvbios_rd08(bios, data + 0x0e);
107 p->rammap_11_0f = nv_ro08(bios, data + 0x0f); 122 p->rammap_11_0f = nvbios_rd08(bios, data + 0x0f);
108 p->rammap_11_11_0c = (nv_ro08(bios, data + 0x11) & 0x0c) >> 2; 123 p->rammap_11_11_0c = (nvbios_rd08(bios, data + 0x11) & 0x0c) >> 2;
109 break; 124 break;
110 default: 125 default:
111 data = 0; 126 data = 0;
@@ -141,6 +156,36 @@ nvbios_rammapSe(struct nvkm_bios *bios, u32 data,
141} 156}
142 157
143u32 158u32
159nvbios_rammapSp_from_perf(struct nvkm_bios *bios, u32 data, u8 size, int idx,
160 struct nvbios_ramcfg *p)
161{
162 data += (idx * size);
163
164 if (size < 11)
165 return 0x00000000;
166
167 p->ramcfg_ver = 0;
168 p->ramcfg_timing = nvbios_rd08(bios, data + 0x01);
169 p->ramcfg_00_03_01 = (nvbios_rd08(bios, data + 0x03) & 0x01) >> 0;
170 p->ramcfg_00_03_02 = (nvbios_rd08(bios, data + 0x03) & 0x02) >> 1;
171 p->ramcfg_DLLoff = (nvbios_rd08(bios, data + 0x03) & 0x04) >> 2;
172 p->ramcfg_00_03_08 = (nvbios_rd08(bios, data + 0x03) & 0x08) >> 3;
173 p->ramcfg_RON = (nvbios_rd08(bios, data + 0x03) & 0x10) >> 3;
174 p->ramcfg_00_04_02 = (nvbios_rd08(bios, data + 0x04) & 0x02) >> 1;
175 p->ramcfg_00_04_04 = (nvbios_rd08(bios, data + 0x04) & 0x04) >> 2;
176 p->ramcfg_00_04_20 = (nvbios_rd08(bios, data + 0x04) & 0x20) >> 5;
177 p->ramcfg_00_05 = (nvbios_rd08(bios, data + 0x05) & 0xff) >> 0;
178 p->ramcfg_00_06 = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
179 p->ramcfg_00_07 = (nvbios_rd08(bios, data + 0x07) & 0xff) >> 0;
180 p->ramcfg_00_08 = (nvbios_rd08(bios, data + 0x08) & 0xff) >> 0;
181 p->ramcfg_00_09 = (nvbios_rd08(bios, data + 0x09) & 0xff) >> 0;
182 p->ramcfg_00_0a_0f = (nvbios_rd08(bios, data + 0x0a) & 0x0f) >> 0;
183 p->ramcfg_00_0a_f0 = (nvbios_rd08(bios, data + 0x0a) & 0xf0) >> 4;
184
185 return data;
186}
187
188u32
144nvbios_rammapSp(struct nvkm_bios *bios, u32 data, 189nvbios_rammapSp(struct nvkm_bios *bios, u32 data,
145 u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx, 190 u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
146 u8 *ver, u8 *hdr, struct nvbios_ramcfg *p) 191 u8 *ver, u8 *hdr, struct nvbios_ramcfg *p)
@@ -150,58 +195,58 @@ nvbios_rammapSp(struct nvkm_bios *bios, u32 data,
150 p->ramcfg_hdr = *hdr; 195 p->ramcfg_hdr = *hdr;
151 switch (!!data * *ver) { 196 switch (!!data * *ver) {
152 case 0x10: 197 case 0x10:
153 p->ramcfg_timing = nv_ro08(bios, data + 0x01); 198 p->ramcfg_timing = nvbios_rd08(bios, data + 0x01);
154 p->ramcfg_10_02_01 = (nv_ro08(bios, data + 0x02) & 0x01) >> 0; 199 p->ramcfg_10_02_01 = (nvbios_rd08(bios, data + 0x02) & 0x01) >> 0;
155 p->ramcfg_10_02_02 = (nv_ro08(bios, data + 0x02) & 0x02) >> 1; 200 p->ramcfg_10_02_02 = (nvbios_rd08(bios, data + 0x02) & 0x02) >> 1;
156 p->ramcfg_10_02_04 = (nv_ro08(bios, data + 0x02) & 0x04) >> 2; 201 p->ramcfg_10_02_04 = (nvbios_rd08(bios, data + 0x02) & 0x04) >> 2;
157 p->ramcfg_10_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3; 202 p->ramcfg_10_02_08 = (nvbios_rd08(bios, data + 0x02) & 0x08) >> 3;
158 p->ramcfg_10_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4; 203 p->ramcfg_10_02_10 = (nvbios_rd08(bios, data + 0x02) & 0x10) >> 4;
159 p->ramcfg_10_02_20 = (nv_ro08(bios, data + 0x02) & 0x20) >> 5; 204 p->ramcfg_10_02_20 = (nvbios_rd08(bios, data + 0x02) & 0x20) >> 5;
160 p->ramcfg_10_DLLoff = (nv_ro08(bios, data + 0x02) & 0x40) >> 6; 205 p->ramcfg_DLLoff = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
161 p->ramcfg_10_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0; 206 p->ramcfg_10_03_0f = (nvbios_rd08(bios, data + 0x03) & 0x0f) >> 0;
162 p->ramcfg_10_04_01 = (nv_ro08(bios, data + 0x04) & 0x01) >> 0; 207 p->ramcfg_10_04_01 = (nvbios_rd08(bios, data + 0x04) & 0x01) >> 0;
163 p->ramcfg_10_05 = (nv_ro08(bios, data + 0x05) & 0xff) >> 0; 208 p->ramcfg_10_05 = (nvbios_rd08(bios, data + 0x05) & 0xff) >> 0;
164 p->ramcfg_10_06 = (nv_ro08(bios, data + 0x06) & 0xff) >> 0; 209 p->ramcfg_10_06 = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
165 p->ramcfg_10_07 = (nv_ro08(bios, data + 0x07) & 0xff) >> 0; 210 p->ramcfg_10_07 = (nvbios_rd08(bios, data + 0x07) & 0xff) >> 0;
166 p->ramcfg_10_08 = (nv_ro08(bios, data + 0x08) & 0xff) >> 0; 211 p->ramcfg_10_08 = (nvbios_rd08(bios, data + 0x08) & 0xff) >> 0;
167 p->ramcfg_10_09_0f = (nv_ro08(bios, data + 0x09) & 0x0f) >> 0; 212 p->ramcfg_10_09_0f = (nvbios_rd08(bios, data + 0x09) & 0x0f) >> 0;
168 p->ramcfg_10_09_f0 = (nv_ro08(bios, data + 0x09) & 0xf0) >> 4; 213 p->ramcfg_10_09_f0 = (nvbios_rd08(bios, data + 0x09) & 0xf0) >> 4;
169 break; 214 break;
170 case 0x11: 215 case 0x11:
171 p->ramcfg_timing = nv_ro08(bios, data + 0x00); 216 p->ramcfg_timing = nvbios_rd08(bios, data + 0x00);
172 p->ramcfg_11_01_01 = (nv_ro08(bios, data + 0x01) & 0x01) >> 0; 217 p->ramcfg_11_01_01 = (nvbios_rd08(bios, data + 0x01) & 0x01) >> 0;
173 p->ramcfg_11_01_02 = (nv_ro08(bios, data + 0x01) & 0x02) >> 1; 218 p->ramcfg_11_01_02 = (nvbios_rd08(bios, data + 0x01) & 0x02) >> 1;
174 p->ramcfg_11_01_04 = (nv_ro08(bios, data + 0x01) & 0x04) >> 2; 219 p->ramcfg_11_01_04 = (nvbios_rd08(bios, data + 0x01) & 0x04) >> 2;
175 p->ramcfg_11_01_08 = (nv_ro08(bios, data + 0x01) & 0x08) >> 3; 220 p->ramcfg_11_01_08 = (nvbios_rd08(bios, data + 0x01) & 0x08) >> 3;
176 p->ramcfg_11_01_10 = (nv_ro08(bios, data + 0x01) & 0x10) >> 4; 221 p->ramcfg_11_01_10 = (nvbios_rd08(bios, data + 0x01) & 0x10) >> 4;
177 p->ramcfg_11_01_20 = (nv_ro08(bios, data + 0x01) & 0x20) >> 5; 222 p->ramcfg_11_01_20 = (nvbios_rd08(bios, data + 0x01) & 0x20) >> 5;
178 p->ramcfg_11_01_40 = (nv_ro08(bios, data + 0x01) & 0x40) >> 6; 223 p->ramcfg_11_01_40 = (nvbios_rd08(bios, data + 0x01) & 0x40) >> 6;
179 p->ramcfg_11_01_80 = (nv_ro08(bios, data + 0x01) & 0x80) >> 7; 224 p->ramcfg_11_01_80 = (nvbios_rd08(bios, data + 0x01) & 0x80) >> 7;
180 p->ramcfg_11_02_03 = (nv_ro08(bios, data + 0x02) & 0x03) >> 0; 225 p->ramcfg_11_02_03 = (nvbios_rd08(bios, data + 0x02) & 0x03) >> 0;
181 p->ramcfg_11_02_04 = (nv_ro08(bios, data + 0x02) & 0x04) >> 2; 226 p->ramcfg_11_02_04 = (nvbios_rd08(bios, data + 0x02) & 0x04) >> 2;
182 p->ramcfg_11_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3; 227 p->ramcfg_11_02_08 = (nvbios_rd08(bios, data + 0x02) & 0x08) >> 3;
183 p->ramcfg_11_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4; 228 p->ramcfg_11_02_10 = (nvbios_rd08(bios, data + 0x02) & 0x10) >> 4;
184 p->ramcfg_11_02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6; 229 p->ramcfg_11_02_40 = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
185 p->ramcfg_11_02_80 = (nv_ro08(bios, data + 0x02) & 0x80) >> 7; 230 p->ramcfg_11_02_80 = (nvbios_rd08(bios, data + 0x02) & 0x80) >> 7;
186 p->ramcfg_11_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0; 231 p->ramcfg_11_03_0f = (nvbios_rd08(bios, data + 0x03) & 0x0f) >> 0;
187 p->ramcfg_11_03_30 = (nv_ro08(bios, data + 0x03) & 0x30) >> 4; 232 p->ramcfg_11_03_30 = (nvbios_rd08(bios, data + 0x03) & 0x30) >> 4;
188 p->ramcfg_11_03_c0 = (nv_ro08(bios, data + 0x03) & 0xc0) >> 6; 233 p->ramcfg_11_03_c0 = (nvbios_rd08(bios, data + 0x03) & 0xc0) >> 6;
189 p->ramcfg_11_03_f0 = (nv_ro08(bios, data + 0x03) & 0xf0) >> 4; 234 p->ramcfg_11_03_f0 = (nvbios_rd08(bios, data + 0x03) & 0xf0) >> 4;
190 p->ramcfg_11_04 = (nv_ro08(bios, data + 0x04) & 0xff) >> 0; 235 p->ramcfg_11_04 = (nvbios_rd08(bios, data + 0x04) & 0xff) >> 0;
191 p->ramcfg_11_06 = (nv_ro08(bios, data + 0x06) & 0xff) >> 0; 236 p->ramcfg_11_06 = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
192 p->ramcfg_11_07_02 = (nv_ro08(bios, data + 0x07) & 0x02) >> 1; 237 p->ramcfg_11_07_02 = (nvbios_rd08(bios, data + 0x07) & 0x02) >> 1;
193 p->ramcfg_11_07_04 = (nv_ro08(bios, data + 0x07) & 0x04) >> 2; 238 p->ramcfg_11_07_04 = (nvbios_rd08(bios, data + 0x07) & 0x04) >> 2;
194 p->ramcfg_11_07_08 = (nv_ro08(bios, data + 0x07) & 0x08) >> 3; 239 p->ramcfg_11_07_08 = (nvbios_rd08(bios, data + 0x07) & 0x08) >> 3;
195 p->ramcfg_11_07_10 = (nv_ro08(bios, data + 0x07) & 0x10) >> 4; 240 p->ramcfg_11_07_10 = (nvbios_rd08(bios, data + 0x07) & 0x10) >> 4;
196 p->ramcfg_11_07_40 = (nv_ro08(bios, data + 0x07) & 0x40) >> 6; 241 p->ramcfg_11_07_40 = (nvbios_rd08(bios, data + 0x07) & 0x40) >> 6;
197 p->ramcfg_11_07_80 = (nv_ro08(bios, data + 0x07) & 0x80) >> 7; 242 p->ramcfg_11_07_80 = (nvbios_rd08(bios, data + 0x07) & 0x80) >> 7;
198 p->ramcfg_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0; 243 p->ramcfg_11_08_01 = (nvbios_rd08(bios, data + 0x08) & 0x01) >> 0;
199 p->ramcfg_11_08_02 = (nv_ro08(bios, data + 0x08) & 0x02) >> 1; 244 p->ramcfg_11_08_02 = (nvbios_rd08(bios, data + 0x08) & 0x02) >> 1;
200 p->ramcfg_11_08_04 = (nv_ro08(bios, data + 0x08) & 0x04) >> 2; 245 p->ramcfg_11_08_04 = (nvbios_rd08(bios, data + 0x08) & 0x04) >> 2;
201 p->ramcfg_11_08_08 = (nv_ro08(bios, data + 0x08) & 0x08) >> 3; 246 p->ramcfg_11_08_08 = (nvbios_rd08(bios, data + 0x08) & 0x08) >> 3;
202 p->ramcfg_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4; 247 p->ramcfg_11_08_10 = (nvbios_rd08(bios, data + 0x08) & 0x10) >> 4;
203 p->ramcfg_11_08_20 = (nv_ro08(bios, data + 0x08) & 0x20) >> 5; 248 p->ramcfg_11_08_20 = (nvbios_rd08(bios, data + 0x08) & 0x20) >> 5;
204 p->ramcfg_11_09 = (nv_ro08(bios, data + 0x09) & 0xff) >> 0; 249 p->ramcfg_11_09 = (nvbios_rd08(bios, data + 0x09) & 0xff) >> 0;
205 break; 250 break;
206 default: 251 default:
207 data = 0; 252 data = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 8c2b7cba5cff..792f017525f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -23,13 +23,11 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26#include <core/device.h>
27#include <core/option.h> 26#include <core/option.h>
28#include <subdev/bios.h> 27#include <subdev/bios.h>
29#include <subdev/bios/image.h> 28#include <subdev/bios/image.h>
30 29
31struct shadow { 30struct shadow {
32 struct nvkm_oclass base;
33 u32 skip; 31 u32 skip;
34 const struct nvbios_source *func; 32 const struct nvbios_source *func;
35 void *data; 33 void *data;
@@ -38,9 +36,8 @@ struct shadow {
38}; 36};
39 37
40static bool 38static bool
41shadow_fetch(struct nvkm_bios *bios, u32 upto) 39shadow_fetch(struct nvkm_bios *bios, struct shadow *mthd, u32 upto)
42{ 40{
43 struct shadow *mthd = (void *)nv_object(bios)->oclass;
44 const u32 limit = (upto + 3) & ~3; 41 const u32 limit = (upto + 3) & ~3;
45 const u32 start = bios->size; 42 const u32 start = bios->size;
46 void *data = mthd->data; 43 void *data = mthd->data;
@@ -51,65 +48,35 @@ shadow_fetch(struct nvkm_bios *bios, u32 upto)
51 return bios->size >= limit; 48 return bios->size >= limit;
52} 49}
53 50
54static u8
55shadow_rd08(struct nvkm_object *object, u64 addr)
56{
57 struct nvkm_bios *bios = (void *)object;
58 if (shadow_fetch(bios, addr + 1))
59 return bios->data[addr];
60 return 0x00;
61}
62
63static u16
64shadow_rd16(struct nvkm_object *object, u64 addr)
65{
66 struct nvkm_bios *bios = (void *)object;
67 if (shadow_fetch(bios, addr + 2))
68 return get_unaligned_le16(&bios->data[addr]);
69 return 0x0000;
70}
71
72static u32
73shadow_rd32(struct nvkm_object *object, u64 addr)
74{
75 struct nvkm_bios *bios = (void *)object;
76 if (shadow_fetch(bios, addr + 4))
77 return get_unaligned_le32(&bios->data[addr]);
78 return 0x00000000;
79}
80
81static struct nvkm_oclass
82shadow_class = {
83 .handle = NV_SUBDEV(VBIOS, 0x00),
84 .ofuncs = &(struct nvkm_ofuncs) {
85 .rd08 = shadow_rd08,
86 .rd16 = shadow_rd16,
87 .rd32 = shadow_rd32,
88 },
89};
90
91static int 51static int
92shadow_image(struct nvkm_bios *bios, int idx, struct shadow *mthd) 52shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
93{ 53{
54 struct nvkm_subdev *subdev = &bios->subdev;
94 struct nvbios_image image; 55 struct nvbios_image image;
95 int score = 1; 56 int score = 1;
96 57
58 if (!shadow_fetch(bios, mthd, offset + 0x1000)) {
59 nvkm_debug(subdev, "%08x: header fetch failed\n", offset);
60 return 0;
61 }
62
97 if (!nvbios_image(bios, idx, &image)) { 63 if (!nvbios_image(bios, idx, &image)) {
98 nv_debug(bios, "image %d invalid\n", idx); 64 nvkm_debug(subdev, "image %d invalid\n", idx);
99 return 0; 65 return 0;
100 } 66 }
101 nv_debug(bios, "%08x: type %02x, %d bytes\n", 67 nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
102 image.base, image.type, image.size); 68 image.base, image.type, image.size);
103 69
104 if (!shadow_fetch(bios, image.size)) { 70 if (!shadow_fetch(bios, mthd, image.size)) {
105 nv_debug(bios, "%08x: fetch failed\n", image.base); 71 nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
106 return 0; 72 return 0;
107 } 73 }
108 74
109 switch (image.type) { 75 switch (image.type) {
110 case 0x00: 76 case 0x00:
111 if (nvbios_checksum(&bios->data[image.base], image.size)) { 77 if (nvbios_checksum(&bios->data[image.base], image.size)) {
112 nv_debug(bios, "%08x: checksum failed\n", image.base); 78 nvkm_debug(subdev, "%08x: checksum failed\n",
79 image.base);
113 if (mthd->func->rw) 80 if (mthd->func->rw)
114 score += 1; 81 score += 1;
115 score += 1; 82 score += 1;
@@ -123,28 +90,17 @@ shadow_image(struct nvkm_bios *bios, int idx, struct shadow *mthd)
123 } 90 }
124 91
125 if (!image.last) 92 if (!image.last)
126 score += shadow_image(bios, idx + 1, mthd); 93 score += shadow_image(bios, idx + 1, offset + image.size, mthd);
127 return score; 94 return score;
128} 95}
129 96
130static int 97static int
131shadow_score(struct nvkm_bios *bios, struct shadow *mthd)
132{
133 struct nvkm_oclass *oclass = nv_object(bios)->oclass;
134 int score;
135 nv_object(bios)->oclass = &mthd->base;
136 score = shadow_image(bios, 0, mthd);
137 nv_object(bios)->oclass = oclass;
138 return score;
139
140}
141
142static int
143shadow_method(struct nvkm_bios *bios, struct shadow *mthd, const char *name) 98shadow_method(struct nvkm_bios *bios, struct shadow *mthd, const char *name)
144{ 99{
145 const struct nvbios_source *func = mthd->func; 100 const struct nvbios_source *func = mthd->func;
101 struct nvkm_subdev *subdev = &bios->subdev;
146 if (func->name) { 102 if (func->name) {
147 nv_debug(bios, "trying %s...\n", name ? name : func->name); 103 nvkm_debug(subdev, "trying %s...\n", name ? name : func->name);
148 if (func->init) { 104 if (func->init) {
149 mthd->data = func->init(bios, name); 105 mthd->data = func->init(bios, name);
150 if (IS_ERR(mthd->data)) { 106 if (IS_ERR(mthd->data)) {
@@ -152,10 +108,10 @@ shadow_method(struct nvkm_bios *bios, struct shadow *mthd, const char *name)
152 return 0; 108 return 0;
153 } 109 }
154 } 110 }
155 mthd->score = shadow_score(bios, mthd); 111 mthd->score = shadow_image(bios, 0, 0, mthd);
156 if (func->fini) 112 if (func->fini)
157 func->fini(mthd->data); 113 func->fini(mthd->data);
158 nv_debug(bios, "scored %d\n", mthd->score); 114 nvkm_debug(subdev, "scored %d\n", mthd->score);
159 mthd->data = bios->data; 115 mthd->data = bios->data;
160 mthd->size = bios->size; 116 mthd->size = bios->size;
161 bios->data = NULL; 117 bios->data = NULL;
@@ -178,7 +134,7 @@ shadow_fw_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
178static void * 134static void *
179shadow_fw_init(struct nvkm_bios *bios, const char *name) 135shadow_fw_init(struct nvkm_bios *bios, const char *name)
180{ 136{
181 struct device *dev = &nv_device(bios)->pdev->dev; 137 struct device *dev = bios->subdev.device->dev;
182 const struct firmware *fw; 138 const struct firmware *fw;
183 int ret = request_firmware(&fw, name, dev); 139 int ret = request_firmware(&fw, name, dev);
184 if (ret) 140 if (ret)
@@ -198,22 +154,24 @@ shadow_fw = {
198int 154int
199nvbios_shadow(struct nvkm_bios *bios) 155nvbios_shadow(struct nvkm_bios *bios)
200{ 156{
157 struct nvkm_subdev *subdev = &bios->subdev;
158 struct nvkm_device *device = subdev->device;
201 struct shadow mthds[] = { 159 struct shadow mthds[] = {
202 { shadow_class, 0, &nvbios_of }, 160 { 0, &nvbios_of },
203 { shadow_class, 0, &nvbios_ramin }, 161 { 0, &nvbios_ramin },
204 { shadow_class, 0, &nvbios_rom }, 162 { 0, &nvbios_rom },
205 { shadow_class, 0, &nvbios_acpi_fast }, 163 { 0, &nvbios_acpi_fast },
206 { shadow_class, 4, &nvbios_acpi_slow }, 164 { 4, &nvbios_acpi_slow },
207 { shadow_class, 1, &nvbios_pcirom }, 165 { 1, &nvbios_pcirom },
208 { shadow_class, 1, &nvbios_platform }, 166 { 1, &nvbios_platform },
209 { shadow_class } 167 {}
210 }, *mthd = mthds, *best = NULL; 168 }, *mthd, *best = NULL;
211 const char *optarg; 169 const char *optarg;
212 char *source; 170 char *source;
213 int optlen; 171 int optlen;
214 172
215 /* handle user-specified bios source */ 173 /* handle user-specified bios source */
216 optarg = nvkm_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen); 174 optarg = nvkm_stropt(device->cfgopt, "NvBios", &optlen);
217 source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL; 175 source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
218 if (source) { 176 if (source) {
219 /* try to match one of the built-in methods */ 177 /* try to match one of the built-in methods */
@@ -234,7 +192,7 @@ nvbios_shadow(struct nvkm_bios *bios)
234 } 192 }
235 193
236 if (!best->score) { 194 if (!best->score) {
237 nv_error(bios, "%s invalid\n", source); 195 nvkm_error(subdev, "%s invalid\n", source);
238 kfree(source); 196 kfree(source);
239 source = NULL; 197 source = NULL;
240 } 198 }
@@ -259,12 +217,12 @@ nvbios_shadow(struct nvkm_bios *bios)
259 } 217 }
260 218
261 if (!best->score) { 219 if (!best->score) {
262 nv_fatal(bios, "unable to locate usable image\n"); 220 nvkm_error(subdev, "unable to locate usable image\n");
263 return -EINVAL; 221 return -EINVAL;
264 } 222 }
265 223
266 nv_info(bios, "using image from %s\n", best->func ? 224 nvkm_debug(subdev, "using image from %s\n", best->func ?
267 best->func->name : source); 225 best->func->name : source);
268 bios->data = best->data; 226 bios->data = best->data;
269 bios->size = best->size; 227 bios->size = best->size;
270 kfree(source); 228 kfree(source);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
index f9d0eb5647fa..8fecb5ff22a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
@@ -22,14 +22,12 @@
22 */ 22 */
23#include "priv.h" 23#include "priv.h"
24 24
25#include <core/device.h>
26
27#if defined(CONFIG_ACPI) && defined(CONFIG_X86) 25#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
28int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len); 26int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
29bool nouveau_acpi_rom_supported(struct pci_dev *pdev); 27bool nouveau_acpi_rom_supported(struct device *);
30#else 28#else
31static inline bool 29static inline bool
32nouveau_acpi_rom_supported(struct pci_dev *pdev) 30nouveau_acpi_rom_supported(struct device *dev)
33{ 31{
34 return false; 32 return false;
35} 33}
@@ -90,7 +88,7 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
90static void * 88static void *
91acpi_init(struct nvkm_bios *bios, const char *name) 89acpi_init(struct nvkm_bios *bios, const char *name)
92{ 90{
93 if (!nouveau_acpi_rom_supported(nv_device(bios)->pdev)) 91 if (!nouveau_acpi_rom_supported(bios->subdev.device->dev))
94 return ERR_PTR(-ENODEV); 92 return ERR_PTR(-ENODEV);
95 return NULL; 93 return NULL;
96} 94}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
index 4c19a7dba803..29a37f03ebf1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
@@ -22,7 +22,6 @@
22 */ 22 */
23#include "priv.h" 23#include "priv.h"
24 24
25#include <core/device.h>
26 25
27#if defined(__powerpc__) 26#if defined(__powerpc__)
28struct priv { 27struct priv {
@@ -44,7 +43,7 @@ of_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
44static void * 43static void *
45of_init(struct nvkm_bios *bios, const char *name) 44of_init(struct nvkm_bios *bios, const char *name)
46{ 45{
47 struct pci_dev *pdev = nv_device(bios)->pdev; 46 struct pci_dev *pdev = bios->subdev.device->pdev;
48 struct device_node *dn; 47 struct device_node *dn;
49 struct priv *priv; 48 struct priv *priv;
50 if (!(dn = pci_device_to_OF_node(pdev))) 49 if (!(dn = pci_device_to_OF_node(pdev)))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
index 1b045483dc87..9b91da09dc5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
@@ -22,7 +22,7 @@
22 */ 22 */
23#include "priv.h" 23#include "priv.h"
24 24
25#include <core/device.h> 25#include <core/pci.h>
26 26
27struct priv { 27struct priv {
28 struct pci_dev *pdev; 28 struct pci_dev *pdev;
@@ -53,10 +53,16 @@ pcirom_fini(void *data)
53static void * 53static void *
54pcirom_init(struct nvkm_bios *bios, const char *name) 54pcirom_init(struct nvkm_bios *bios, const char *name)
55{ 55{
56 struct pci_dev *pdev = nv_device(bios)->pdev; 56 struct nvkm_device *device = bios->subdev.device;
57 struct priv *priv = NULL; 57 struct priv *priv = NULL;
58 struct pci_dev *pdev;
58 int ret; 59 int ret;
59 60
61 if (device->func->pci)
62 pdev = device->func->pci(device)->pdev;
63 else
64 return ERR_PTR(-ENODEV);
65
60 if (!(ret = pci_enable_rom(pdev))) { 66 if (!(ret = pci_enable_rom(pdev))) {
61 if (ret = -ENOMEM, 67 if (ret = -ENOMEM,
62 (priv = kmalloc(sizeof(*priv), GFP_KERNEL))) { 68 (priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
@@ -85,10 +91,16 @@ nvbios_pcirom = {
85static void * 91static void *
86platform_init(struct nvkm_bios *bios, const char *name) 92platform_init(struct nvkm_bios *bios, const char *name)
87{ 93{
88 struct pci_dev *pdev = nv_device(bios)->pdev; 94 struct nvkm_device *device = bios->subdev.device;
95 struct pci_dev *pdev;
89 struct priv *priv; 96 struct priv *priv;
90 int ret = -ENOMEM; 97 int ret = -ENOMEM;
91 98
99 if (device->func->pci)
100 pdev = device->func->pci(device)->pdev;
101 else
102 return ERR_PTR(-ENODEV);
103
92 if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) { 104 if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
93 if (ret = -ENODEV, 105 if (ret = -ENODEV,
94 (priv->rom = pci_platform_rom(pdev, &priv->size))) 106 (priv->rom = pci_platform_rom(pdev, &priv->size)))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
index abe8ae4d3a9f..0f537c22804c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
@@ -22,8 +22,6 @@
22 */ 22 */
23#include "priv.h" 23#include "priv.h"
24 24
25#include <core/device.h>
26
27struct priv { 25struct priv {
28 struct nvkm_bios *bios; 26 struct nvkm_bios *bios;
29 u32 bar0; 27 u32 bar0;
@@ -32,10 +30,11 @@ struct priv {
32static u32 30static u32
33pramin_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios) 31pramin_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
34{ 32{
33 struct nvkm_device *device = bios->subdev.device;
35 u32 i; 34 u32 i;
36 if (offset + length <= 0x00100000) { 35 if (offset + length <= 0x00100000) {
37 for (i = offset; i < offset + length; i += 4) 36 for (i = offset; i < offset + length; i += 4)
38 *(u32 *)&bios->data[i] = nv_rd32(bios, 0x700000 + i); 37 *(u32 *)&bios->data[i] = nvkm_rd32(device, 0x700000 + i);
39 return length; 38 return length;
40 } 39 }
41 return 0; 40 return 0;
@@ -46,7 +45,8 @@ pramin_fini(void *data)
46{ 45{
47 struct priv *priv = data; 46 struct priv *priv = data;
48 if (priv) { 47 if (priv) {
49 nv_wr32(priv->bios, 0x001700, priv->bar0); 48 struct nvkm_device *device = priv->bios->subdev.device;
49 nvkm_wr32(device, 0x001700, priv->bar0);
50 kfree(priv); 50 kfree(priv);
51 } 51 }
52} 52}
@@ -54,21 +54,23 @@ pramin_fini(void *data)
54static void * 54static void *
55pramin_init(struct nvkm_bios *bios, const char *name) 55pramin_init(struct nvkm_bios *bios, const char *name)
56{ 56{
57 struct nvkm_subdev *subdev = &bios->subdev;
58 struct nvkm_device *device = subdev->device;
57 struct priv *priv = NULL; 59 struct priv *priv = NULL;
58 u64 addr = 0; 60 u64 addr = 0;
59 61
60 /* PRAMIN always potentially available prior to nv50 */ 62 /* PRAMIN always potentially available prior to nv50 */
61 if (nv_device(bios)->card_type < NV_50) 63 if (device->card_type < NV_50)
62 return NULL; 64 return NULL;
63 65
64 /* we can't get the bios image pointer without PDISP */ 66 /* we can't get the bios image pointer without PDISP */
65 if (nv_device(bios)->card_type >= GM100) 67 if (device->card_type >= GM100)
66 addr = nv_rd32(bios, 0x021c04); 68 addr = nvkm_rd32(device, 0x021c04);
67 else 69 else
68 if (nv_device(bios)->card_type >= NV_C0) 70 if (device->card_type >= NV_C0)
69 addr = nv_rd32(bios, 0x022500); 71 addr = nvkm_rd32(device, 0x022500);
70 if (addr & 0x00000001) { 72 if (addr & 0x00000001) {
71 nv_debug(bios, "... display disabled\n"); 73 nvkm_debug(subdev, "... display disabled\n");
72 return ERR_PTR(-ENODEV); 74 return ERR_PTR(-ENODEV);
73 } 75 }
74 76
@@ -76,32 +78,32 @@ pramin_init(struct nvkm_bios *bios, const char *name)
76 * important as we don't want to be touching vram on an 78 * important as we don't want to be touching vram on an
77 * uninitialised board 79 * uninitialised board
78 */ 80 */
79 addr = nv_rd32(bios, 0x619f04); 81 addr = nvkm_rd32(device, 0x619f04);
80 if (!(addr & 0x00000008)) { 82 if (!(addr & 0x00000008)) {
81 nv_debug(bios, "... not enabled\n"); 83 nvkm_debug(subdev, "... not enabled\n");
82 return ERR_PTR(-ENODEV); 84 return ERR_PTR(-ENODEV);
83 } 85 }
84 if ( (addr & 0x00000003) != 1) { 86 if ( (addr & 0x00000003) != 1) {
85 nv_debug(bios, "... not in vram\n"); 87 nvkm_debug(subdev, "... not in vram\n");
86 return ERR_PTR(-ENODEV); 88 return ERR_PTR(-ENODEV);
87 } 89 }
88 90
89 /* some alternate method inherited from xf86-video-nv... */ 91 /* some alternate method inherited from xf86-video-nv... */
90 addr = (addr & 0xffffff00) << 8; 92 addr = (addr & 0xffffff00) << 8;
91 if (!addr) { 93 if (!addr) {
92 addr = (u64)nv_rd32(bios, 0x001700) << 16; 94 addr = (u64)nvkm_rd32(device, 0x001700) << 16;
93 addr += 0xf0000; 95 addr += 0xf0000;
94 } 96 }
95 97
96 /* modify bar0 PRAMIN window to cover the bios image */ 98 /* modify bar0 PRAMIN window to cover the bios image */
97 if (!(priv = kmalloc(sizeof(*priv), GFP_KERNEL))) { 99 if (!(priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
98 nv_error(bios, "... out of memory\n"); 100 nvkm_error(subdev, "... out of memory\n");
99 return ERR_PTR(-ENOMEM); 101 return ERR_PTR(-ENOMEM);
100 } 102 }
101 103
102 priv->bios = bios; 104 priv->bios = bios;
103 priv->bar0 = nv_rd32(bios, 0x001700); 105 priv->bar0 = nvkm_rd32(device, 0x001700);
104 nv_wr32(bios, 0x001700, addr >> 16); 106 nvkm_wr32(device, 0x001700, addr >> 16);
105 return priv; 107 return priv;
106} 108}
107 109
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
index 6ec3b237925e..ffa4b395220a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
@@ -22,15 +22,16 @@
22 */ 22 */
23#include "priv.h" 23#include "priv.h"
24 24
25#include <core/device.h> 25#include <subdev/pci.h>
26 26
27static u32 27static u32
28prom_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios) 28prom_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
29{ 29{
30 struct nvkm_device *device = data;
30 u32 i; 31 u32 i;
31 if (offset + length <= 0x00100000) { 32 if (offset + length <= 0x00100000) {
32 for (i = offset; i < offset + length; i += 4) 33 for (i = offset; i < offset + length; i += 4)
33 *(u32 *)&bios->data[i] = nv_rd32(bios, 0x300000 + i); 34 *(u32 *)&bios->data[i] = nvkm_rd32(device, 0x300000 + i);
34 return length; 35 return length;
35 } 36 }
36 return 0; 37 return 0;
@@ -39,25 +40,18 @@ prom_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
39static void 40static void
40prom_fini(void *data) 41prom_fini(void *data)
41{ 42{
42 struct nvkm_bios *bios = data; 43 struct nvkm_device *device = data;
43 if (nv_device(bios)->card_type < NV_50) 44 nvkm_pci_rom_shadow(device->pci, true);
44 nv_mask(bios, 0x001850, 0x00000001, 0x00000001);
45 else
46 nv_mask(bios, 0x088050, 0x00000001, 0x00000001);
47} 45}
48 46
49static void * 47static void *
50prom_init(struct nvkm_bios *bios, const char *name) 48prom_init(struct nvkm_bios *bios, const char *name)
51{ 49{
52 if (nv_device(bios)->card_type < NV_50) { 50 struct nvkm_device *device = bios->subdev.device;
53 if (nv_device(bios)->card_type == NV_40 && 51 if (device->card_type == NV_40 && device->chipset >= 0x4c)
54 nv_device(bios)->chipset >= 0x4c) 52 return ERR_PTR(-ENODEV);
55 return ERR_PTR(-ENODEV); 53 nvkm_pci_rom_shadow(device->pci, false);
56 nv_mask(bios, 0x001850, 0x00000001, 0x00000000); 54 return device;
57 } else {
58 nv_mask(bios, 0x088050, 0x00000001, 0x00000000);
59 }
60 return bios;
61} 55}
62 56
63const struct nvbios_source 57const struct nvbios_source
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
index 249ff6d583df..a54cfec0550d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
@@ -25,8 +25,6 @@
25#include <subdev/bios/bit.h> 25#include <subdev/bios/bit.h>
26#include <subdev/bios/therm.h> 26#include <subdev/bios/therm.h>
27 27
28#include <core/device.h>
29
30static u16 28static u16
31therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt) 29therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
32{ 30{
@@ -35,24 +33,24 @@ therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
35 33
36 if (!bit_entry(bios, 'P', &bit_P)) { 34 if (!bit_entry(bios, 'P', &bit_P)) {
37 if (bit_P.version == 1) 35 if (bit_P.version == 1)
38 therm = nv_ro16(bios, bit_P.offset + 12); 36 therm = nvbios_rd16(bios, bit_P.offset + 12);
39 else if (bit_P.version == 2) 37 else if (bit_P.version == 2)
40 therm = nv_ro16(bios, bit_P.offset + 16); 38 therm = nvbios_rd16(bios, bit_P.offset + 16);
41 else 39 else
42 nv_error(bios, 40 nvkm_error(&bios->subdev,
43 "unknown offset for thermal in BIT P %d\n", 41 "unknown offset for thermal in BIT P %d\n",
44 bit_P.version); 42 bit_P.version);
45 } 43 }
46 44
47 /* exit now if we haven't found the thermal table */ 45 /* exit now if we haven't found the thermal table */
48 if (!therm) 46 if (!therm)
49 return 0x0000; 47 return 0x0000;
50 48
51 *ver = nv_ro08(bios, therm + 0); 49 *ver = nvbios_rd08(bios, therm + 0);
52 *hdr = nv_ro08(bios, therm + 1); 50 *hdr = nvbios_rd08(bios, therm + 1);
53 *len = nv_ro08(bios, therm + 2); 51 *len = nvbios_rd08(bios, therm + 2);
54 *cnt = nv_ro08(bios, therm + 3); 52 *cnt = nvbios_rd08(bios, therm + 3);
55 return therm + nv_ro08(bios, therm + 1); 53 return therm + nvbios_rd08(bios, therm + 1);
56} 54}
57 55
58static u16 56static u16
@@ -83,9 +81,9 @@ nvbios_therm_sensor_parse(struct nvkm_bios *bios,
83 sensor_section = -1; 81 sensor_section = -1;
84 i = 0; 82 i = 0;
85 while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) { 83 while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
86 s16 value = nv_ro16(bios, entry + 1); 84 s16 value = nvbios_rd16(bios, entry + 1);
87 85
88 switch (nv_ro08(bios, entry + 0)) { 86 switch (nvbios_rd08(bios, entry + 0)) {
89 case 0x0: 87 case 0x0:
90 thrs_section = value; 88 thrs_section = value;
91 if (value > 0) 89 if (value > 0)
@@ -94,7 +92,7 @@ nvbios_therm_sensor_parse(struct nvkm_bios *bios,
94 case 0x01: 92 case 0x01:
95 sensor_section++; 93 sensor_section++;
96 if (sensor_section == 0) { 94 if (sensor_section == 0) {
97 offset = ((s8) nv_ro08(bios, entry + 2)) / 2; 95 offset = ((s8) nvbios_rd08(bios, entry + 2)) / 2;
98 sensor->offset_constant = offset; 96 sensor->offset_constant = offset;
99 } 97 }
100 break; 98 break;
@@ -165,9 +163,9 @@ nvbios_therm_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
165 fan->nr_fan_trip = 0; 163 fan->nr_fan_trip = 0;
166 fan->fan_mode = NVBIOS_THERM_FAN_OTHER; 164 fan->fan_mode = NVBIOS_THERM_FAN_OTHER;
167 while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) { 165 while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
168 s16 value = nv_ro16(bios, entry + 1); 166 s16 value = nvbios_rd16(bios, entry + 1);
169 167
170 switch (nv_ro08(bios, entry + 0)) { 168 switch (nvbios_rd08(bios, entry + 0)) {
171 case 0x22: 169 case 0x22:
172 fan->min_duty = value & 0xff; 170 fan->min_duty = value & 0xff;
173 fan->max_duty = (value & 0xff00) >> 8; 171 fan->max_duty = (value & 0xff00) >> 8;
@@ -198,14 +196,14 @@ nvbios_therm_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
198 case 0x46: 196 case 0x46:
199 if (fan->fan_mode > NVBIOS_THERM_FAN_LINEAR) 197 if (fan->fan_mode > NVBIOS_THERM_FAN_LINEAR)
200 fan->fan_mode = NVBIOS_THERM_FAN_LINEAR; 198 fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
201 fan->linear_min_temp = nv_ro08(bios, entry + 1); 199 fan->linear_min_temp = nvbios_rd08(bios, entry + 1);
202 fan->linear_max_temp = nv_ro08(bios, entry + 2); 200 fan->linear_max_temp = nvbios_rd08(bios, entry + 2);
203 break; 201 break;
204 } 202 }
205 } 203 }
206 204
207 /* starting from fermi, fan management is always linear */ 205 /* starting from fermi, fan management is always linear */
208 if (nv_device(bios)->card_type >= NV_C0 && 206 if (bios->subdev.device->card_type >= NV_C0 &&
209 fan->fan_mode == NVBIOS_THERM_FAN_OTHER) { 207 fan->fan_mode == NVBIOS_THERM_FAN_OTHER) {
210 fan->fan_mode = NVBIOS_THERM_FAN_LINEAR; 208 fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
211 } 209 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
index 763fd29a58f2..99f6432ac0af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
@@ -34,27 +34,27 @@ nvbios_timingTe(struct nvkm_bios *bios,
34 34
35 if (!bit_entry(bios, 'P', &bit_P)) { 35 if (!bit_entry(bios, 'P', &bit_P)) {
36 if (bit_P.version == 1) 36 if (bit_P.version == 1)
37 timing = nv_ro16(bios, bit_P.offset + 4); 37 timing = nvbios_rd16(bios, bit_P.offset + 4);
38 else 38 else
39 if (bit_P.version == 2) 39 if (bit_P.version == 2)
40 timing = nv_ro16(bios, bit_P.offset + 8); 40 timing = nvbios_rd16(bios, bit_P.offset + 8);
41 41
42 if (timing) { 42 if (timing) {
43 *ver = nv_ro08(bios, timing + 0); 43 *ver = nvbios_rd08(bios, timing + 0);
44 switch (*ver) { 44 switch (*ver) {
45 case 0x10: 45 case 0x10:
46 *hdr = nv_ro08(bios, timing + 1); 46 *hdr = nvbios_rd08(bios, timing + 1);
47 *cnt = nv_ro08(bios, timing + 2); 47 *cnt = nvbios_rd08(bios, timing + 2);
48 *len = nv_ro08(bios, timing + 3); 48 *len = nvbios_rd08(bios, timing + 3);
49 *snr = 0; 49 *snr = 0;
50 *ssz = 0; 50 *ssz = 0;
51 return timing; 51 return timing;
52 case 0x20: 52 case 0x20:
53 *hdr = nv_ro08(bios, timing + 1); 53 *hdr = nvbios_rd08(bios, timing + 1);
54 *cnt = nv_ro08(bios, timing + 5); 54 *cnt = nvbios_rd08(bios, timing + 5);
55 *len = nv_ro08(bios, timing + 2); 55 *len = nvbios_rd08(bios, timing + 2);
56 *snr = nv_ro08(bios, timing + 4); 56 *snr = nvbios_rd08(bios, timing + 4);
57 *ssz = nv_ro08(bios, timing + 3); 57 *ssz = nvbios_rd08(bios, timing + 3);
58 return timing; 58 return timing;
59 default: 59 default:
60 break; 60 break;
@@ -90,18 +90,20 @@ nvbios_timingEp(struct nvkm_bios *bios, int idx,
90 p->timing_hdr = *hdr; 90 p->timing_hdr = *hdr;
91 switch (!!data * *ver) { 91 switch (!!data * *ver) {
92 case 0x10: 92 case 0x10:
93 p->timing_10_WR = nv_ro08(bios, data + 0x00); 93 p->timing_10_WR = nvbios_rd08(bios, data + 0x00);
94 p->timing_10_WTR = nv_ro08(bios, data + 0x01); 94 p->timing_10_WTR = nvbios_rd08(bios, data + 0x01);
95 p->timing_10_CL = nv_ro08(bios, data + 0x02); 95 p->timing_10_CL = nvbios_rd08(bios, data + 0x02);
96 p->timing_10_RC = nv_ro08(bios, data + 0x03); 96 p->timing_10_RC = nvbios_rd08(bios, data + 0x03);
97 p->timing_10_RFC = nv_ro08(bios, data + 0x05); 97 p->timing_10_RFC = nvbios_rd08(bios, data + 0x05);
98 p->timing_10_RAS = nv_ro08(bios, data + 0x07); 98 p->timing_10_RAS = nvbios_rd08(bios, data + 0x07);
99 p->timing_10_RP = nv_ro08(bios, data + 0x09); 99 p->timing_10_RP = nvbios_rd08(bios, data + 0x09);
100 p->timing_10_RCDRD = nv_ro08(bios, data + 0x0a); 100 p->timing_10_RCDRD = nvbios_rd08(bios, data + 0x0a);
101 p->timing_10_RCDWR = nv_ro08(bios, data + 0x0b); 101 p->timing_10_RCDWR = nvbios_rd08(bios, data + 0x0b);
102 p->timing_10_RRD = nv_ro08(bios, data + 0x0c); 102 p->timing_10_RRD = nvbios_rd08(bios, data + 0x0c);
103 p->timing_10_13 = nv_ro08(bios, data + 0x0d); 103 p->timing_10_13 = nvbios_rd08(bios, data + 0x0d);
104 p->timing_10_ODT = nv_ro08(bios, data + 0x0e) & 0x07; 104 p->timing_10_ODT = nvbios_rd08(bios, data + 0x0e) & 0x07;
105 if (p->ramcfg_ver >= 0x10)
106 p->ramcfg_RON = nvbios_rd08(bios, data + 0x0e) & 0x07;
105 107
106 p->timing_10_24 = 0xff; 108 p->timing_10_24 = 0xff;
107 p->timing_10_21 = 0; 109 p->timing_10_21 = 0;
@@ -112,45 +114,45 @@ nvbios_timingEp(struct nvkm_bios *bios, int idx,
112 114
113 switch (min_t(u8, *hdr, 25)) { 115 switch (min_t(u8, *hdr, 25)) {
114 case 25: 116 case 25:
115 p->timing_10_24 = nv_ro08(bios, data + 0x18); 117 p->timing_10_24 = nvbios_rd08(bios, data + 0x18);
116 case 24: 118 case 24:
117 case 23: 119 case 23:
118 case 22: 120 case 22:
119 p->timing_10_21 = nv_ro08(bios, data + 0x15); 121 p->timing_10_21 = nvbios_rd08(bios, data + 0x15);
120 case 21: 122 case 21:
121 p->timing_10_20 = nv_ro08(bios, data + 0x14); 123 p->timing_10_20 = nvbios_rd08(bios, data + 0x14);
122 case 20: 124 case 20:
123 p->timing_10_CWL = nv_ro08(bios, data + 0x13); 125 p->timing_10_CWL = nvbios_rd08(bios, data + 0x13);
124 case 19: 126 case 19:
125 p->timing_10_18 = nv_ro08(bios, data + 0x12); 127 p->timing_10_18 = nvbios_rd08(bios, data + 0x12);
126 case 18: 128 case 18:
127 case 17: 129 case 17:
128 p->timing_10_16 = nv_ro08(bios, data + 0x10); 130 p->timing_10_16 = nvbios_rd08(bios, data + 0x10);
129 } 131 }
130 132
131 break; 133 break;
132 case 0x20: 134 case 0x20:
133 p->timing[0] = nv_ro32(bios, data + 0x00); 135 p->timing[0] = nvbios_rd32(bios, data + 0x00);
134 p->timing[1] = nv_ro32(bios, data + 0x04); 136 p->timing[1] = nvbios_rd32(bios, data + 0x04);
135 p->timing[2] = nv_ro32(bios, data + 0x08); 137 p->timing[2] = nvbios_rd32(bios, data + 0x08);
136 p->timing[3] = nv_ro32(bios, data + 0x0c); 138 p->timing[3] = nvbios_rd32(bios, data + 0x0c);
137 p->timing[4] = nv_ro32(bios, data + 0x10); 139 p->timing[4] = nvbios_rd32(bios, data + 0x10);
138 p->timing[5] = nv_ro32(bios, data + 0x14); 140 p->timing[5] = nvbios_rd32(bios, data + 0x14);
139 p->timing[6] = nv_ro32(bios, data + 0x18); 141 p->timing[6] = nvbios_rd32(bios, data + 0x18);
140 p->timing[7] = nv_ro32(bios, data + 0x1c); 142 p->timing[7] = nvbios_rd32(bios, data + 0x1c);
141 p->timing[8] = nv_ro32(bios, data + 0x20); 143 p->timing[8] = nvbios_rd32(bios, data + 0x20);
142 p->timing[9] = nv_ro32(bios, data + 0x24); 144 p->timing[9] = nvbios_rd32(bios, data + 0x24);
143 p->timing[10] = nv_ro32(bios, data + 0x28); 145 p->timing[10] = nvbios_rd32(bios, data + 0x28);
144 p->timing_20_2e_03 = (nv_ro08(bios, data + 0x2e) & 0x03) >> 0; 146 p->timing_20_2e_03 = (nvbios_rd08(bios, data + 0x2e) & 0x03) >> 0;
145 p->timing_20_2e_30 = (nv_ro08(bios, data + 0x2e) & 0x30) >> 4; 147 p->timing_20_2e_30 = (nvbios_rd08(bios, data + 0x2e) & 0x30) >> 4;
146 p->timing_20_2e_c0 = (nv_ro08(bios, data + 0x2e) & 0xc0) >> 6; 148 p->timing_20_2e_c0 = (nvbios_rd08(bios, data + 0x2e) & 0xc0) >> 6;
147 p->timing_20_2f_03 = (nv_ro08(bios, data + 0x2f) & 0x03) >> 0; 149 p->timing_20_2f_03 = (nvbios_rd08(bios, data + 0x2f) & 0x03) >> 0;
148 temp = nv_ro16(bios, data + 0x2c); 150 temp = nvbios_rd16(bios, data + 0x2c);
149 p->timing_20_2c_003f = (temp & 0x003f) >> 0; 151 p->timing_20_2c_003f = (temp & 0x003f) >> 0;
150 p->timing_20_2c_1fc0 = (temp & 0x1fc0) >> 6; 152 p->timing_20_2c_1fc0 = (temp & 0x1fc0) >> 6;
151 p->timing_20_30_07 = (nv_ro08(bios, data + 0x30) & 0x07) >> 0; 153 p->timing_20_30_07 = (nvbios_rd08(bios, data + 0x30) & 0x07) >> 0;
152 p->timing_20_30_f8 = (nv_ro08(bios, data + 0x30) & 0xf8) >> 3; 154 p->timing_20_30_f8 = (nvbios_rd08(bios, data + 0x30) & 0xf8) >> 3;
153 temp = nv_ro16(bios, data + 0x31); 155 temp = nvbios_rd16(bios, data + 0x31);
154 p->timing_20_31_0007 = (temp & 0x0007) >> 0; 156 p->timing_20_31_0007 = (temp & 0x0007) >> 0;
155 p->timing_20_31_0078 = (temp & 0x0078) >> 3; 157 p->timing_20_31_0078 = (temp & 0x0078) >> 3;
156 p->timing_20_31_0780 = (temp & 0x0780) >> 7; 158 p->timing_20_31_0780 = (temp & 0x0780) >> 7;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
index e95b69faa82e..2f13db745948 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
@@ -33,15 +33,15 @@ nvbios_vmap_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
33 33
34 if (!bit_entry(bios, 'P', &bit_P)) { 34 if (!bit_entry(bios, 'P', &bit_P)) {
35 if (bit_P.version == 2) { 35 if (bit_P.version == 2) {
36 vmap = nv_ro16(bios, bit_P.offset + 0x20); 36 vmap = nvbios_rd16(bios, bit_P.offset + 0x20);
37 if (vmap) { 37 if (vmap) {
38 *ver = nv_ro08(bios, vmap + 0); 38 *ver = nvbios_rd08(bios, vmap + 0);
39 switch (*ver) { 39 switch (*ver) {
40 case 0x10: 40 case 0x10:
41 case 0x20: 41 case 0x20:
42 *hdr = nv_ro08(bios, vmap + 1); 42 *hdr = nvbios_rd08(bios, vmap + 1);
43 *cnt = nv_ro08(bios, vmap + 3); 43 *cnt = nvbios_rd08(bios, vmap + 3);
44 *len = nv_ro08(bios, vmap + 2); 44 *len = nvbios_rd08(bios, vmap + 2);
45 return vmap; 45 return vmap;
46 default: 46 default:
47 break; 47 break;
@@ -88,23 +88,23 @@ nvbios_vmap_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
88 switch (!!vmap * *ver) { 88 switch (!!vmap * *ver) {
89 case 0x10: 89 case 0x10:
90 info->link = 0xff; 90 info->link = 0xff;
91 info->min = nv_ro32(bios, vmap + 0x00); 91 info->min = nvbios_rd32(bios, vmap + 0x00);
92 info->max = nv_ro32(bios, vmap + 0x04); 92 info->max = nvbios_rd32(bios, vmap + 0x04);
93 info->arg[0] = nv_ro32(bios, vmap + 0x08); 93 info->arg[0] = nvbios_rd32(bios, vmap + 0x08);
94 info->arg[1] = nv_ro32(bios, vmap + 0x0c); 94 info->arg[1] = nvbios_rd32(bios, vmap + 0x0c);
95 info->arg[2] = nv_ro32(bios, vmap + 0x10); 95 info->arg[2] = nvbios_rd32(bios, vmap + 0x10);
96 break; 96 break;
97 case 0x20: 97 case 0x20:
98 info->unk0 = nv_ro08(bios, vmap + 0x00); 98 info->unk0 = nvbios_rd08(bios, vmap + 0x00);
99 info->link = nv_ro08(bios, vmap + 0x01); 99 info->link = nvbios_rd08(bios, vmap + 0x01);
100 info->min = nv_ro32(bios, vmap + 0x02); 100 info->min = nvbios_rd32(bios, vmap + 0x02);
101 info->max = nv_ro32(bios, vmap + 0x06); 101 info->max = nvbios_rd32(bios, vmap + 0x06);
102 info->arg[0] = nv_ro32(bios, vmap + 0x0a); 102 info->arg[0] = nvbios_rd32(bios, vmap + 0x0a);
103 info->arg[1] = nv_ro32(bios, vmap + 0x0e); 103 info->arg[1] = nvbios_rd32(bios, vmap + 0x0e);
104 info->arg[2] = nv_ro32(bios, vmap + 0x12); 104 info->arg[2] = nvbios_rd32(bios, vmap + 0x12);
105 info->arg[3] = nv_ro32(bios, vmap + 0x16); 105 info->arg[3] = nvbios_rd32(bios, vmap + 0x16);
106 info->arg[4] = nv_ro32(bios, vmap + 0x1a); 106 info->arg[4] = nvbios_rd32(bios, vmap + 0x1a);
107 info->arg[5] = nv_ro32(bios, vmap + 0x1e); 107 info->arg[5] = nvbios_rd32(bios, vmap + 0x1e);
108 break; 108 break;
109 } 109 }
110 return vmap; 110 return vmap;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
index 8454ab7c4a3d..615804c3887b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
@@ -33,30 +33,30 @@ nvbios_volt_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
33 33
34 if (!bit_entry(bios, 'P', &bit_P)) { 34 if (!bit_entry(bios, 'P', &bit_P)) {
35 if (bit_P.version == 2) 35 if (bit_P.version == 2)
36 volt = nv_ro16(bios, bit_P.offset + 0x0c); 36 volt = nvbios_rd16(bios, bit_P.offset + 0x0c);
37 else 37 else
38 if (bit_P.version == 1) 38 if (bit_P.version == 1)
39 volt = nv_ro16(bios, bit_P.offset + 0x10); 39 volt = nvbios_rd16(bios, bit_P.offset + 0x10);
40 40
41 if (volt) { 41 if (volt) {
42 *ver = nv_ro08(bios, volt + 0); 42 *ver = nvbios_rd08(bios, volt + 0);
43 switch (*ver) { 43 switch (*ver) {
44 case 0x12: 44 case 0x12:
45 *hdr = 5; 45 *hdr = 5;
46 *cnt = nv_ro08(bios, volt + 2); 46 *cnt = nvbios_rd08(bios, volt + 2);
47 *len = nv_ro08(bios, volt + 1); 47 *len = nvbios_rd08(bios, volt + 1);
48 return volt; 48 return volt;
49 case 0x20: 49 case 0x20:
50 *hdr = nv_ro08(bios, volt + 1); 50 *hdr = nvbios_rd08(bios, volt + 1);
51 *cnt = nv_ro08(bios, volt + 2); 51 *cnt = nvbios_rd08(bios, volt + 2);
52 *len = nv_ro08(bios, volt + 3); 52 *len = nvbios_rd08(bios, volt + 3);
53 return volt; 53 return volt;
54 case 0x30: 54 case 0x30:
55 case 0x40: 55 case 0x40:
56 case 0x50: 56 case 0x50:
57 *hdr = nv_ro08(bios, volt + 1); 57 *hdr = nvbios_rd08(bios, volt + 1);
58 *cnt = nv_ro08(bios, volt + 3); 58 *cnt = nvbios_rd08(bios, volt + 3);
59 *len = nv_ro08(bios, volt + 2); 59 *len = nvbios_rd08(bios, volt + 2);
60 return volt; 60 return volt;
61 } 61 }
62 } 62 }
@@ -73,28 +73,28 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
73 memset(info, 0x00, sizeof(*info)); 73 memset(info, 0x00, sizeof(*info));
74 switch (!!volt * *ver) { 74 switch (!!volt * *ver) {
75 case 0x12: 75 case 0x12:
76 info->vidmask = nv_ro08(bios, volt + 0x04); 76 info->vidmask = nvbios_rd08(bios, volt + 0x04);
77 break; 77 break;
78 case 0x20: 78 case 0x20:
79 info->vidmask = nv_ro08(bios, volt + 0x05); 79 info->vidmask = nvbios_rd08(bios, volt + 0x05);
80 break; 80 break;
81 case 0x30: 81 case 0x30:
82 info->vidmask = nv_ro08(bios, volt + 0x04); 82 info->vidmask = nvbios_rd08(bios, volt + 0x04);
83 break; 83 break;
84 case 0x40: 84 case 0x40:
85 info->base = nv_ro32(bios, volt + 0x04); 85 info->base = nvbios_rd32(bios, volt + 0x04);
86 info->step = nv_ro16(bios, volt + 0x08); 86 info->step = nvbios_rd16(bios, volt + 0x08);
87 info->vidmask = nv_ro08(bios, volt + 0x0b); 87 info->vidmask = nvbios_rd08(bios, volt + 0x0b);
88 /*XXX*/ 88 /*XXX*/
89 info->min = 0; 89 info->min = 0;
90 info->max = info->base; 90 info->max = info->base;
91 break; 91 break;
92 case 0x50: 92 case 0x50:
93 info->vidmask = nv_ro08(bios, volt + 0x06); 93 info->vidmask = nvbios_rd08(bios, volt + 0x06);
94 info->min = nv_ro32(bios, volt + 0x0a); 94 info->min = nvbios_rd32(bios, volt + 0x0a);
95 info->max = nv_ro32(bios, volt + 0x0e); 95 info->max = nvbios_rd32(bios, volt + 0x0e);
96 info->base = nv_ro32(bios, volt + 0x12) & 0x00ffffff; 96 info->base = nvbios_rd32(bios, volt + 0x12) & 0x00ffffff;
97 info->step = nv_ro16(bios, volt + 0x16); 97 info->step = nvbios_rd16(bios, volt + 0x16);
98 break; 98 break;
99 } 99 }
100 return volt; 100 return volt;
@@ -121,12 +121,12 @@ nvbios_volt_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
121 switch (!!volt * *ver) { 121 switch (!!volt * *ver) {
122 case 0x12: 122 case 0x12:
123 case 0x20: 123 case 0x20:
124 info->voltage = nv_ro08(bios, volt + 0x00) * 10000; 124 info->voltage = nvbios_rd08(bios, volt + 0x00) * 10000;
125 info->vid = nv_ro08(bios, volt + 0x01); 125 info->vid = nvbios_rd08(bios, volt + 0x01);
126 break; 126 break;
127 case 0x30: 127 case 0x30:
128 info->voltage = nv_ro08(bios, volt + 0x00) * 10000; 128 info->voltage = nvbios_rd08(bios, volt + 0x00) * 10000;
129 info->vid = nv_ro08(bios, volt + 0x01) >> 2; 129 info->vid = nvbios_rd08(bios, volt + 0x01) >> 2;
130 break; 130 break;
131 case 0x40: 131 case 0x40:
132 case 0x50: 132 case 0x50:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
index 63a5e1b5cb3c..250fc42d8608 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
@@ -30,12 +30,12 @@ dcb_xpiod_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
30{ 30{
31 u16 data = dcb_gpio_table(bios, ver, hdr, cnt, len); 31 u16 data = dcb_gpio_table(bios, ver, hdr, cnt, len);
32 if (data && *ver >= 0x40 && *hdr >= 0x06) { 32 if (data && *ver >= 0x40 && *hdr >= 0x06) {
33 u16 xpio = nv_ro16(bios, data + 0x04); 33 u16 xpio = nvbios_rd16(bios, data + 0x04);
34 if (xpio) { 34 if (xpio) {
35 *ver = nv_ro08(bios, data + 0x00); 35 *ver = nvbios_rd08(bios, data + 0x00);
36 *hdr = nv_ro08(bios, data + 0x01); 36 *hdr = nvbios_rd08(bios, data + 0x01);
37 *cnt = nv_ro08(bios, data + 0x02); 37 *cnt = nvbios_rd08(bios, data + 0x02);
38 *len = nv_ro08(bios, data + 0x03); 38 *len = nvbios_rd08(bios, data + 0x03);
39 return xpio; 39 return xpio;
40 } 40 }
41 } 41 }
@@ -48,12 +48,12 @@ dcb_xpio_table(struct nvkm_bios *bios, u8 idx,
48{ 48{
49 u16 data = dcb_xpiod_table(bios, ver, hdr, cnt, len); 49 u16 data = dcb_xpiod_table(bios, ver, hdr, cnt, len);
50 if (data && idx < *cnt) { 50 if (data && idx < *cnt) {
51 u16 xpio = nv_ro16(bios, data + *hdr + (idx * *len)); 51 u16 xpio = nvbios_rd16(bios, data + *hdr + (idx * *len));
52 if (xpio) { 52 if (xpio) {
53 *ver = nv_ro08(bios, data + 0x00); 53 *ver = nvbios_rd08(bios, data + 0x00);
54 *hdr = nv_ro08(bios, data + 0x01); 54 *hdr = nvbios_rd08(bios, data + 0x01);
55 *cnt = nv_ro08(bios, data + 0x02); 55 *cnt = nvbios_rd08(bios, data + 0x02);
56 *len = nv_ro08(bios, data + 0x03); 56 *len = nvbios_rd08(bios, data + 0x03);
57 return xpio; 57 return xpio;
58 } 58 }
59 } 59 }
@@ -66,9 +66,9 @@ dcb_xpio_parse(struct nvkm_bios *bios, u8 idx,
66{ 66{
67 u16 data = dcb_xpio_table(bios, idx, ver, hdr, cnt, len); 67 u16 data = dcb_xpio_table(bios, idx, ver, hdr, cnt, len);
68 if (data && *len >= 6) { 68 if (data && *len >= 6) {
69 info->type = nv_ro08(bios, data + 0x04); 69 info->type = nvbios_rd08(bios, data + 0x04);
70 info->addr = nv_ro08(bios, data + 0x05); 70 info->addr = nvbios_rd08(bios, data + 0x05);
71 info->flags = nv_ro08(bios, data + 0x06); 71 info->flags = nvbios_rd08(bios, data + 0x06);
72 } 72 }
73 return 0x0000; 73 return 0x0000;
74} 74}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
index 83d80b13f149..5fa9e91835c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
@@ -1,3 +1,4 @@
1nvkm-y += nvkm/subdev/bus/base.o
1nvkm-y += nvkm/subdev/bus/hwsq.o 2nvkm-y += nvkm/subdev/bus/hwsq.o
2nvkm-y += nvkm/subdev/bus/nv04.o 3nvkm-y += nvkm/subdev/bus/nv04.o
3nvkm-y += nvkm/subdev/bus/nv31.o 4nvkm-y += nvkm/subdev/bus/nv31.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
new file mode 100644
index 000000000000..dc5a10f18bdb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
@@ -0,0 +1,64 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25
26static void
27nvkm_bus_intr(struct nvkm_subdev *subdev)
28{
29 struct nvkm_bus *bus = nvkm_bus(subdev);
30 bus->func->intr(bus);
31}
32
33static int
34nvkm_bus_init(struct nvkm_subdev *subdev)
35{
36 struct nvkm_bus *bus = nvkm_bus(subdev);
37 bus->func->init(bus);
38 return 0;
39}
40
41static void *
42nvkm_bus_dtor(struct nvkm_subdev *subdev)
43{
44 return nvkm_bus(subdev);
45}
46
47static const struct nvkm_subdev_func
48nvkm_bus = {
49 .dtor = nvkm_bus_dtor,
50 .init = nvkm_bus_init,
51 .intr = nvkm_bus_intr,
52};
53
54int
55nvkm_bus_new_(const struct nvkm_bus_func *func, struct nvkm_device *device,
56 int index, struct nvkm_bus **pbus)
57{
58 struct nvkm_bus *bus;
59 if (!(bus = *pbus = kzalloc(sizeof(*bus), GFP_KERNEL)))
60 return -ENOMEM;
61 nvkm_subdev_ctor(&nvkm_bus, device, index, 0, &bus->subdev);
62 bus->func = func;
63 return 0;
64}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
index cbe699e82593..9700b5c01cc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
@@ -22,37 +22,43 @@
22 * Authors: Martin Peres <martin.peres@labri.fr> 22 * Authors: Martin Peres <martin.peres@labri.fr>
23 * Ben Skeggs 23 * Ben Skeggs
24 */ 24 */
25#include "nv04.h" 25#include "priv.h"
26 26
27#include <subdev/timer.h> 27#include <subdev/timer.h>
28 28
29static int 29static int
30g94_bus_hwsq_exec(struct nvkm_bus *pbus, u32 *data, u32 size) 30g94_bus_hwsq_exec(struct nvkm_bus *bus, u32 *data, u32 size)
31{ 31{
32 struct nv50_bus_priv *priv = (void *)pbus; 32 struct nvkm_device *device = bus->subdev.device;
33 int i; 33 int i;
34 34
35 nv_mask(pbus, 0x001098, 0x00000008, 0x00000000); 35 nvkm_mask(device, 0x001098, 0x00000008, 0x00000000);
36 nv_wr32(pbus, 0x001304, 0x00000000); 36 nvkm_wr32(device, 0x001304, 0x00000000);
37 nv_wr32(pbus, 0x001318, 0x00000000); 37 nvkm_wr32(device, 0x001318, 0x00000000);
38 for (i = 0; i < size; i++) 38 for (i = 0; i < size; i++)
39 nv_wr32(priv, 0x080000 + (i * 4), data[i]); 39 nvkm_wr32(device, 0x080000 + (i * 4), data[i]);
40 nv_mask(pbus, 0x001098, 0x00000018, 0x00000018); 40 nvkm_mask(device, 0x001098, 0x00000018, 0x00000018);
41 nv_wr32(pbus, 0x00130c, 0x00000001); 41 nvkm_wr32(device, 0x00130c, 0x00000001);
42 42
43 return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT; 43 if (nvkm_msec(device, 2000,
44 if (!(nvkm_rd32(device, 0x001308) & 0x00000100))
45 break;
46 ) < 0)
47 return -ETIMEDOUT;
48
49 return 0;
44} 50}
45 51
46struct nvkm_oclass * 52static const struct nvkm_bus_func
47g94_bus_oclass = &(struct nv04_bus_impl) { 53g94_bus = {
48 .base.handle = NV_SUBDEV(BUS, 0x94), 54 .init = nv50_bus_init,
49 .base.ofuncs = &(struct nvkm_ofuncs) {
50 .ctor = nv04_bus_ctor,
51 .dtor = _nvkm_bus_dtor,
52 .init = nv50_bus_init,
53 .fini = _nvkm_bus_fini,
54 },
55 .intr = nv50_bus_intr, 55 .intr = nv50_bus_intr,
56 .hwsq_exec = g94_bus_hwsq_exec, 56 .hwsq_exec = g94_bus_hwsq_exec,
57 .hwsq_size = 128, 57 .hwsq_size = 128,
58}.base; 58};
59
60int
61g94_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
62{
63 return nvkm_bus_new_(&g94_bus, device, index, pbus);
64}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
index ebc63ba968d4..e0930d5fdfb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
@@ -22,59 +22,54 @@
22 * Authors: Martin Peres <martin.peres@labri.fr> 22 * Authors: Martin Peres <martin.peres@labri.fr>
23 * Ben Skeggs 23 * Ben Skeggs
24 */ 24 */
25#include "nv04.h" 25#include "priv.h"
26 26
27static void 27static void
28gf100_bus_intr(struct nvkm_subdev *subdev) 28gf100_bus_intr(struct nvkm_bus *bus)
29{ 29{
30 struct nvkm_bus *pbus = nvkm_bus(subdev); 30 struct nvkm_subdev *subdev = &bus->subdev;
31 u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140); 31 struct nvkm_device *device = subdev->device;
32 u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
32 33
33 if (stat & 0x0000000e) { 34 if (stat & 0x0000000e) {
34 u32 addr = nv_rd32(pbus, 0x009084); 35 u32 addr = nvkm_rd32(device, 0x009084);
35 u32 data = nv_rd32(pbus, 0x009088); 36 u32 data = nvkm_rd32(device, 0x009088);
36 37
37 nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x [ %s%s%s]\n", 38 nvkm_error(subdev,
38 (addr & 0x00000002) ? "write" : "read", data, 39 "MMIO %s of %08x FAULT at %06x [ %s%s%s]\n",
39 (addr & 0x00fffffc), 40 (addr & 0x00000002) ? "write" : "read", data,
40 (stat & 0x00000002) ? "!ENGINE " : "", 41 (addr & 0x00fffffc),
41 (stat & 0x00000004) ? "IBUS " : "", 42 (stat & 0x00000002) ? "!ENGINE " : "",
42 (stat & 0x00000008) ? "TIMEOUT " : ""); 43 (stat & 0x00000004) ? "IBUS " : "",
44 (stat & 0x00000008) ? "TIMEOUT " : "");
43 45
44 nv_wr32(pbus, 0x009084, 0x00000000); 46 nvkm_wr32(device, 0x009084, 0x00000000);
45 nv_wr32(pbus, 0x001100, (stat & 0x0000000e)); 47 nvkm_wr32(device, 0x001100, (stat & 0x0000000e));
46 stat &= ~0x0000000e; 48 stat &= ~0x0000000e;
47 } 49 }
48 50
49 if (stat) { 51 if (stat) {
50 nv_error(pbus, "unknown intr 0x%08x\n", stat); 52 nvkm_error(subdev, "intr %08x\n", stat);
51 nv_mask(pbus, 0x001140, stat, 0x00000000); 53 nvkm_mask(device, 0x001140, stat, 0x00000000);
52 } 54 }
53} 55}
54 56
55static int 57static void
56gf100_bus_init(struct nvkm_object *object) 58gf100_bus_init(struct nvkm_bus *bus)
57{ 59{
58 struct nv04_bus_priv *priv = (void *)object; 60 struct nvkm_device *device = bus->subdev.device;
59 int ret; 61 nvkm_wr32(device, 0x001100, 0xffffffff);
60 62 nvkm_wr32(device, 0x001140, 0x0000000e);
61 ret = nvkm_bus_init(&priv->base);
62 if (ret)
63 return ret;
64
65 nv_wr32(priv, 0x001100, 0xffffffff);
66 nv_wr32(priv, 0x001140, 0x0000000e);
67 return 0;
68} 63}
69 64
70struct nvkm_oclass * 65static const struct nvkm_bus_func
71gf100_bus_oclass = &(struct nv04_bus_impl) { 66gf100_bus = {
72 .base.handle = NV_SUBDEV(BUS, 0xc0), 67 .init = gf100_bus_init,
73 .base.ofuncs = &(struct nvkm_ofuncs) {
74 .ctor = nv04_bus_ctor,
75 .dtor = _nvkm_bus_dtor,
76 .init = gf100_bus_init,
77 .fini = _nvkm_bus_fini,
78 },
79 .intr = gf100_bus_intr, 68 .intr = gf100_bus_intr,
80}.base; 69};
70
71int
72gf100_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
73{
74 return nvkm_bus_new_(&gf100_bus, device, index, pbus);
75}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
index 7622b41619a0..79f1cf513b36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
@@ -21,10 +21,10 @@
21 * 21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24#include <subdev/bus.h> 24#include "priv.h"
25 25
26struct nvkm_hwsq { 26struct nvkm_hwsq {
27 struct nvkm_bus *pbus; 27 struct nvkm_subdev *subdev;
28 u32 addr; 28 u32 addr;
29 u32 data; 29 u32 data;
30 struct { 30 struct {
@@ -41,13 +41,13 @@ hwsq_cmd(struct nvkm_hwsq *hwsq, int size, u8 data[])
41} 41}
42 42
43int 43int
44nvkm_hwsq_init(struct nvkm_bus *pbus, struct nvkm_hwsq **phwsq) 44nvkm_hwsq_init(struct nvkm_subdev *subdev, struct nvkm_hwsq **phwsq)
45{ 45{
46 struct nvkm_hwsq *hwsq; 46 struct nvkm_hwsq *hwsq;
47 47
48 hwsq = *phwsq = kmalloc(sizeof(*hwsq), GFP_KERNEL); 48 hwsq = *phwsq = kmalloc(sizeof(*hwsq), GFP_KERNEL);
49 if (hwsq) { 49 if (hwsq) {
50 hwsq->pbus = pbus; 50 hwsq->subdev = subdev;
51 hwsq->addr = ~0; 51 hwsq->addr = ~0;
52 hwsq->data = ~0; 52 hwsq->data = ~0;
53 memset(hwsq->c.data, 0x7f, sizeof(hwsq->c.data)); 53 memset(hwsq->c.data, 0x7f, sizeof(hwsq->c.data));
@@ -63,21 +63,23 @@ nvkm_hwsq_fini(struct nvkm_hwsq **phwsq, bool exec)
63 struct nvkm_hwsq *hwsq = *phwsq; 63 struct nvkm_hwsq *hwsq = *phwsq;
64 int ret = 0, i; 64 int ret = 0, i;
65 if (hwsq) { 65 if (hwsq) {
66 struct nvkm_bus *pbus = hwsq->pbus; 66 struct nvkm_subdev *subdev = hwsq->subdev;
67 struct nvkm_bus *bus = subdev->device->bus;
67 hwsq->c.size = (hwsq->c.size + 4) / 4; 68 hwsq->c.size = (hwsq->c.size + 4) / 4;
68 if (hwsq->c.size <= pbus->hwsq_size) { 69 if (hwsq->c.size <= bus->func->hwsq_size) {
69 if (exec) 70 if (exec)
70 ret = pbus->hwsq_exec(pbus, (u32 *)hwsq->c.data, 71 ret = bus->func->hwsq_exec(bus,
71 hwsq->c.size); 72 (u32 *)hwsq->c.data,
73 hwsq->c.size);
72 if (ret) 74 if (ret)
73 nv_error(pbus, "hwsq exec failed: %d\n", ret); 75 nvkm_error(subdev, "hwsq exec failed: %d\n", ret);
74 } else { 76 } else {
75 nv_error(pbus, "hwsq ucode too large\n"); 77 nvkm_error(subdev, "hwsq ucode too large\n");
76 ret = -ENOSPC; 78 ret = -ENOSPC;
77 } 79 }
78 80
79 for (i = 0; ret && i < hwsq->c.size; i++) 81 for (i = 0; ret && i < hwsq->c.size; i++)
80 nv_error(pbus, "\t0x%08x\n", ((u32 *)hwsq->c.data)[i]); 82 nvkm_error(subdev, "\t%08x\n", ((u32 *)hwsq->c.data)[i]);
81 83
82 *phwsq = NULL; 84 *phwsq = NULL;
83 kfree(hwsq); 85 kfree(hwsq);
@@ -88,7 +90,7 @@ nvkm_hwsq_fini(struct nvkm_hwsq **phwsq, bool exec)
88void 90void
89nvkm_hwsq_wr32(struct nvkm_hwsq *hwsq, u32 addr, u32 data) 91nvkm_hwsq_wr32(struct nvkm_hwsq *hwsq, u32 addr, u32 data)
90{ 92{
91 nv_debug(hwsq->pbus, "R[%06x] = 0x%08x\n", addr, data); 93 nvkm_debug(hwsq->subdev, "R[%06x] = %08x\n", addr, data);
92 94
93 if (hwsq->data != data) { 95 if (hwsq->data != data) {
94 if ((data & 0xffff0000) != (hwsq->data & 0xffff0000)) { 96 if ((data & 0xffff0000) != (hwsq->data & 0xffff0000)) {
@@ -113,7 +115,7 @@ nvkm_hwsq_wr32(struct nvkm_hwsq *hwsq, u32 addr, u32 data)
113void 115void
114nvkm_hwsq_setf(struct nvkm_hwsq *hwsq, u8 flag, int data) 116nvkm_hwsq_setf(struct nvkm_hwsq *hwsq, u8 flag, int data)
115{ 117{
116 nv_debug(hwsq->pbus, " FLAG[%02x] = %d\n", flag, data); 118 nvkm_debug(hwsq->subdev, " FLAG[%02x] = %d\n", flag, data);
117 flag += 0x80; 119 flag += 0x80;
118 if (data >= 0) 120 if (data >= 0)
119 flag += 0x20; 121 flag += 0x20;
@@ -125,7 +127,7 @@ nvkm_hwsq_setf(struct nvkm_hwsq *hwsq, u8 flag, int data)
125void 127void
126nvkm_hwsq_wait(struct nvkm_hwsq *hwsq, u8 flag, u8 data) 128nvkm_hwsq_wait(struct nvkm_hwsq *hwsq, u8 flag, u8 data)
127{ 129{
128 nv_debug(hwsq->pbus, " WAIT[%02x] = %d\n", flag, data); 130 nvkm_debug(hwsq->subdev, " WAIT[%02x] = %d\n", flag, data);
129 hwsq_cmd(hwsq, 3, (u8[]){ 0x5f, flag, data }); 131 hwsq_cmd(hwsq, 3, (u8[]){ 0x5f, flag, data });
130} 132}
131 133
@@ -138,6 +140,6 @@ nvkm_hwsq_nsec(struct nvkm_hwsq *hwsq, u32 nsec)
138 shift++; 140 shift++;
139 } 141 }
140 142
141 nv_debug(hwsq->pbus, " DELAY = %d ns\n", nsec); 143 nvkm_debug(hwsq->subdev, " DELAY = %d ns\n", nsec);
142 hwsq_cmd(hwsq, 1, (u8[]){ 0x00 | (shift << 2) | usec }); 144 hwsq_cmd(hwsq, 1, (u8[]){ 0x00 | (shift << 2) | usec });
143} 145}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
index ebf709c27e3a..8117ec5a1468 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
@@ -59,10 +59,9 @@ hwsq_reg(u32 addr)
59static inline int 59static inline int
60hwsq_init(struct hwsq *ram, struct nvkm_subdev *subdev) 60hwsq_init(struct hwsq *ram, struct nvkm_subdev *subdev)
61{ 61{
62 struct nvkm_bus *pbus = nvkm_bus(subdev);
63 int ret; 62 int ret;
64 63
65 ret = nvkm_hwsq_init(pbus, &ram->hwsq); 64 ret = nvkm_hwsq_init(subdev, &ram->hwsq);
66 if (ret) 65 if (ret)
67 return ret; 66 return ret;
68 67
@@ -85,8 +84,9 @@ hwsq_exec(struct hwsq *ram, bool exec)
85static inline u32 84static inline u32
86hwsq_rd32(struct hwsq *ram, struct hwsq_reg *reg) 85hwsq_rd32(struct hwsq *ram, struct hwsq_reg *reg)
87{ 86{
87 struct nvkm_device *device = ram->subdev->device;
88 if (reg->sequence != ram->sequence) 88 if (reg->sequence != ram->sequence)
89 reg->data = nv_rd32(ram->subdev, reg->addr); 89 reg->data = nvkm_rd32(device, reg->addr);
90 return reg->data; 90 return reg->data;
91} 91}
92 92
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
index 19c8e50eeff7..c80b96789c31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
@@ -22,73 +22,55 @@
22 * Authors: Martin Peres <martin.peres@labri.fr> 22 * Authors: Martin Peres <martin.peres@labri.fr>
23 * Ben Skeggs 23 * Ben Skeggs
24 */ 24 */
25#include "nv04.h" 25#include "priv.h"
26
27#include <subdev/gpio.h>
28
29#include <subdev/gpio.h>
26 30
27static void 31static void
28nv04_bus_intr(struct nvkm_subdev *subdev) 32nv04_bus_intr(struct nvkm_bus *bus)
29{ 33{
30 struct nvkm_bus *pbus = nvkm_bus(subdev); 34 struct nvkm_subdev *subdev = &bus->subdev;
31 u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140); 35 struct nvkm_device *device = subdev->device;
36 u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
32 37
33 if (stat & 0x00000001) { 38 if (stat & 0x00000001) {
34 nv_error(pbus, "BUS ERROR\n"); 39 nvkm_error(subdev, "BUS ERROR\n");
35 stat &= ~0x00000001; 40 stat &= ~0x00000001;
36 nv_wr32(pbus, 0x001100, 0x00000001); 41 nvkm_wr32(device, 0x001100, 0x00000001);
37 } 42 }
38 43
39 if (stat & 0x00000110) { 44 if (stat & 0x00000110) {
40 subdev = nvkm_subdev(subdev, NVDEV_SUBDEV_GPIO); 45 struct nvkm_gpio *gpio = device->gpio;
41 if (subdev && subdev->intr) 46 if (gpio)
42 subdev->intr(subdev); 47 nvkm_subdev_intr(&gpio->subdev);
43 stat &= ~0x00000110; 48 stat &= ~0x00000110;
44 nv_wr32(pbus, 0x001100, 0x00000110); 49 nvkm_wr32(device, 0x001100, 0x00000110);
45 } 50 }
46 51
47 if (stat) { 52 if (stat) {
48 nv_error(pbus, "unknown intr 0x%08x\n", stat); 53 nvkm_error(subdev, "intr %08x\n", stat);
49 nv_mask(pbus, 0x001140, stat, 0x00000000); 54 nvkm_mask(device, 0x001140, stat, 0x00000000);
50 } 55 }
51} 56}
52 57
53static int 58static void
54nv04_bus_init(struct nvkm_object *object) 59nv04_bus_init(struct nvkm_bus *bus)
55{ 60{
56 struct nv04_bus_priv *priv = (void *)object; 61 struct nvkm_device *device = bus->subdev.device;
57 62 nvkm_wr32(device, 0x001100, 0xffffffff);
58 nv_wr32(priv, 0x001100, 0xffffffff); 63 nvkm_wr32(device, 0x001140, 0x00000111);
59 nv_wr32(priv, 0x001140, 0x00000111);
60
61 return nvkm_bus_init(&priv->base);
62} 64}
63 65
66static const struct nvkm_bus_func
67nv04_bus = {
68 .init = nv04_bus_init,
69 .intr = nv04_bus_intr,
70};
71
64int 72int
65nv04_bus_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 73nv04_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
66 struct nvkm_oclass *oclass, void *data, u32 size,
67 struct nvkm_object **pobject)
68{ 74{
69 struct nv04_bus_impl *impl = (void *)oclass; 75 return nvkm_bus_new_(&nv04_bus, device, index, pbus);
70 struct nv04_bus_priv *priv;
71 int ret;
72
73 ret = nvkm_bus_create(parent, engine, oclass, &priv);
74 *pobject = nv_object(priv);
75 if (ret)
76 return ret;
77
78 nv_subdev(priv)->intr = impl->intr;
79 priv->base.hwsq_exec = impl->hwsq_exec;
80 priv->base.hwsq_size = impl->hwsq_size;
81 return 0;
82} 76}
83
84struct nvkm_oclass *
85nv04_bus_oclass = &(struct nv04_bus_impl) {
86 .base.handle = NV_SUBDEV(BUS, 0x04),
87 .base.ofuncs = &(struct nvkm_ofuncs) {
88 .ctor = nv04_bus_ctor,
89 .dtor = _nvkm_bus_dtor,
90 .init = nv04_bus_init,
91 .fini = _nvkm_bus_fini,
92 },
93 .intr = nv04_bus_intr,
94}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h
deleted file mode 100644
index 3ddc8f91b1e3..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h
+++ /dev/null
@@ -1,21 +0,0 @@
1#ifndef __NVKM_BUS_NV04_H__
2#define __NVKM_BUS_NV04_H__
3#include <subdev/bus.h>
4
5struct nv04_bus_priv {
6 struct nvkm_bus base;
7};
8
9int nv04_bus_ctor(struct nvkm_object *, struct nvkm_object *,
10 struct nvkm_oclass *, void *, u32,
11 struct nvkm_object **);
12int nv50_bus_init(struct nvkm_object *);
13void nv50_bus_intr(struct nvkm_subdev *);
14
15struct nv04_bus_impl {
16 struct nvkm_oclass base;
17 void (*intr)(struct nvkm_subdev *);
18 int (*hwsq_exec)(struct nvkm_bus *, u32 *, u32);
19 u32 hwsq_size;
20};
21#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
index c5739bce8052..5153d89e1f0b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
@@ -22,70 +22,67 @@
22 * Authors: Martin Peres <martin.peres@labri.fr> 22 * Authors: Martin Peres <martin.peres@labri.fr>
23 * Ben Skeggs 23 * Ben Skeggs
24 */ 24 */
25#include "nv04.h" 25#include "priv.h"
26
27#include <subdev/gpio.h>
28#include <subdev/therm.h>
26 29
27static void 30static void
28nv31_bus_intr(struct nvkm_subdev *subdev) 31nv31_bus_intr(struct nvkm_bus *bus)
29{ 32{
30 struct nvkm_bus *pbus = nvkm_bus(subdev); 33 struct nvkm_subdev *subdev = &bus->subdev;
31 u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140); 34 struct nvkm_device *device = subdev->device;
32 u32 gpio = nv_rd32(pbus, 0x001104) & nv_rd32(pbus, 0x001144); 35 u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
36 u32 gpio = nvkm_rd32(device, 0x001104) & nvkm_rd32(device, 0x001144);
33 37
34 if (gpio) { 38 if (gpio) {
35 subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_GPIO); 39 struct nvkm_gpio *gpio = device->gpio;
36 if (subdev && subdev->intr) 40 if (gpio)
37 subdev->intr(subdev); 41 nvkm_subdev_intr(&gpio->subdev);
38 } 42 }
39 43
40 if (stat & 0x00000008) { /* NV41- */ 44 if (stat & 0x00000008) { /* NV41- */
41 u32 addr = nv_rd32(pbus, 0x009084); 45 u32 addr = nvkm_rd32(device, 0x009084);
42 u32 data = nv_rd32(pbus, 0x009088); 46 u32 data = nvkm_rd32(device, 0x009088);
43 47
44 nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n", 48 nvkm_error(subdev, "MMIO %s of %08x FAULT at %06x\n",
45 (addr & 0x00000002) ? "write" : "read", data, 49 (addr & 0x00000002) ? "write" : "read", data,
46 (addr & 0x00fffffc)); 50 (addr & 0x00fffffc));
47 51
48 stat &= ~0x00000008; 52 stat &= ~0x00000008;
49 nv_wr32(pbus, 0x001100, 0x00000008); 53 nvkm_wr32(device, 0x001100, 0x00000008);
50 } 54 }
51 55
52 if (stat & 0x00070000) { 56 if (stat & 0x00070000) {
53 subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_THERM); 57 struct nvkm_therm *therm = device->therm;
54 if (subdev && subdev->intr) 58 if (therm)
55 subdev->intr(subdev); 59 nvkm_subdev_intr(&therm->subdev);
56 stat &= ~0x00070000; 60 stat &= ~0x00070000;
57 nv_wr32(pbus, 0x001100, 0x00070000); 61 nvkm_wr32(device, 0x001100, 0x00070000);
58 } 62 }
59 63
60 if (stat) { 64 if (stat) {
61 nv_error(pbus, "unknown intr 0x%08x\n", stat); 65 nvkm_error(subdev, "intr %08x\n", stat);
62 nv_mask(pbus, 0x001140, stat, 0x00000000); 66 nvkm_mask(device, 0x001140, stat, 0x00000000);
63 } 67 }
64} 68}
65 69
66static int 70static void
67nv31_bus_init(struct nvkm_object *object) 71nv31_bus_init(struct nvkm_bus *bus)
68{ 72{
69 struct nv04_bus_priv *priv = (void *)object; 73 struct nvkm_device *device = bus->subdev.device;
70 int ret; 74 nvkm_wr32(device, 0x001100, 0xffffffff);
71 75 nvkm_wr32(device, 0x001140, 0x00070008);
72 ret = nvkm_bus_init(&priv->base);
73 if (ret)
74 return ret;
75
76 nv_wr32(priv, 0x001100, 0xffffffff);
77 nv_wr32(priv, 0x001140, 0x00070008);
78 return 0;
79} 76}
80 77
81struct nvkm_oclass * 78static const struct nvkm_bus_func
82nv31_bus_oclass = &(struct nv04_bus_impl) { 79nv31_bus = {
83 .base.handle = NV_SUBDEV(BUS, 0x31), 80 .init = nv31_bus_init,
84 .base.ofuncs = &(struct nvkm_ofuncs) {
85 .ctor = nv04_bus_ctor,
86 .dtor = _nvkm_bus_dtor,
87 .init = nv31_bus_init,
88 .fini = _nvkm_bus_fini,
89 },
90 .intr = nv31_bus_intr, 81 .intr = nv31_bus_intr,
91}.base; 82};
83
84int
85nv31_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
86{
87 return nvkm_bus_new_(&nv31_bus, device, index, pbus);
88}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
index 1987863d71ee..19e10fdc9291 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
@@ -22,83 +22,84 @@
22 * Authors: Martin Peres <martin.peres@labri.fr> 22 * Authors: Martin Peres <martin.peres@labri.fr>
23 * Ben Skeggs 23 * Ben Skeggs
24 */ 24 */
25#include "nv04.h" 25#include "priv.h"
26 26
27#include <subdev/therm.h>
27#include <subdev/timer.h> 28#include <subdev/timer.h>
28 29
29static int 30static int
30nv50_bus_hwsq_exec(struct nvkm_bus *pbus, u32 *data, u32 size) 31nv50_bus_hwsq_exec(struct nvkm_bus *bus, u32 *data, u32 size)
31{ 32{
32 struct nv50_bus_priv *priv = (void *)pbus; 33 struct nvkm_device *device = bus->subdev.device;
33 int i; 34 int i;
34 35
35 nv_mask(pbus, 0x001098, 0x00000008, 0x00000000); 36 nvkm_mask(device, 0x001098, 0x00000008, 0x00000000);
36 nv_wr32(pbus, 0x001304, 0x00000000); 37 nvkm_wr32(device, 0x001304, 0x00000000);
37 for (i = 0; i < size; i++) 38 for (i = 0; i < size; i++)
38 nv_wr32(priv, 0x001400 + (i * 4), data[i]); 39 nvkm_wr32(device, 0x001400 + (i * 4), data[i]);
39 nv_mask(pbus, 0x001098, 0x00000018, 0x00000018); 40 nvkm_mask(device, 0x001098, 0x00000018, 0x00000018);
40 nv_wr32(pbus, 0x00130c, 0x00000003); 41 nvkm_wr32(device, 0x00130c, 0x00000003);
41 42
42 return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT; 43 if (nvkm_msec(device, 2000,
44 if (!(nvkm_rd32(device, 0x001308) & 0x00000100))
45 break;
46 ) < 0)
47 return -ETIMEDOUT;
48
49 return 0;
43} 50}
44 51
45void 52void
46nv50_bus_intr(struct nvkm_subdev *subdev) 53nv50_bus_intr(struct nvkm_bus *bus)
47{ 54{
48 struct nvkm_bus *pbus = nvkm_bus(subdev); 55 struct nvkm_subdev *subdev = &bus->subdev;
49 u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140); 56 struct nvkm_device *device = subdev->device;
57 u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
50 58
51 if (stat & 0x00000008) { 59 if (stat & 0x00000008) {
52 u32 addr = nv_rd32(pbus, 0x009084); 60 u32 addr = nvkm_rd32(device, 0x009084);
53 u32 data = nv_rd32(pbus, 0x009088); 61 u32 data = nvkm_rd32(device, 0x009088);
54 62
55 nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n", 63 nvkm_error(subdev, "MMIO %s of %08x FAULT at %06x\n",
56 (addr & 0x00000002) ? "write" : "read", data, 64 (addr & 0x00000002) ? "write" : "read", data,
57 (addr & 0x00fffffc)); 65 (addr & 0x00fffffc));
58 66
59 stat &= ~0x00000008; 67 stat &= ~0x00000008;
60 nv_wr32(pbus, 0x001100, 0x00000008); 68 nvkm_wr32(device, 0x001100, 0x00000008);
61 } 69 }
62 70
63 if (stat & 0x00010000) { 71 if (stat & 0x00010000) {
64 subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_THERM); 72 struct nvkm_therm *therm = device->therm;
65 if (subdev && subdev->intr) 73 if (therm)
66 subdev->intr(subdev); 74 nvkm_subdev_intr(&therm->subdev);
67 stat &= ~0x00010000; 75 stat &= ~0x00010000;
68 nv_wr32(pbus, 0x001100, 0x00010000); 76 nvkm_wr32(device, 0x001100, 0x00010000);
69 } 77 }
70 78
71 if (stat) { 79 if (stat) {
72 nv_error(pbus, "unknown intr 0x%08x\n", stat); 80 nvkm_error(subdev, "intr %08x\n", stat);
73 nv_mask(pbus, 0x001140, stat, 0); 81 nvkm_mask(device, 0x001140, stat, 0);
74 } 82 }
75} 83}
76 84
77int 85void
78nv50_bus_init(struct nvkm_object *object) 86nv50_bus_init(struct nvkm_bus *bus)
79{ 87{
80 struct nv04_bus_priv *priv = (void *)object; 88 struct nvkm_device *device = bus->subdev.device;
81 int ret; 89 nvkm_wr32(device, 0x001100, 0xffffffff);
82 90 nvkm_wr32(device, 0x001140, 0x00010008);
83 ret = nvkm_bus_init(&priv->base);
84 if (ret)
85 return ret;
86
87 nv_wr32(priv, 0x001100, 0xffffffff);
88 nv_wr32(priv, 0x001140, 0x00010008);
89 return 0;
90} 91}
91 92
92struct nvkm_oclass * 93static const struct nvkm_bus_func
93nv50_bus_oclass = &(struct nv04_bus_impl) { 94nv50_bus = {
94 .base.handle = NV_SUBDEV(BUS, 0x50), 95 .init = nv50_bus_init,
95 .base.ofuncs = &(struct nvkm_ofuncs) {
96 .ctor = nv04_bus_ctor,
97 .dtor = _nvkm_bus_dtor,
98 .init = nv50_bus_init,
99 .fini = _nvkm_bus_fini,
100 },
101 .intr = nv50_bus_intr, 96 .intr = nv50_bus_intr,
102 .hwsq_exec = nv50_bus_hwsq_exec, 97 .hwsq_exec = nv50_bus_hwsq_exec,
103 .hwsq_size = 64, 98 .hwsq_size = 64,
104}.base; 99};
100
101int
102nv50_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
103{
104 return nvkm_bus_new_(&nv50_bus, device, index, pbus);
105}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
new file mode 100644
index 000000000000..a130f2c642d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
@@ -0,0 +1,18 @@
1#ifndef __NVKM_BUS_PRIV_H__
2#define __NVKM_BUS_PRIV_H__
3#define nvkm_bus(p) container_of((p), struct nvkm_bus, subdev)
4#include <subdev/bus.h>
5
6struct nvkm_bus_func {
7 void (*init)(struct nvkm_bus *);
8 void (*intr)(struct nvkm_bus *);
9 int (*hwsq_exec)(struct nvkm_bus *, u32 *, u32);
10 u32 hwsq_size;
11};
12
13int nvkm_bus_new_(const struct nvkm_bus_func *, struct nvkm_device *, int,
14 struct nvkm_bus **);
15
16void nv50_bus_init(struct nvkm_bus *);
17void nv50_bus_intr(struct nvkm_bus *);
18#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
index 9c2f688c9602..ed7717bcc3a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
@@ -8,5 +8,6 @@ nvkm-y += nvkm/subdev/clk/mcp77.o
8nvkm-y += nvkm/subdev/clk/gf100.o 8nvkm-y += nvkm/subdev/clk/gf100.o
9nvkm-y += nvkm/subdev/clk/gk104.o 9nvkm-y += nvkm/subdev/clk/gk104.o
10nvkm-y += nvkm/subdev/clk/gk20a.o 10nvkm-y += nvkm/subdev/clk/gk20a.o
11
11nvkm-y += nvkm/subdev/clk/pllnv04.o 12nvkm-y += nvkm/subdev/clk/pllnv04.o
12nvkm-y += nvkm/subdev/clk/pllgt215.o 13nvkm-y += nvkm/subdev/clk/pllgt215.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 39a83d82e0cd..dc8682c91cc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -21,7 +21,8 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <subdev/clk.h> 24#include "priv.h"
25
25#include <subdev/bios.h> 26#include <subdev/bios.h>
26#include <subdev/bios/boost.h> 27#include <subdev/bios/boost.h>
27#include <subdev/bios/cstep.h> 28#include <subdev/bios/cstep.h>
@@ -30,7 +31,6 @@
30#include <subdev/therm.h> 31#include <subdev/therm.h>
31#include <subdev/volt.h> 32#include <subdev/volt.h>
32 33
33#include <core/device.h>
34#include <core/option.h> 34#include <core/option.h>
35 35
36/****************************************************************************** 36/******************************************************************************
@@ -40,7 +40,7 @@ static u32
40nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust, 40nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
41 u8 pstate, u8 domain, u32 input) 41 u8 pstate, u8 domain, u32 input)
42{ 42{
43 struct nvkm_bios *bios = nvkm_bios(clk); 43 struct nvkm_bios *bios = clk->subdev.device->bios;
44 struct nvbios_boostE boostE; 44 struct nvbios_boostE boostE;
45 u8 ver, hdr, cnt, len; 45 u8 ver, hdr, cnt, len;
46 u16 data; 46 u16 data;
@@ -77,8 +77,10 @@ nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
77static int 77static int
78nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei) 78nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
79{ 79{
80 struct nvkm_therm *ptherm = nvkm_therm(clk); 80 struct nvkm_subdev *subdev = &clk->subdev;
81 struct nvkm_volt *volt = nvkm_volt(clk); 81 struct nvkm_device *device = subdev->device;
82 struct nvkm_therm *therm = device->therm;
83 struct nvkm_volt *volt = device->volt;
82 struct nvkm_cstate *cstate; 84 struct nvkm_cstate *cstate;
83 int ret; 85 int ret;
84 86
@@ -88,41 +90,41 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
88 cstate = &pstate->base; 90 cstate = &pstate->base;
89 } 91 }
90 92
91 if (ptherm) { 93 if (therm) {
92 ret = nvkm_therm_cstate(ptherm, pstate->fanspeed, +1); 94 ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
93 if (ret && ret != -ENODEV) { 95 if (ret && ret != -ENODEV) {
94 nv_error(clk, "failed to raise fan speed: %d\n", ret); 96 nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
95 return ret; 97 return ret;
96 } 98 }
97 } 99 }
98 100
99 if (volt) { 101 if (volt) {
100 ret = volt->set_id(volt, cstate->voltage, +1); 102 ret = nvkm_volt_set_id(volt, cstate->voltage, +1);
101 if (ret && ret != -ENODEV) { 103 if (ret && ret != -ENODEV) {
102 nv_error(clk, "failed to raise voltage: %d\n", ret); 104 nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
103 return ret; 105 return ret;
104 } 106 }
105 } 107 }
106 108
107 ret = clk->calc(clk, cstate); 109 ret = clk->func->calc(clk, cstate);
108 if (ret == 0) { 110 if (ret == 0) {
109 ret = clk->prog(clk); 111 ret = clk->func->prog(clk);
110 clk->tidy(clk); 112 clk->func->tidy(clk);
111 } 113 }
112 114
113 if (volt) { 115 if (volt) {
114 ret = volt->set_id(volt, cstate->voltage, -1); 116 ret = nvkm_volt_set_id(volt, cstate->voltage, -1);
115 if (ret && ret != -ENODEV) 117 if (ret && ret != -ENODEV)
116 nv_error(clk, "failed to lower voltage: %d\n", ret); 118 nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
117 } 119 }
118 120
119 if (ptherm) { 121 if (therm) {
120 ret = nvkm_therm_cstate(ptherm, pstate->fanspeed, -1); 122 ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
121 if (ret && ret != -ENODEV) 123 if (ret && ret != -ENODEV)
122 nv_error(clk, "failed to lower fan speed: %d\n", ret); 124 nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
123 } 125 }
124 126
125 return 0; 127 return ret;
126} 128}
127 129
128static void 130static void
@@ -135,8 +137,8 @@ nvkm_cstate_del(struct nvkm_cstate *cstate)
135static int 137static int
136nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate) 138nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
137{ 139{
138 struct nvkm_bios *bios = nvkm_bios(clk); 140 struct nvkm_bios *bios = clk->subdev.device->bios;
139 struct nvkm_domain *domain = clk->domains; 141 const struct nvkm_domain *domain = clk->domains;
140 struct nvkm_cstate *cstate = NULL; 142 struct nvkm_cstate *cstate = NULL;
141 struct nvbios_cstepX cstepX; 143 struct nvbios_cstepX cstepX;
142 u8 ver, hdr; 144 u8 ver, hdr;
@@ -172,7 +174,8 @@ nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
172static int 174static int
173nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei) 175nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
174{ 176{
175 struct nvkm_fb *pfb = nvkm_fb(clk); 177 struct nvkm_subdev *subdev = &clk->subdev;
178 struct nvkm_ram *ram = subdev->device->fb->ram;
176 struct nvkm_pstate *pstate; 179 struct nvkm_pstate *pstate;
177 int ret, idx = 0; 180 int ret, idx = 0;
178 181
@@ -181,17 +184,17 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
181 break; 184 break;
182 } 185 }
183 186
184 nv_debug(clk, "setting performance state %d\n", pstatei); 187 nvkm_debug(subdev, "setting performance state %d\n", pstatei);
185 clk->pstate = pstatei; 188 clk->pstate = pstatei;
186 189
187 if (pfb->ram && pfb->ram->calc) { 190 if (ram && ram->func->calc) {
188 int khz = pstate->base.domain[nv_clk_src_mem]; 191 int khz = pstate->base.domain[nv_clk_src_mem];
189 do { 192 do {
190 ret = pfb->ram->calc(pfb, khz); 193 ret = ram->func->calc(ram, khz);
191 if (ret == 0) 194 if (ret == 0)
192 ret = pfb->ram->prog(pfb); 195 ret = ram->func->prog(ram);
193 } while (ret > 0); 196 } while (ret > 0);
194 pfb->ram->tidy(pfb); 197 ram->func->tidy(ram);
195 } 198 }
196 199
197 return nvkm_cstate_prog(clk, pstate, 0); 200 return nvkm_cstate_prog(clk, pstate, 0);
@@ -201,31 +204,32 @@ static void
201nvkm_pstate_work(struct work_struct *work) 204nvkm_pstate_work(struct work_struct *work)
202{ 205{
203 struct nvkm_clk *clk = container_of(work, typeof(*clk), work); 206 struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
207 struct nvkm_subdev *subdev = &clk->subdev;
204 int pstate; 208 int pstate;
205 209
206 if (!atomic_xchg(&clk->waiting, 0)) 210 if (!atomic_xchg(&clk->waiting, 0))
207 return; 211 return;
208 clk->pwrsrc = power_supply_is_system_supplied(); 212 clk->pwrsrc = power_supply_is_system_supplied();
209 213
210 nv_trace(clk, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n", 214 nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
211 clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc, 215 clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
212 clk->astate, clk->tstate, clk->dstate); 216 clk->astate, clk->tstate, clk->dstate);
213 217
214 pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc; 218 pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
215 if (clk->state_nr && pstate != -1) { 219 if (clk->state_nr && pstate != -1) {
216 pstate = (pstate < 0) ? clk->astate : pstate; 220 pstate = (pstate < 0) ? clk->astate : pstate;
217 pstate = min(pstate, clk->state_nr - 1 - clk->tstate); 221 pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
218 pstate = max(pstate, clk->dstate); 222 pstate = max(pstate, clk->dstate);
219 } else { 223 } else {
220 pstate = clk->pstate = -1; 224 pstate = clk->pstate = -1;
221 } 225 }
222 226
223 nv_trace(clk, "-> %d\n", pstate); 227 nvkm_trace(subdev, "-> %d\n", pstate);
224 if (pstate != clk->pstate) { 228 if (pstate != clk->pstate) {
225 int ret = nvkm_pstate_prog(clk, pstate); 229 int ret = nvkm_pstate_prog(clk, pstate);
226 if (ret) { 230 if (ret) {
227 nv_error(clk, "error setting pstate %d: %d\n", 231 nvkm_error(subdev, "error setting pstate %d: %d\n",
228 pstate, ret); 232 pstate, ret);
229 } 233 }
230 } 234 }
231 235
@@ -246,8 +250,9 @@ nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
246static void 250static void
247nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate) 251nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
248{ 252{
249 struct nvkm_domain *clock = clk->domains - 1; 253 const struct nvkm_domain *clock = clk->domains - 1;
250 struct nvkm_cstate *cstate; 254 struct nvkm_cstate *cstate;
255 struct nvkm_subdev *subdev = &clk->subdev;
251 char info[3][32] = { "", "", "" }; 256 char info[3][32] = { "", "", "" };
252 char name[4] = "--"; 257 char name[4] = "--";
253 int i = -1; 258 int i = -1;
@@ -261,12 +266,12 @@ nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
261 if (hi == 0) 266 if (hi == 0)
262 continue; 267 continue;
263 268
264 nv_debug(clk, "%02x: %10d KHz\n", clock->name, lo); 269 nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
265 list_for_each_entry(cstate, &pstate->list, head) { 270 list_for_each_entry(cstate, &pstate->list, head) {
266 u32 freq = cstate->domain[clock->name]; 271 u32 freq = cstate->domain[clock->name];
267 lo = min(lo, freq); 272 lo = min(lo, freq);
268 hi = max(hi, freq); 273 hi = max(hi, freq);
269 nv_debug(clk, "%10d KHz\n", freq); 274 nvkm_debug(subdev, "%10d KHz\n", freq);
270 } 275 }
271 276
272 if (clock->mname && ++i < ARRAY_SIZE(info)) { 277 if (clock->mname && ++i < ARRAY_SIZE(info)) {
@@ -282,7 +287,7 @@ nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
282 } 287 }
283 } 288 }
284 289
285 nv_info(clk, "%s: %s %s %s\n", name, info[0], info[1], info[2]); 290 nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
286} 291}
287 292
288static void 293static void
@@ -301,8 +306,8 @@ nvkm_pstate_del(struct nvkm_pstate *pstate)
301static int 306static int
302nvkm_pstate_new(struct nvkm_clk *clk, int idx) 307nvkm_pstate_new(struct nvkm_clk *clk, int idx)
303{ 308{
304 struct nvkm_bios *bios = nvkm_bios(clk); 309 struct nvkm_bios *bios = clk->subdev.device->bios;
305 struct nvkm_domain *domain = clk->domains - 1; 310 const struct nvkm_domain *domain = clk->domains - 1;
306 struct nvkm_pstate *pstate; 311 struct nvkm_pstate *pstate;
307 struct nvkm_cstate *cstate; 312 struct nvkm_cstate *cstate;
308 struct nvbios_cstepE cstepE; 313 struct nvbios_cstepE cstepE;
@@ -471,32 +476,37 @@ nvkm_clk_pwrsrc(struct nvkm_notify *notify)
471 *****************************************************************************/ 476 *****************************************************************************/
472 477
473int 478int
474_nvkm_clk_fini(struct nvkm_object *object, bool suspend) 479nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
480{
481 return clk->func->read(clk, src);
482}
483
484static int
485nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
475{ 486{
476 struct nvkm_clk *clk = (void *)object; 487 struct nvkm_clk *clk = nvkm_clk(subdev);
477 nvkm_notify_put(&clk->pwrsrc_ntfy); 488 nvkm_notify_put(&clk->pwrsrc_ntfy);
478 return nvkm_subdev_fini(&clk->base, suspend); 489 flush_work(&clk->work);
490 if (clk->func->fini)
491 clk->func->fini(clk);
492 return 0;
479} 493}
480 494
481int 495static int
482_nvkm_clk_init(struct nvkm_object *object) 496nvkm_clk_init(struct nvkm_subdev *subdev)
483{ 497{
484 struct nvkm_clk *clk = (void *)object; 498 struct nvkm_clk *clk = nvkm_clk(subdev);
485 struct nvkm_domain *clock = clk->domains; 499 const struct nvkm_domain *clock = clk->domains;
486 int ret; 500 int ret;
487 501
488 ret = nvkm_subdev_init(&clk->base);
489 if (ret)
490 return ret;
491
492 memset(&clk->bstate, 0x00, sizeof(clk->bstate)); 502 memset(&clk->bstate, 0x00, sizeof(clk->bstate));
493 INIT_LIST_HEAD(&clk->bstate.list); 503 INIT_LIST_HEAD(&clk->bstate.list);
494 clk->bstate.pstate = 0xff; 504 clk->bstate.pstate = 0xff;
495 505
496 while (clock->name != nv_clk_src_max) { 506 while (clock->name != nv_clk_src_max) {
497 ret = clk->read(clk, clock->name); 507 ret = nvkm_clk_read(clk, clock->name);
498 if (ret < 0) { 508 if (ret < 0) {
499 nv_error(clk, "%02x freq unknown\n", clock->name); 509 nvkm_error(subdev, "%02x freq unknown\n", clock->name);
500 return ret; 510 return ret;
501 } 511 }
502 clk->bstate.base.domain[clock->name] = ret; 512 clk->bstate.base.domain[clock->name] = ret;
@@ -505,6 +515,9 @@ _nvkm_clk_init(struct nvkm_object *object)
505 515
506 nvkm_pstate_info(clk, &clk->bstate); 516 nvkm_pstate_info(clk, &clk->bstate);
507 517
518 if (clk->func->init)
519 return clk->func->init(clk);
520
508 clk->astate = clk->state_nr - 1; 521 clk->astate = clk->state_nr - 1;
509 clk->tstate = 0; 522 clk->tstate = 0;
510 clk->dstate = 0; 523 clk->dstate = 0;
@@ -513,61 +526,63 @@ _nvkm_clk_init(struct nvkm_object *object)
513 return 0; 526 return 0;
514} 527}
515 528
516void 529static void *
517_nvkm_clk_dtor(struct nvkm_object *object) 530nvkm_clk_dtor(struct nvkm_subdev *subdev)
518{ 531{
519 struct nvkm_clk *clk = (void *)object; 532 struct nvkm_clk *clk = nvkm_clk(subdev);
520 struct nvkm_pstate *pstate, *temp; 533 struct nvkm_pstate *pstate, *temp;
521 534
522 nvkm_notify_fini(&clk->pwrsrc_ntfy); 535 nvkm_notify_fini(&clk->pwrsrc_ntfy);
523 536
537 /* Early return if the pstates have been provided statically */
538 if (clk->func->pstates)
539 return clk;
540
524 list_for_each_entry_safe(pstate, temp, &clk->states, head) { 541 list_for_each_entry_safe(pstate, temp, &clk->states, head) {
525 nvkm_pstate_del(pstate); 542 nvkm_pstate_del(pstate);
526 } 543 }
527 544
528 nvkm_subdev_destroy(&clk->base); 545 return clk;
529} 546}
530 547
548static const struct nvkm_subdev_func
549nvkm_clk = {
550 .dtor = nvkm_clk_dtor,
551 .init = nvkm_clk_init,
552 .fini = nvkm_clk_fini,
553};
554
531int 555int
532nvkm_clk_create_(struct nvkm_object *parent, struct nvkm_object *engine, 556nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
533 struct nvkm_oclass *oclass, struct nvkm_domain *clocks, 557 int index, bool allow_reclock, struct nvkm_clk *clk)
534 struct nvkm_pstate *pstates, int nb_pstates,
535 bool allow_reclock, int length, void **object)
536{ 558{
537 struct nvkm_device *device = nv_device(parent);
538 struct nvkm_clk *clk;
539 int ret, idx, arglen; 559 int ret, idx, arglen;
540 const char *mode; 560 const char *mode;
541 561
542 ret = nvkm_subdev_create_(parent, engine, oclass, 0, "CLK", 562 nvkm_subdev_ctor(&nvkm_clk, device, index, 0, &clk->subdev);
543 "clock", length, object); 563 clk->func = func;
544 clk = *object;
545 if (ret)
546 return ret;
547
548 INIT_LIST_HEAD(&clk->states); 564 INIT_LIST_HEAD(&clk->states);
549 clk->domains = clocks; 565 clk->domains = func->domains;
550 clk->ustate_ac = -1; 566 clk->ustate_ac = -1;
551 clk->ustate_dc = -1; 567 clk->ustate_dc = -1;
568 clk->allow_reclock = allow_reclock;
552 569
553 INIT_WORK(&clk->work, nvkm_pstate_work); 570 INIT_WORK(&clk->work, nvkm_pstate_work);
554 init_waitqueue_head(&clk->wait); 571 init_waitqueue_head(&clk->wait);
555 atomic_set(&clk->waiting, 0); 572 atomic_set(&clk->waiting, 0);
556 573
557 /* If no pstates are provided, try and fetch them from the BIOS */ 574 /* If no pstates are provided, try and fetch them from the BIOS */
558 if (!pstates) { 575 if (!func->pstates) {
559 idx = 0; 576 idx = 0;
560 do { 577 do {
561 ret = nvkm_pstate_new(clk, idx++); 578 ret = nvkm_pstate_new(clk, idx++);
562 } while (ret == 0); 579 } while (ret == 0);
563 } else { 580 } else {
564 for (idx = 0; idx < nb_pstates; idx++) 581 for (idx = 0; idx < func->nr_pstates; idx++)
565 list_add_tail(&pstates[idx].head, &clk->states); 582 list_add_tail(&func->pstates[idx].head, &clk->states);
566 clk->state_nr = nb_pstates; 583 clk->state_nr = func->nr_pstates;
567 } 584 }
568 585
569 clk->allow_reclock = allow_reclock;
570
571 ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true, 586 ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
572 NULL, 0, 0, &clk->pwrsrc_ntfy); 587 NULL, 0, 0, &clk->pwrsrc_ntfy);
573 if (ret) 588 if (ret)
@@ -589,3 +604,12 @@ nvkm_clk_create_(struct nvkm_object *parent, struct nvkm_object *engine,
589 604
590 return 0; 605 return 0;
591} 606}
607
608int
609nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
610 int index, bool allow_reclock, struct nvkm_clk **pclk)
611{
612 if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
613 return -ENOMEM;
614 return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
615}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
index 4c90b9769d64..347da9ee20f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
@@ -23,25 +23,26 @@
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25 25
26static struct nvkm_domain 26static const struct nvkm_clk_func
27g84_domains[] = { 27g84_clk = {
28 { nv_clk_src_crystal, 0xff }, 28 .read = nv50_clk_read,
29 { nv_clk_src_href , 0xff }, 29 .calc = nv50_clk_calc,
30 { nv_clk_src_core , 0xff, 0, "core", 1000 }, 30 .prog = nv50_clk_prog,
31 { nv_clk_src_shader , 0xff, 0, "shader", 1000 }, 31 .tidy = nv50_clk_tidy,
32 { nv_clk_src_mem , 0xff, 0, "memory", 1000 }, 32 .domains = {
33 { nv_clk_src_vdec , 0xff }, 33 { nv_clk_src_crystal, 0xff },
34 { nv_clk_src_max } 34 { nv_clk_src_href , 0xff },
35 { nv_clk_src_core , 0xff, 0, "core", 1000 },
36 { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
37 { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
38 { nv_clk_src_vdec , 0xff },
39 { nv_clk_src_max }
40 }
35}; 41};
36 42
37struct nvkm_oclass * 43int
38g84_clk_oclass = &(struct nv50_clk_oclass) { 44g84_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
39 .base.handle = NV_SUBDEV(CLK, 0x84), 45{
40 .base.ofuncs = &(struct nvkm_ofuncs) { 46 return nv50_clk_new_(&g84_clk, device, index,
41 .ctor = nv50_clk_ctor, 47 (device->chipset == 0xa0), pclk);
42 .dtor = _nvkm_clk_dtor, 48}
43 .init = _nvkm_clk_init,
44 .fini = _nvkm_clk_fini,
45 },
46 .domains = g84_domains,
47}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 3d7330d54b02..a52b7e7fce41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -21,10 +21,10 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <subdev/clk.h> 24#define gf100_clk(p) container_of((p), struct gf100_clk, base)
25#include "priv.h"
25#include "pll.h" 26#include "pll.h"
26 27
27#include <core/device.h>
28#include <subdev/bios.h> 28#include <subdev/bios.h>
29#include <subdev/bios/pll.h> 29#include <subdev/bios/pll.h>
30#include <subdev/timer.h> 30#include <subdev/timer.h>
@@ -38,29 +38,29 @@ struct gf100_clk_info {
38 u32 coef; 38 u32 coef;
39}; 39};
40 40
41struct gf100_clk_priv { 41struct gf100_clk {
42 struct nvkm_clk base; 42 struct nvkm_clk base;
43 struct gf100_clk_info eng[16]; 43 struct gf100_clk_info eng[16];
44}; 44};
45 45
46static u32 read_div(struct gf100_clk_priv *, int, u32, u32); 46static u32 read_div(struct gf100_clk *, int, u32, u32);
47 47
48static u32 48static u32
49read_vco(struct gf100_clk_priv *priv, u32 dsrc) 49read_vco(struct gf100_clk *clk, u32 dsrc)
50{ 50{
51 struct nvkm_clk *clk = &priv->base; 51 struct nvkm_device *device = clk->base.subdev.device;
52 u32 ssrc = nv_rd32(priv, dsrc); 52 u32 ssrc = nvkm_rd32(device, dsrc);
53 if (!(ssrc & 0x00000100)) 53 if (!(ssrc & 0x00000100))
54 return clk->read(clk, nv_clk_src_sppll0); 54 return nvkm_clk_read(&clk->base, nv_clk_src_sppll0);
55 return clk->read(clk, nv_clk_src_sppll1); 55 return nvkm_clk_read(&clk->base, nv_clk_src_sppll1);
56} 56}
57 57
58static u32 58static u32
59read_pll(struct gf100_clk_priv *priv, u32 pll) 59read_pll(struct gf100_clk *clk, u32 pll)
60{ 60{
61 struct nvkm_clk *clk = &priv->base; 61 struct nvkm_device *device = clk->base.subdev.device;
62 u32 ctrl = nv_rd32(priv, pll + 0x00); 62 u32 ctrl = nvkm_rd32(device, pll + 0x00);
63 u32 coef = nv_rd32(priv, pll + 0x04); 63 u32 coef = nvkm_rd32(device, pll + 0x04);
64 u32 P = (coef & 0x003f0000) >> 16; 64 u32 P = (coef & 0x003f0000) >> 16;
65 u32 N = (coef & 0x0000ff00) >> 8; 65 u32 N = (coef & 0x0000ff00) >> 8;
66 u32 M = (coef & 0x000000ff) >> 0; 66 u32 M = (coef & 0x000000ff) >> 0;
@@ -72,20 +72,20 @@ read_pll(struct gf100_clk_priv *priv, u32 pll)
72 switch (pll) { 72 switch (pll) {
73 case 0x00e800: 73 case 0x00e800:
74 case 0x00e820: 74 case 0x00e820:
75 sclk = nv_device(priv)->crystal; 75 sclk = device->crystal;
76 P = 1; 76 P = 1;
77 break; 77 break;
78 case 0x132000: 78 case 0x132000:
79 sclk = clk->read(clk, nv_clk_src_mpllsrc); 79 sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrc);
80 break; 80 break;
81 case 0x132020: 81 case 0x132020:
82 sclk = clk->read(clk, nv_clk_src_mpllsrcref); 82 sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrcref);
83 break; 83 break;
84 case 0x137000: 84 case 0x137000:
85 case 0x137020: 85 case 0x137020:
86 case 0x137040: 86 case 0x137040:
87 case 0x1370e0: 87 case 0x1370e0:
88 sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140); 88 sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
89 break; 89 break;
90 default: 90 default:
91 return 0; 91 return 0;
@@ -95,46 +95,48 @@ read_pll(struct gf100_clk_priv *priv, u32 pll)
95} 95}
96 96
97static u32 97static u32
98read_div(struct gf100_clk_priv *priv, int doff, u32 dsrc, u32 dctl) 98read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
99{ 99{
100 u32 ssrc = nv_rd32(priv, dsrc + (doff * 4)); 100 struct nvkm_device *device = clk->base.subdev.device;
101 u32 sctl = nv_rd32(priv, dctl + (doff * 4)); 101 u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
102 u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
102 103
103 switch (ssrc & 0x00000003) { 104 switch (ssrc & 0x00000003) {
104 case 0: 105 case 0:
105 if ((ssrc & 0x00030000) != 0x00030000) 106 if ((ssrc & 0x00030000) != 0x00030000)
106 return nv_device(priv)->crystal; 107 return device->crystal;
107 return 108000; 108 return 108000;
108 case 2: 109 case 2:
109 return 100000; 110 return 100000;
110 case 3: 111 case 3:
111 if (sctl & 0x80000000) { 112 if (sctl & 0x80000000) {
112 u32 sclk = read_vco(priv, dsrc + (doff * 4)); 113 u32 sclk = read_vco(clk, dsrc + (doff * 4));
113 u32 sdiv = (sctl & 0x0000003f) + 2; 114 u32 sdiv = (sctl & 0x0000003f) + 2;
114 return (sclk * 2) / sdiv; 115 return (sclk * 2) / sdiv;
115 } 116 }
116 117
117 return read_vco(priv, dsrc + (doff * 4)); 118 return read_vco(clk, dsrc + (doff * 4));
118 default: 119 default:
119 return 0; 120 return 0;
120 } 121 }
121} 122}
122 123
123static u32 124static u32
124read_clk(struct gf100_clk_priv *priv, int clk) 125read_clk(struct gf100_clk *clk, int idx)
125{ 126{
126 u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4)); 127 struct nvkm_device *device = clk->base.subdev.device;
127 u32 ssel = nv_rd32(priv, 0x137100); 128 u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
129 u32 ssel = nvkm_rd32(device, 0x137100);
128 u32 sclk, sdiv; 130 u32 sclk, sdiv;
129 131
130 if (ssel & (1 << clk)) { 132 if (ssel & (1 << idx)) {
131 if (clk < 7) 133 if (idx < 7)
132 sclk = read_pll(priv, 0x137000 + (clk * 0x20)); 134 sclk = read_pll(clk, 0x137000 + (idx * 0x20));
133 else 135 else
134 sclk = read_pll(priv, 0x1370e0); 136 sclk = read_pll(clk, 0x1370e0);
135 sdiv = ((sctl & 0x00003f00) >> 8) + 2; 137 sdiv = ((sctl & 0x00003f00) >> 8) + 2;
136 } else { 138 } else {
137 sclk = read_div(priv, clk, 0x137160, 0x1371d0); 139 sclk = read_div(clk, idx, 0x137160, 0x1371d0);
138 sdiv = ((sctl & 0x0000003f) >> 0) + 2; 140 sdiv = ((sctl & 0x0000003f) >> 0) + 2;
139 } 141 }
140 142
@@ -145,10 +147,11 @@ read_clk(struct gf100_clk_priv *priv, int clk)
145} 147}
146 148
147static int 149static int
148gf100_clk_read(struct nvkm_clk *clk, enum nv_clk_src src) 150gf100_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
149{ 151{
150 struct nvkm_device *device = nv_device(clk); 152 struct gf100_clk *clk = gf100_clk(base);
151 struct gf100_clk_priv *priv = (void *)clk; 153 struct nvkm_subdev *subdev = &clk->base.subdev;
154 struct nvkm_device *device = subdev->device;
152 155
153 switch (src) { 156 switch (src) {
154 case nv_clk_src_crystal: 157 case nv_clk_src_crystal:
@@ -156,47 +159,47 @@ gf100_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
156 case nv_clk_src_href: 159 case nv_clk_src_href:
157 return 100000; 160 return 100000;
158 case nv_clk_src_sppll0: 161 case nv_clk_src_sppll0:
159 return read_pll(priv, 0x00e800); 162 return read_pll(clk, 0x00e800);
160 case nv_clk_src_sppll1: 163 case nv_clk_src_sppll1:
161 return read_pll(priv, 0x00e820); 164 return read_pll(clk, 0x00e820);
162 165
163 case nv_clk_src_mpllsrcref: 166 case nv_clk_src_mpllsrcref:
164 return read_div(priv, 0, 0x137320, 0x137330); 167 return read_div(clk, 0, 0x137320, 0x137330);
165 case nv_clk_src_mpllsrc: 168 case nv_clk_src_mpllsrc:
166 return read_pll(priv, 0x132020); 169 return read_pll(clk, 0x132020);
167 case nv_clk_src_mpll: 170 case nv_clk_src_mpll:
168 return read_pll(priv, 0x132000); 171 return read_pll(clk, 0x132000);
169 case nv_clk_src_mdiv: 172 case nv_clk_src_mdiv:
170 return read_div(priv, 0, 0x137300, 0x137310); 173 return read_div(clk, 0, 0x137300, 0x137310);
171 case nv_clk_src_mem: 174 case nv_clk_src_mem:
172 if (nv_rd32(priv, 0x1373f0) & 0x00000002) 175 if (nvkm_rd32(device, 0x1373f0) & 0x00000002)
173 return clk->read(clk, nv_clk_src_mpll); 176 return nvkm_clk_read(&clk->base, nv_clk_src_mpll);
174 return clk->read(clk, nv_clk_src_mdiv); 177 return nvkm_clk_read(&clk->base, nv_clk_src_mdiv);
175 178
176 case nv_clk_src_gpc: 179 case nv_clk_src_gpc:
177 return read_clk(priv, 0x00); 180 return read_clk(clk, 0x00);
178 case nv_clk_src_rop: 181 case nv_clk_src_rop:
179 return read_clk(priv, 0x01); 182 return read_clk(clk, 0x01);
180 case nv_clk_src_hubk07: 183 case nv_clk_src_hubk07:
181 return read_clk(priv, 0x02); 184 return read_clk(clk, 0x02);
182 case nv_clk_src_hubk06: 185 case nv_clk_src_hubk06:
183 return read_clk(priv, 0x07); 186 return read_clk(clk, 0x07);
184 case nv_clk_src_hubk01: 187 case nv_clk_src_hubk01:
185 return read_clk(priv, 0x08); 188 return read_clk(clk, 0x08);
186 case nv_clk_src_copy: 189 case nv_clk_src_copy:
187 return read_clk(priv, 0x09); 190 return read_clk(clk, 0x09);
188 case nv_clk_src_daemon: 191 case nv_clk_src_daemon:
189 return read_clk(priv, 0x0c); 192 return read_clk(clk, 0x0c);
190 case nv_clk_src_vdec: 193 case nv_clk_src_vdec:
191 return read_clk(priv, 0x0e); 194 return read_clk(clk, 0x0e);
192 default: 195 default:
193 nv_error(clk, "invalid clock source %d\n", src); 196 nvkm_error(subdev, "invalid clock source %d\n", src);
194 return -EINVAL; 197 return -EINVAL;
195 } 198 }
196} 199}
197 200
198static u32 201static u32
199calc_div(struct gf100_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv) 202calc_div(struct gf100_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
200{ 203{
201 u32 div = min((ref * 2) / freq, (u32)65); 204 u32 div = min((ref * 2) / freq, (u32)65);
202 if (div < 2) 205 if (div < 2)
@@ -207,7 +210,7 @@ calc_div(struct gf100_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
207} 210}
208 211
209static u32 212static u32
210calc_src(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv) 213calc_src(struct gf100_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
211{ 214{
212 u32 sclk; 215 u32 sclk;
213 216
@@ -229,28 +232,29 @@ calc_src(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
229 } 232 }
230 233
231 /* otherwise, calculate the closest divider */ 234 /* otherwise, calculate the closest divider */
232 sclk = read_vco(priv, 0x137160 + (clk * 4)); 235 sclk = read_vco(clk, 0x137160 + (idx * 4));
233 if (clk < 7) 236 if (idx < 7)
234 sclk = calc_div(priv, clk, sclk, freq, ddiv); 237 sclk = calc_div(clk, idx, sclk, freq, ddiv);
235 return sclk; 238 return sclk;
236} 239}
237 240
238static u32 241static u32
239calc_pll(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *coef) 242calc_pll(struct gf100_clk *clk, int idx, u32 freq, u32 *coef)
240{ 243{
241 struct nvkm_bios *bios = nvkm_bios(priv); 244 struct nvkm_subdev *subdev = &clk->base.subdev;
245 struct nvkm_bios *bios = subdev->device->bios;
242 struct nvbios_pll limits; 246 struct nvbios_pll limits;
243 int N, M, P, ret; 247 int N, M, P, ret;
244 248
245 ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits); 249 ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
246 if (ret) 250 if (ret)
247 return 0; 251 return 0;
248 252
249 limits.refclk = read_div(priv, clk, 0x137120, 0x137140); 253 limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
250 if (!limits.refclk) 254 if (!limits.refclk)
251 return 0; 255 return 0;
252 256
253 ret = gt215_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P); 257 ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
254 if (ret <= 0) 258 if (ret <= 0)
255 return 0; 259 return 0;
256 260
@@ -259,10 +263,9 @@ calc_pll(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *coef)
259} 263}
260 264
261static int 265static int
262calc_clk(struct gf100_clk_priv *priv, 266calc_clk(struct gf100_clk *clk, struct nvkm_cstate *cstate, int idx, int dom)
263 struct nvkm_cstate *cstate, int clk, int dom)
264{ 267{
265 struct gf100_clk_info *info = &priv->eng[clk]; 268 struct gf100_clk_info *info = &clk->eng[idx];
266 u32 freq = cstate->domain[dom]; 269 u32 freq = cstate->domain[dom];
267 u32 src0, div0, div1D, div1P = 0; 270 u32 src0, div0, div1D, div1P = 0;
268 u32 clk0, clk1 = 0; 271 u32 clk0, clk1 = 0;
@@ -272,16 +275,16 @@ calc_clk(struct gf100_clk_priv *priv,
272 return 0; 275 return 0;
273 276
274 /* first possible path, using only dividers */ 277 /* first possible path, using only dividers */
275 clk0 = calc_src(priv, clk, freq, &src0, &div0); 278 clk0 = calc_src(clk, idx, freq, &src0, &div0);
276 clk0 = calc_div(priv, clk, clk0, freq, &div1D); 279 clk0 = calc_div(clk, idx, clk0, freq, &div1D);
277 280
278 /* see if we can get any closer using PLLs */ 281 /* see if we can get any closer using PLLs */
279 if (clk0 != freq && (0x00004387 & (1 << clk))) { 282 if (clk0 != freq && (0x00004387 & (1 << idx))) {
280 if (clk <= 7) 283 if (idx <= 7)
281 clk1 = calc_pll(priv, clk, freq, &info->coef); 284 clk1 = calc_pll(clk, idx, freq, &info->coef);
282 else 285 else
283 clk1 = cstate->domain[nv_clk_src_hubk06]; 286 clk1 = cstate->domain[nv_clk_src_hubk06];
284 clk1 = calc_div(priv, clk, clk1, freq, &div1P); 287 clk1 = calc_div(clk, idx, clk1, freq, &div1P);
285 } 288 }
286 289
287 /* select the method which gets closest to target freq */ 290 /* select the method which gets closest to target freq */
@@ -303,7 +306,7 @@ calc_clk(struct gf100_clk_priv *priv,
303 info->mdiv |= 0x80000000; 306 info->mdiv |= 0x80000000;
304 info->mdiv |= div1P << 8; 307 info->mdiv |= div1P << 8;
305 } 308 }
306 info->ssel = (1 << clk); 309 info->ssel = (1 << idx);
307 info->freq = clk1; 310 info->freq = clk1;
308 } 311 }
309 312
@@ -311,81 +314,96 @@ calc_clk(struct gf100_clk_priv *priv,
311} 314}
312 315
313static int 316static int
314gf100_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate) 317gf100_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
315{ 318{
316 struct gf100_clk_priv *priv = (void *)clk; 319 struct gf100_clk *clk = gf100_clk(base);
317 int ret; 320 int ret;
318 321
319 if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) || 322 if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
320 (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) || 323 (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
321 (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) || 324 (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
322 (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) || 325 (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
323 (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) || 326 (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
324 (ret = calc_clk(priv, cstate, 0x09, nv_clk_src_copy)) || 327 (ret = calc_clk(clk, cstate, 0x09, nv_clk_src_copy)) ||
325 (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) || 328 (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) ||
326 (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec))) 329 (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
327 return ret; 330 return ret;
328 331
329 return 0; 332 return 0;
330} 333}
331 334
332static void 335static void
333gf100_clk_prog_0(struct gf100_clk_priv *priv, int clk) 336gf100_clk_prog_0(struct gf100_clk *clk, int idx)
334{ 337{
335 struct gf100_clk_info *info = &priv->eng[clk]; 338 struct gf100_clk_info *info = &clk->eng[idx];
336 if (clk < 7 && !info->ssel) { 339 struct nvkm_device *device = clk->base.subdev.device;
337 nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv); 340 if (idx < 7 && !info->ssel) {
338 nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc); 341 nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
342 nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
339 } 343 }
340} 344}
341 345
342static void 346static void
343gf100_clk_prog_1(struct gf100_clk_priv *priv, int clk) 347gf100_clk_prog_1(struct gf100_clk *clk, int idx)
344{ 348{
345 nv_mask(priv, 0x137100, (1 << clk), 0x00000000); 349 struct nvkm_device *device = clk->base.subdev.device;
346 nv_wait(priv, 0x137100, (1 << clk), 0x00000000); 350 nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
351 nvkm_msec(device, 2000,
352 if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
353 break;
354 );
347} 355}
348 356
349static void 357static void
350gf100_clk_prog_2(struct gf100_clk_priv *priv, int clk) 358gf100_clk_prog_2(struct gf100_clk *clk, int idx)
351{ 359{
352 struct gf100_clk_info *info = &priv->eng[clk]; 360 struct gf100_clk_info *info = &clk->eng[idx];
353 const u32 addr = 0x137000 + (clk * 0x20); 361 struct nvkm_device *device = clk->base.subdev.device;
354 if (clk <= 7) { 362 const u32 addr = 0x137000 + (idx * 0x20);
355 nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000); 363 if (idx <= 7) {
356 nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000); 364 nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
365 nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
357 if (info->coef) { 366 if (info->coef) {
358 nv_wr32(priv, addr + 0x04, info->coef); 367 nvkm_wr32(device, addr + 0x04, info->coef);
359 nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001); 368 nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
360 nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000); 369 nvkm_msec(device, 2000,
361 nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004); 370 if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
371 break;
372 );
373 nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
362 } 374 }
363 } 375 }
364} 376}
365 377
366static void 378static void
367gf100_clk_prog_3(struct gf100_clk_priv *priv, int clk) 379gf100_clk_prog_3(struct gf100_clk *clk, int idx)
368{ 380{
369 struct gf100_clk_info *info = &priv->eng[clk]; 381 struct gf100_clk_info *info = &clk->eng[idx];
382 struct nvkm_device *device = clk->base.subdev.device;
370 if (info->ssel) { 383 if (info->ssel) {
371 nv_mask(priv, 0x137100, (1 << clk), info->ssel); 384 nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
372 nv_wait(priv, 0x137100, (1 << clk), info->ssel); 385 nvkm_msec(device, 2000,
386 u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
387 if (tmp == info->ssel)
388 break;
389 );
373 } 390 }
374} 391}
375 392
376static void 393static void
377gf100_clk_prog_4(struct gf100_clk_priv *priv, int clk) 394gf100_clk_prog_4(struct gf100_clk *clk, int idx)
378{ 395{
379 struct gf100_clk_info *info = &priv->eng[clk]; 396 struct gf100_clk_info *info = &clk->eng[idx];
380 nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv); 397 struct nvkm_device *device = clk->base.subdev.device;
398 nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
381} 399}
382 400
383static int 401static int
384gf100_clk_prog(struct nvkm_clk *clk) 402gf100_clk_prog(struct nvkm_clk *base)
385{ 403{
386 struct gf100_clk_priv *priv = (void *)clk; 404 struct gf100_clk *clk = gf100_clk(base);
387 struct { 405 struct {
388 void (*exec)(struct gf100_clk_priv *, int); 406 void (*exec)(struct gf100_clk *, int);
389 } stage[] = { 407 } stage[] = {
390 { gf100_clk_prog_0 }, /* div programming */ 408 { gf100_clk_prog_0 }, /* div programming */
391 { gf100_clk_prog_1 }, /* select div mode */ 409 { gf100_clk_prog_1 }, /* select div mode */
@@ -396,10 +414,10 @@ gf100_clk_prog(struct nvkm_clk *clk)
396 int i, j; 414 int i, j;
397 415
398 for (i = 0; i < ARRAY_SIZE(stage); i++) { 416 for (i = 0; i < ARRAY_SIZE(stage); i++) {
399 for (j = 0; j < ARRAY_SIZE(priv->eng); j++) { 417 for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
400 if (!priv->eng[j].freq) 418 if (!clk->eng[j].freq)
401 continue; 419 continue;
402 stage[i].exec(priv, j); 420 stage[i].exec(clk, j);
403 } 421 }
404 } 422 }
405 423
@@ -407,56 +425,42 @@ gf100_clk_prog(struct nvkm_clk *clk)
407} 425}
408 426
409static void 427static void
410gf100_clk_tidy(struct nvkm_clk *clk) 428gf100_clk_tidy(struct nvkm_clk *base)
411{ 429{
412 struct gf100_clk_priv *priv = (void *)clk; 430 struct gf100_clk *clk = gf100_clk(base);
413 memset(priv->eng, 0x00, sizeof(priv->eng)); 431 memset(clk->eng, 0x00, sizeof(clk->eng));
414} 432}
415 433
416static struct nvkm_domain 434static const struct nvkm_clk_func
417gf100_domain[] = { 435gf100_clk = {
418 { nv_clk_src_crystal, 0xff }, 436 .read = gf100_clk_read,
419 { nv_clk_src_href , 0xff }, 437 .calc = gf100_clk_calc,
420 { nv_clk_src_hubk06 , 0x00 }, 438 .prog = gf100_clk_prog,
421 { nv_clk_src_hubk01 , 0x01 }, 439 .tidy = gf100_clk_tidy,
422 { nv_clk_src_copy , 0x02 }, 440 .domains = {
423 { nv_clk_src_gpc , 0x03, 0, "core", 2000 }, 441 { nv_clk_src_crystal, 0xff },
424 { nv_clk_src_rop , 0x04 }, 442 { nv_clk_src_href , 0xff },
425 { nv_clk_src_mem , 0x05, 0, "memory", 1000 }, 443 { nv_clk_src_hubk06 , 0x00 },
426 { nv_clk_src_vdec , 0x06 }, 444 { nv_clk_src_hubk01 , 0x01 },
427 { nv_clk_src_daemon , 0x0a }, 445 { nv_clk_src_copy , 0x02 },
428 { nv_clk_src_hubk07 , 0x0b }, 446 { nv_clk_src_gpc , 0x03, 0, "core", 2000 },
429 { nv_clk_src_max } 447 { nv_clk_src_rop , 0x04 },
448 { nv_clk_src_mem , 0x05, 0, "memory", 1000 },
449 { nv_clk_src_vdec , 0x06 },
450 { nv_clk_src_daemon , 0x0a },
451 { nv_clk_src_hubk07 , 0x0b },
452 { nv_clk_src_max }
453 }
430}; 454};
431 455
432static int 456int
433gf100_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 457gf100_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
434 struct nvkm_oclass *oclass, void *data, u32 size,
435 struct nvkm_object **pobject)
436{ 458{
437 struct gf100_clk_priv *priv; 459 struct gf100_clk *clk;
438 int ret;
439 460
440 ret = nvkm_clk_create(parent, engine, oclass, gf100_domain, 461 if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
441 NULL, 0, false, &priv); 462 return -ENOMEM;
442 *pobject = nv_object(priv); 463 *pclk = &clk->base;
443 if (ret)
444 return ret;
445 464
446 priv->base.read = gf100_clk_read; 465 return nvkm_clk_ctor(&gf100_clk, device, index, false, &clk->base);
447 priv->base.calc = gf100_clk_calc;
448 priv->base.prog = gf100_clk_prog;
449 priv->base.tidy = gf100_clk_tidy;
450 return 0;
451} 466}
452
453struct nvkm_oclass
454gf100_clk_oclass = {
455 .handle = NV_SUBDEV(CLK, 0xc0),
456 .ofuncs = &(struct nvkm_ofuncs) {
457 .ctor = gf100_clk_ctor,
458 .dtor = _nvkm_clk_dtor,
459 .init = _nvkm_clk_init,
460 .fini = _nvkm_clk_fini,
461 },
462};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index e9b2310bdfbb..396f7e4dad0a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -21,10 +21,10 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <subdev/clk.h> 24#define gk104_clk(p) container_of((p), struct gk104_clk, base)
25#include "priv.h"
25#include "pll.h" 26#include "pll.h"
26 27
27#include <core/device.h>
28#include <subdev/timer.h> 28#include <subdev/timer.h>
29#include <subdev/bios.h> 29#include <subdev/bios.h>
30#include <subdev/bios/pll.h> 30#include <subdev/bios/pll.h>
@@ -38,28 +38,30 @@ struct gk104_clk_info {
38 u32 coef; 38 u32 coef;
39}; 39};
40 40
41struct gk104_clk_priv { 41struct gk104_clk {
42 struct nvkm_clk base; 42 struct nvkm_clk base;
43 struct gk104_clk_info eng[16]; 43 struct gk104_clk_info eng[16];
44}; 44};
45 45
46static u32 read_div(struct gk104_clk_priv *, int, u32, u32); 46static u32 read_div(struct gk104_clk *, int, u32, u32);
47static u32 read_pll(struct gk104_clk_priv *, u32); 47static u32 read_pll(struct gk104_clk *, u32);
48 48
49static u32 49static u32
50read_vco(struct gk104_clk_priv *priv, u32 dsrc) 50read_vco(struct gk104_clk *clk, u32 dsrc)
51{ 51{
52 u32 ssrc = nv_rd32(priv, dsrc); 52 struct nvkm_device *device = clk->base.subdev.device;
53 u32 ssrc = nvkm_rd32(device, dsrc);
53 if (!(ssrc & 0x00000100)) 54 if (!(ssrc & 0x00000100))
54 return read_pll(priv, 0x00e800); 55 return read_pll(clk, 0x00e800);
55 return read_pll(priv, 0x00e820); 56 return read_pll(clk, 0x00e820);
56} 57}
57 58
58static u32 59static u32
59read_pll(struct gk104_clk_priv *priv, u32 pll) 60read_pll(struct gk104_clk *clk, u32 pll)
60{ 61{
61 u32 ctrl = nv_rd32(priv, pll + 0x00); 62 struct nvkm_device *device = clk->base.subdev.device;
62 u32 coef = nv_rd32(priv, pll + 0x04); 63 u32 ctrl = nvkm_rd32(device, pll + 0x00);
64 u32 coef = nvkm_rd32(device, pll + 0x04);
63 u32 P = (coef & 0x003f0000) >> 16; 65 u32 P = (coef & 0x003f0000) >> 16;
64 u32 N = (coef & 0x0000ff00) >> 8; 66 u32 N = (coef & 0x0000ff00) >> 8;
65 u32 M = (coef & 0x000000ff) >> 0; 67 u32 M = (coef & 0x000000ff) >> 0;
@@ -72,22 +74,22 @@ read_pll(struct gk104_clk_priv *priv, u32 pll)
72 switch (pll) { 74 switch (pll) {
73 case 0x00e800: 75 case 0x00e800:
74 case 0x00e820: 76 case 0x00e820:
75 sclk = nv_device(priv)->crystal; 77 sclk = device->crystal;
76 P = 1; 78 P = 1;
77 break; 79 break;
78 case 0x132000: 80 case 0x132000:
79 sclk = read_pll(priv, 0x132020); 81 sclk = read_pll(clk, 0x132020);
80 P = (coef & 0x10000000) ? 2 : 1; 82 P = (coef & 0x10000000) ? 2 : 1;
81 break; 83 break;
82 case 0x132020: 84 case 0x132020:
83 sclk = read_div(priv, 0, 0x137320, 0x137330); 85 sclk = read_div(clk, 0, 0x137320, 0x137330);
84 fN = nv_rd32(priv, pll + 0x10) >> 16; 86 fN = nvkm_rd32(device, pll + 0x10) >> 16;
85 break; 87 break;
86 case 0x137000: 88 case 0x137000:
87 case 0x137020: 89 case 0x137020:
88 case 0x137040: 90 case 0x137040:
89 case 0x1370e0: 91 case 0x1370e0:
90 sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140); 92 sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
91 break; 93 break;
92 default: 94 default:
93 return 0; 95 return 0;
@@ -101,70 +103,73 @@ read_pll(struct gk104_clk_priv *priv, u32 pll)
101} 103}
102 104
103static u32 105static u32
104read_div(struct gk104_clk_priv *priv, int doff, u32 dsrc, u32 dctl) 106read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
105{ 107{
106 u32 ssrc = nv_rd32(priv, dsrc + (doff * 4)); 108 struct nvkm_device *device = clk->base.subdev.device;
107 u32 sctl = nv_rd32(priv, dctl + (doff * 4)); 109 u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
110 u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
108 111
109 switch (ssrc & 0x00000003) { 112 switch (ssrc & 0x00000003) {
110 case 0: 113 case 0:
111 if ((ssrc & 0x00030000) != 0x00030000) 114 if ((ssrc & 0x00030000) != 0x00030000)
112 return nv_device(priv)->crystal; 115 return device->crystal;
113 return 108000; 116 return 108000;
114 case 2: 117 case 2:
115 return 100000; 118 return 100000;
116 case 3: 119 case 3:
117 if (sctl & 0x80000000) { 120 if (sctl & 0x80000000) {
118 u32 sclk = read_vco(priv, dsrc + (doff * 4)); 121 u32 sclk = read_vco(clk, dsrc + (doff * 4));
119 u32 sdiv = (sctl & 0x0000003f) + 2; 122 u32 sdiv = (sctl & 0x0000003f) + 2;
120 return (sclk * 2) / sdiv; 123 return (sclk * 2) / sdiv;
121 } 124 }
122 125
123 return read_vco(priv, dsrc + (doff * 4)); 126 return read_vco(clk, dsrc + (doff * 4));
124 default: 127 default:
125 return 0; 128 return 0;
126 } 129 }
127} 130}
128 131
129static u32 132static u32
130read_mem(struct gk104_clk_priv *priv) 133read_mem(struct gk104_clk *clk)
131{ 134{
132 switch (nv_rd32(priv, 0x1373f4) & 0x0000000f) { 135 struct nvkm_device *device = clk->base.subdev.device;
133 case 1: return read_pll(priv, 0x132020); 136 switch (nvkm_rd32(device, 0x1373f4) & 0x0000000f) {
134 case 2: return read_pll(priv, 0x132000); 137 case 1: return read_pll(clk, 0x132020);
138 case 2: return read_pll(clk, 0x132000);
135 default: 139 default:
136 return 0; 140 return 0;
137 } 141 }
138} 142}
139 143
140static u32 144static u32
141read_clk(struct gk104_clk_priv *priv, int clk) 145read_clk(struct gk104_clk *clk, int idx)
142{ 146{
143 u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4)); 147 struct nvkm_device *device = clk->base.subdev.device;
148 u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
144 u32 sclk, sdiv; 149 u32 sclk, sdiv;
145 150
146 if (clk < 7) { 151 if (idx < 7) {
147 u32 ssel = nv_rd32(priv, 0x137100); 152 u32 ssel = nvkm_rd32(device, 0x137100);
148 if (ssel & (1 << clk)) { 153 if (ssel & (1 << idx)) {
149 sclk = read_pll(priv, 0x137000 + (clk * 0x20)); 154 sclk = read_pll(clk, 0x137000 + (idx * 0x20));
150 sdiv = 1; 155 sdiv = 1;
151 } else { 156 } else {
152 sclk = read_div(priv, clk, 0x137160, 0x1371d0); 157 sclk = read_div(clk, idx, 0x137160, 0x1371d0);
153 sdiv = 0; 158 sdiv = 0;
154 } 159 }
155 } else { 160 } else {
156 u32 ssrc = nv_rd32(priv, 0x137160 + (clk * 0x04)); 161 u32 ssrc = nvkm_rd32(device, 0x137160 + (idx * 0x04));
157 if ((ssrc & 0x00000003) == 0x00000003) { 162 if ((ssrc & 0x00000003) == 0x00000003) {
158 sclk = read_div(priv, clk, 0x137160, 0x1371d0); 163 sclk = read_div(clk, idx, 0x137160, 0x1371d0);
159 if (ssrc & 0x00000100) { 164 if (ssrc & 0x00000100) {
160 if (ssrc & 0x40000000) 165 if (ssrc & 0x40000000)
161 sclk = read_pll(priv, 0x1370e0); 166 sclk = read_pll(clk, 0x1370e0);
162 sdiv = 1; 167 sdiv = 1;
163 } else { 168 } else {
164 sdiv = 0; 169 sdiv = 0;
165 } 170 }
166 } else { 171 } else {
167 sclk = read_div(priv, clk, 0x137160, 0x1371d0); 172 sclk = read_div(clk, idx, 0x137160, 0x1371d0);
168 sdiv = 0; 173 sdiv = 0;
169 } 174 }
170 } 175 }
@@ -181,10 +186,11 @@ read_clk(struct gk104_clk_priv *priv, int clk)
181} 186}
182 187
183static int 188static int
184gk104_clk_read(struct nvkm_clk *clk, enum nv_clk_src src) 189gk104_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
185{ 190{
186 struct nvkm_device *device = nv_device(clk); 191 struct gk104_clk *clk = gk104_clk(base);
187 struct gk104_clk_priv *priv = (void *)clk; 192 struct nvkm_subdev *subdev = &clk->base.subdev;
193 struct nvkm_device *device = subdev->device;
188 194
189 switch (src) { 195 switch (src) {
190 case nv_clk_src_crystal: 196 case nv_clk_src_crystal:
@@ -192,29 +198,29 @@ gk104_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
192 case nv_clk_src_href: 198 case nv_clk_src_href:
193 return 100000; 199 return 100000;
194 case nv_clk_src_mem: 200 case nv_clk_src_mem:
195 return read_mem(priv); 201 return read_mem(clk);
196 case nv_clk_src_gpc: 202 case nv_clk_src_gpc:
197 return read_clk(priv, 0x00); 203 return read_clk(clk, 0x00);
198 case nv_clk_src_rop: 204 case nv_clk_src_rop:
199 return read_clk(priv, 0x01); 205 return read_clk(clk, 0x01);
200 case nv_clk_src_hubk07: 206 case nv_clk_src_hubk07:
201 return read_clk(priv, 0x02); 207 return read_clk(clk, 0x02);
202 case nv_clk_src_hubk06: 208 case nv_clk_src_hubk06:
203 return read_clk(priv, 0x07); 209 return read_clk(clk, 0x07);
204 case nv_clk_src_hubk01: 210 case nv_clk_src_hubk01:
205 return read_clk(priv, 0x08); 211 return read_clk(clk, 0x08);
206 case nv_clk_src_daemon: 212 case nv_clk_src_daemon:
207 return read_clk(priv, 0x0c); 213 return read_clk(clk, 0x0c);
208 case nv_clk_src_vdec: 214 case nv_clk_src_vdec:
209 return read_clk(priv, 0x0e); 215 return read_clk(clk, 0x0e);
210 default: 216 default:
211 nv_error(clk, "invalid clock source %d\n", src); 217 nvkm_error(subdev, "invalid clock source %d\n", src);
212 return -EINVAL; 218 return -EINVAL;
213 } 219 }
214} 220}
215 221
216static u32 222static u32
217calc_div(struct gk104_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv) 223calc_div(struct gk104_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
218{ 224{
219 u32 div = min((ref * 2) / freq, (u32)65); 225 u32 div = min((ref * 2) / freq, (u32)65);
220 if (div < 2) 226 if (div < 2)
@@ -225,7 +231,7 @@ calc_div(struct gk104_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
225} 231}
226 232
227static u32 233static u32
228calc_src(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv) 234calc_src(struct gk104_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
229{ 235{
230 u32 sclk; 236 u32 sclk;
231 237
@@ -247,28 +253,29 @@ calc_src(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
247 } 253 }
248 254
249 /* otherwise, calculate the closest divider */ 255 /* otherwise, calculate the closest divider */
250 sclk = read_vco(priv, 0x137160 + (clk * 4)); 256 sclk = read_vco(clk, 0x137160 + (idx * 4));
251 if (clk < 7) 257 if (idx < 7)
252 sclk = calc_div(priv, clk, sclk, freq, ddiv); 258 sclk = calc_div(clk, idx, sclk, freq, ddiv);
253 return sclk; 259 return sclk;
254} 260}
255 261
256static u32 262static u32
257calc_pll(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *coef) 263calc_pll(struct gk104_clk *clk, int idx, u32 freq, u32 *coef)
258{ 264{
259 struct nvkm_bios *bios = nvkm_bios(priv); 265 struct nvkm_subdev *subdev = &clk->base.subdev;
266 struct nvkm_bios *bios = subdev->device->bios;
260 struct nvbios_pll limits; 267 struct nvbios_pll limits;
261 int N, M, P, ret; 268 int N, M, P, ret;
262 269
263 ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits); 270 ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
264 if (ret) 271 if (ret)
265 return 0; 272 return 0;
266 273
267 limits.refclk = read_div(priv, clk, 0x137120, 0x137140); 274 limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
268 if (!limits.refclk) 275 if (!limits.refclk)
269 return 0; 276 return 0;
270 277
271 ret = gt215_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P); 278 ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
272 if (ret <= 0) 279 if (ret <= 0)
273 return 0; 280 return 0;
274 281
@@ -277,10 +284,10 @@ calc_pll(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *coef)
277} 284}
278 285
279static int 286static int
280calc_clk(struct gk104_clk_priv *priv, 287calc_clk(struct gk104_clk *clk,
281 struct nvkm_cstate *cstate, int clk, int dom) 288 struct nvkm_cstate *cstate, int idx, int dom)
282{ 289{
283 struct gk104_clk_info *info = &priv->eng[clk]; 290 struct gk104_clk_info *info = &clk->eng[idx];
284 u32 freq = cstate->domain[dom]; 291 u32 freq = cstate->domain[dom];
285 u32 src0, div0, div1D, div1P = 0; 292 u32 src0, div0, div1D, div1P = 0;
286 u32 clk0, clk1 = 0; 293 u32 clk0, clk1 = 0;
@@ -290,16 +297,16 @@ calc_clk(struct gk104_clk_priv *priv,
290 return 0; 297 return 0;
291 298
292 /* first possible path, using only dividers */ 299 /* first possible path, using only dividers */
293 clk0 = calc_src(priv, clk, freq, &src0, &div0); 300 clk0 = calc_src(clk, idx, freq, &src0, &div0);
294 clk0 = calc_div(priv, clk, clk0, freq, &div1D); 301 clk0 = calc_div(clk, idx, clk0, freq, &div1D);
295 302
296 /* see if we can get any closer using PLLs */ 303 /* see if we can get any closer using PLLs */
297 if (clk0 != freq && (0x0000ff87 & (1 << clk))) { 304 if (clk0 != freq && (0x0000ff87 & (1 << idx))) {
298 if (clk <= 7) 305 if (idx <= 7)
299 clk1 = calc_pll(priv, clk, freq, &info->coef); 306 clk1 = calc_pll(clk, idx, freq, &info->coef);
300 else 307 else
301 clk1 = cstate->domain[nv_clk_src_hubk06]; 308 clk1 = cstate->domain[nv_clk_src_hubk06];
302 clk1 = calc_div(priv, clk, clk1, freq, &div1P); 309 clk1 = calc_div(clk, idx, clk1, freq, &div1P);
303 } 310 }
304 311
305 /* select the method which gets closest to target freq */ 312 /* select the method which gets closest to target freq */
@@ -320,7 +327,7 @@ calc_clk(struct gk104_clk_priv *priv,
320 info->mdiv |= 0x80000000; 327 info->mdiv |= 0x80000000;
321 info->mdiv |= div1P << 8; 328 info->mdiv |= div1P << 8;
322 } 329 }
323 info->ssel = (1 << clk); 330 info->ssel = (1 << idx);
324 info->dsrc = 0x40000100; 331 info->dsrc = 0x40000100;
325 info->freq = clk1; 332 info->freq = clk1;
326 } 333 }
@@ -329,98 +336,115 @@ calc_clk(struct gk104_clk_priv *priv,
329} 336}
330 337
331static int 338static int
332gk104_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate) 339gk104_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
333{ 340{
334 struct gk104_clk_priv *priv = (void *)clk; 341 struct gk104_clk *clk = gk104_clk(base);
335 int ret; 342 int ret;
336 343
337 if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) || 344 if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
338 (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) || 345 (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
339 (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) || 346 (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
340 (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) || 347 (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
341 (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) || 348 (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
342 (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) || 349 (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) ||
343 (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec))) 350 (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
344 return ret; 351 return ret;
345 352
346 return 0; 353 return 0;
347} 354}
348 355
349static void 356static void
350gk104_clk_prog_0(struct gk104_clk_priv *priv, int clk) 357gk104_clk_prog_0(struct gk104_clk *clk, int idx)
351{ 358{
352 struct gk104_clk_info *info = &priv->eng[clk]; 359 struct gk104_clk_info *info = &clk->eng[idx];
360 struct nvkm_device *device = clk->base.subdev.device;
353 if (!info->ssel) { 361 if (!info->ssel) {
354 nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x8000003f, info->ddiv); 362 nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
355 nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc); 363 nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
356 } 364 }
357} 365}
358 366
359static void 367static void
360gk104_clk_prog_1_0(struct gk104_clk_priv *priv, int clk) 368gk104_clk_prog_1_0(struct gk104_clk *clk, int idx)
361{ 369{
362 nv_mask(priv, 0x137100, (1 << clk), 0x00000000); 370 struct nvkm_device *device = clk->base.subdev.device;
363 nv_wait(priv, 0x137100, (1 << clk), 0x00000000); 371 nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
372 nvkm_msec(device, 2000,
373 if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
374 break;
375 );
364} 376}
365 377
366static void 378static void
367gk104_clk_prog_1_1(struct gk104_clk_priv *priv, int clk) 379gk104_clk_prog_1_1(struct gk104_clk *clk, int idx)
368{ 380{
369 nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000000); 381 struct nvkm_device *device = clk->base.subdev.device;
382 nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
370} 383}
371 384
372static void 385static void
373gk104_clk_prog_2(struct gk104_clk_priv *priv, int clk) 386gk104_clk_prog_2(struct gk104_clk *clk, int idx)
374{ 387{
375 struct gk104_clk_info *info = &priv->eng[clk]; 388 struct gk104_clk_info *info = &clk->eng[idx];
376 const u32 addr = 0x137000 + (clk * 0x20); 389 struct nvkm_device *device = clk->base.subdev.device;
377 nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000); 390 const u32 addr = 0x137000 + (idx * 0x20);
378 nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000); 391 nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
392 nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
379 if (info->coef) { 393 if (info->coef) {
380 nv_wr32(priv, addr + 0x04, info->coef); 394 nvkm_wr32(device, addr + 0x04, info->coef);
381 nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001); 395 nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
382 nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000); 396 nvkm_msec(device, 2000,
383 nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004); 397 if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
398 break;
399 );
400 nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
384 } 401 }
385} 402}
386 403
387static void 404static void
388gk104_clk_prog_3(struct gk104_clk_priv *priv, int clk) 405gk104_clk_prog_3(struct gk104_clk *clk, int idx)
389{ 406{
390 struct gk104_clk_info *info = &priv->eng[clk]; 407 struct gk104_clk_info *info = &clk->eng[idx];
408 struct nvkm_device *device = clk->base.subdev.device;
391 if (info->ssel) 409 if (info->ssel)
392 nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f00, info->mdiv); 410 nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
393 else 411 else
394 nv_mask(priv, 0x137250 + (clk * 0x04), 0x0000003f, info->mdiv); 412 nvkm_mask(device, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
395} 413}
396 414
397static void 415static void
398gk104_clk_prog_4_0(struct gk104_clk_priv *priv, int clk) 416gk104_clk_prog_4_0(struct gk104_clk *clk, int idx)
399{ 417{
400 struct gk104_clk_info *info = &priv->eng[clk]; 418 struct gk104_clk_info *info = &clk->eng[idx];
419 struct nvkm_device *device = clk->base.subdev.device;
401 if (info->ssel) { 420 if (info->ssel) {
402 nv_mask(priv, 0x137100, (1 << clk), info->ssel); 421 nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
403 nv_wait(priv, 0x137100, (1 << clk), info->ssel); 422 nvkm_msec(device, 2000,
423 u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
424 if (tmp == info->ssel)
425 break;
426 );
404 } 427 }
405} 428}
406 429
407static void 430static void
408gk104_clk_prog_4_1(struct gk104_clk_priv *priv, int clk) 431gk104_clk_prog_4_1(struct gk104_clk *clk, int idx)
409{ 432{
410 struct gk104_clk_info *info = &priv->eng[clk]; 433 struct gk104_clk_info *info = &clk->eng[idx];
434 struct nvkm_device *device = clk->base.subdev.device;
411 if (info->ssel) { 435 if (info->ssel) {
412 nv_mask(priv, 0x137160 + (clk * 0x04), 0x40000000, 0x40000000); 436 nvkm_mask(device, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
413 nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000100); 437 nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
414 } 438 }
415} 439}
416 440
417static int 441static int
418gk104_clk_prog(struct nvkm_clk *clk) 442gk104_clk_prog(struct nvkm_clk *base)
419{ 443{
420 struct gk104_clk_priv *priv = (void *)clk; 444 struct gk104_clk *clk = gk104_clk(base);
421 struct { 445 struct {
422 u32 mask; 446 u32 mask;
423 void (*exec)(struct gk104_clk_priv *, int); 447 void (*exec)(struct gk104_clk *, int);
424 } stage[] = { 448 } stage[] = {
425 { 0x007f, gk104_clk_prog_0 }, /* div programming */ 449 { 0x007f, gk104_clk_prog_0 }, /* div programming */
426 { 0x007f, gk104_clk_prog_1_0 }, /* select div mode */ 450 { 0x007f, gk104_clk_prog_1_0 }, /* select div mode */
@@ -433,12 +457,12 @@ gk104_clk_prog(struct nvkm_clk *clk)
433 int i, j; 457 int i, j;
434 458
435 for (i = 0; i < ARRAY_SIZE(stage); i++) { 459 for (i = 0; i < ARRAY_SIZE(stage); i++) {
436 for (j = 0; j < ARRAY_SIZE(priv->eng); j++) { 460 for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
437 if (!(stage[i].mask & (1 << j))) 461 if (!(stage[i].mask & (1 << j)))
438 continue; 462 continue;
439 if (!priv->eng[j].freq) 463 if (!clk->eng[j].freq)
440 continue; 464 continue;
441 stage[i].exec(priv, j); 465 stage[i].exec(clk, j);
442 } 466 }
443 } 467 }
444 468
@@ -446,55 +470,41 @@ gk104_clk_prog(struct nvkm_clk *clk)
446} 470}
447 471
448static void 472static void
449gk104_clk_tidy(struct nvkm_clk *clk) 473gk104_clk_tidy(struct nvkm_clk *base)
450{ 474{
451 struct gk104_clk_priv *priv = (void *)clk; 475 struct gk104_clk *clk = gk104_clk(base);
452 memset(priv->eng, 0x00, sizeof(priv->eng)); 476 memset(clk->eng, 0x00, sizeof(clk->eng));
453} 477}
454 478
455static struct nvkm_domain 479static const struct nvkm_clk_func
456gk104_domain[] = { 480gk104_clk = {
457 { nv_clk_src_crystal, 0xff }, 481 .read = gk104_clk_read,
458 { nv_clk_src_href , 0xff }, 482 .calc = gk104_clk_calc,
459 { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 }, 483 .prog = gk104_clk_prog,
460 { nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE }, 484 .tidy = gk104_clk_tidy,
461 { nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE }, 485 .domains = {
462 { nv_clk_src_mem , 0x03, 0, "memory", 500 }, 486 { nv_clk_src_crystal, 0xff },
463 { nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE }, 487 { nv_clk_src_href , 0xff },
464 { nv_clk_src_hubk01 , 0x05 }, 488 { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
465 { nv_clk_src_vdec , 0x06 }, 489 { nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
466 { nv_clk_src_daemon , 0x07 }, 490 { nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE },
467 { nv_clk_src_max } 491 { nv_clk_src_mem , 0x03, 0, "memory", 500 },
492 { nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
493 { nv_clk_src_hubk01 , 0x05 },
494 { nv_clk_src_vdec , 0x06 },
495 { nv_clk_src_daemon , 0x07 },
496 { nv_clk_src_max }
497 }
468}; 498};
469 499
470static int 500int
471gk104_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 501gk104_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
472 struct nvkm_oclass *oclass, void *data, u32 size,
473 struct nvkm_object **pobject)
474{ 502{
475 struct gk104_clk_priv *priv; 503 struct gk104_clk *clk;
476 int ret;
477 504
478 ret = nvkm_clk_create(parent, engine, oclass, gk104_domain, 505 if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
479 NULL, 0, true, &priv); 506 return -ENOMEM;
480 *pobject = nv_object(priv); 507 *pclk = &clk->base;
481 if (ret)
482 return ret;
483 508
484 priv->base.read = gk104_clk_read; 509 return nvkm_clk_ctor(&gk104_clk, device, index, true, &clk->base);
485 priv->base.calc = gk104_clk_calc;
486 priv->base.prog = gk104_clk_prog;
487 priv->base.tidy = gk104_clk_tidy;
488 return 0;
489} 510}
490
491struct nvkm_oclass
492gk104_clk_oclass = {
493 .handle = NV_SUBDEV(CLK, 0xe0),
494 .ofuncs = &(struct nvkm_ofuncs) {
495 .ctor = gk104_clk_ctor,
496 .dtor = _nvkm_clk_dtor,
497 .init = _nvkm_clk_init,
498 .fini = _nvkm_clk_fini,
499 },
500};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index 65c532742b08..254094ab7fb8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -22,14 +22,11 @@
22 * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c 22 * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
23 * 23 *
24 */ 24 */
25#include <subdev/clk.h> 25#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
26#include <subdev/timer.h> 26#include "priv.h"
27
28#include <core/device.h>
29 27
30#ifdef __KERNEL__ 28#include <core/tegra.h>
31#include <nouveau_platform.h> 29#include <subdev/timer.h>
32#endif
33 30
34#define MHZ (1000 * 1000) 31#define MHZ (1000 * 1000)
35 32
@@ -117,41 +114,42 @@ static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
117 .min_pl = 1, .max_pl = 32, 114 .min_pl = 1, .max_pl = 32,
118}; 115};
119 116
120struct gk20a_clk_priv { 117struct gk20a_clk {
121 struct nvkm_clk base; 118 struct nvkm_clk base;
122 const struct gk20a_clk_pllg_params *params; 119 const struct gk20a_clk_pllg_params *params;
123 u32 m, n, pl; 120 u32 m, n, pl;
124 u32 parent_rate; 121 u32 parent_rate;
125}; 122};
126#define to_gk20a_clk(base) container_of(base, struct gk20a_clk_priv, base)
127 123
128static void 124static void
129gk20a_pllg_read_mnp(struct gk20a_clk_priv *priv) 125gk20a_pllg_read_mnp(struct gk20a_clk *clk)
130{ 126{
127 struct nvkm_device *device = clk->base.subdev.device;
131 u32 val; 128 u32 val;
132 129
133 val = nv_rd32(priv, GPCPLL_COEFF); 130 val = nvkm_rd32(device, GPCPLL_COEFF);
134 priv->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH); 131 clk->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
135 priv->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH); 132 clk->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
136 priv->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH); 133 clk->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
137} 134}
138 135
139static u32 136static u32
140gk20a_pllg_calc_rate(struct gk20a_clk_priv *priv) 137gk20a_pllg_calc_rate(struct gk20a_clk *clk)
141{ 138{
142 u32 rate; 139 u32 rate;
143 u32 divider; 140 u32 divider;
144 141
145 rate = priv->parent_rate * priv->n; 142 rate = clk->parent_rate * clk->n;
146 divider = priv->m * pl_to_div[priv->pl]; 143 divider = clk->m * pl_to_div[clk->pl];
147 do_div(rate, divider); 144 do_div(rate, divider);
148 145
149 return rate / 2; 146 return rate / 2;
150} 147}
151 148
152static int 149static int
153gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate) 150gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
154{ 151{
152 struct nvkm_subdev *subdev = &clk->base.subdev;
155 u32 target_clk_f, ref_clk_f, target_freq; 153 u32 target_clk_f, ref_clk_f, target_freq;
156 u32 min_vco_f, max_vco_f; 154 u32 min_vco_f, max_vco_f;
157 u32 low_pl, high_pl, best_pl; 155 u32 low_pl, high_pl, best_pl;
@@ -163,13 +161,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
163 u32 pl; 161 u32 pl;
164 162
165 target_clk_f = rate * 2 / MHZ; 163 target_clk_f = rate * 2 / MHZ;
166 ref_clk_f = priv->parent_rate / MHZ; 164 ref_clk_f = clk->parent_rate / MHZ;
167 165
168 max_vco_f = priv->params->max_vco; 166 max_vco_f = clk->params->max_vco;
169 min_vco_f = priv->params->min_vco; 167 min_vco_f = clk->params->min_vco;
170 best_m = priv->params->max_m; 168 best_m = clk->params->max_m;
171 best_n = priv->params->min_n; 169 best_n = clk->params->min_n;
172 best_pl = priv->params->min_pl; 170 best_pl = clk->params->min_pl;
173 171
174 target_vco_f = target_clk_f + target_clk_f / 50; 172 target_vco_f = target_clk_f + target_clk_f / 50;
175 if (max_vco_f < target_vco_f) 173 if (max_vco_f < target_vco_f)
@@ -177,13 +175,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
177 175
178 /* min_pl <= high_pl <= max_pl */ 176 /* min_pl <= high_pl <= max_pl */
179 high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f; 177 high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
180 high_pl = min(high_pl, priv->params->max_pl); 178 high_pl = min(high_pl, clk->params->max_pl);
181 high_pl = max(high_pl, priv->params->min_pl); 179 high_pl = max(high_pl, clk->params->min_pl);
182 180
183 /* min_pl <= low_pl <= max_pl */ 181 /* min_pl <= low_pl <= max_pl */
184 low_pl = min_vco_f / target_vco_f; 182 low_pl = min_vco_f / target_vco_f;
185 low_pl = min(low_pl, priv->params->max_pl); 183 low_pl = min(low_pl, clk->params->max_pl);
186 low_pl = max(low_pl, priv->params->min_pl); 184 low_pl = max(low_pl, clk->params->min_pl);
187 185
188 /* Find Indices of high_pl and low_pl */ 186 /* Find Indices of high_pl and low_pl */
189 for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) { 187 for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
@@ -199,30 +197,30 @@ gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
199 } 197 }
200 } 198 }
201 199
202 nv_debug(priv, "low_PL %d(div%d), high_PL %d(div%d)", low_pl, 200 nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
203 pl_to_div[low_pl], high_pl, pl_to_div[high_pl]); 201 pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
204 202
205 /* Select lowest possible VCO */ 203 /* Select lowest possible VCO */
206 for (pl = low_pl; pl <= high_pl; pl++) { 204 for (pl = low_pl; pl <= high_pl; pl++) {
207 target_vco_f = target_clk_f * pl_to_div[pl]; 205 target_vco_f = target_clk_f * pl_to_div[pl];
208 for (m = priv->params->min_m; m <= priv->params->max_m; m++) { 206 for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
209 u_f = ref_clk_f / m; 207 u_f = ref_clk_f / m;
210 208
211 if (u_f < priv->params->min_u) 209 if (u_f < clk->params->min_u)
212 break; 210 break;
213 if (u_f > priv->params->max_u) 211 if (u_f > clk->params->max_u)
214 continue; 212 continue;
215 213
216 n = (target_vco_f * m) / ref_clk_f; 214 n = (target_vco_f * m) / ref_clk_f;
217 n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f; 215 n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;
218 216
219 if (n > priv->params->max_n) 217 if (n > clk->params->max_n)
220 break; 218 break;
221 219
222 for (; n <= n2; n++) { 220 for (; n <= n2; n++) {
223 if (n < priv->params->min_n) 221 if (n < clk->params->min_n)
224 continue; 222 continue;
225 if (n > priv->params->max_n) 223 if (n > clk->params->max_n)
226 break; 224 break;
227 225
228 vco_f = ref_clk_f * n / m; 226 vco_f = ref_clk_f * n / m;
@@ -250,71 +248,75 @@ found_match:
250 WARN_ON(best_delta == ~0); 248 WARN_ON(best_delta == ~0);
251 249
252 if (best_delta != 0) 250 if (best_delta != 0)
253 nv_debug(priv, "no best match for target @ %dMHz on gpc_pll", 251 nvkm_debug(subdev,
254 target_clk_f); 252 "no best match for target @ %dMHz on gpc_pll",
253 target_clk_f);
255 254
256 priv->m = best_m; 255 clk->m = best_m;
257 priv->n = best_n; 256 clk->n = best_n;
258 priv->pl = best_pl; 257 clk->pl = best_pl;
259 258
260 target_freq = gk20a_pllg_calc_rate(priv) / MHZ; 259 target_freq = gk20a_pllg_calc_rate(clk) / MHZ;
261 260
262 nv_debug(priv, "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n", 261 nvkm_debug(subdev,
263 target_freq, priv->m, priv->n, priv->pl, pl_to_div[priv->pl]); 262 "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
263 target_freq, clk->m, clk->n, clk->pl, pl_to_div[clk->pl]);
264 return 0; 264 return 0;
265} 265}
266 266
267static int 267static int
268gk20a_pllg_slide(struct gk20a_clk_priv *priv, u32 n) 268gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
269{ 269{
270 struct nvkm_subdev *subdev = &clk->base.subdev;
271 struct nvkm_device *device = subdev->device;
270 u32 val; 272 u32 val;
271 int ramp_timeout; 273 int ramp_timeout;
272 274
273 /* get old coefficients */ 275 /* get old coefficients */
274 val = nv_rd32(priv, GPCPLL_COEFF); 276 val = nvkm_rd32(device, GPCPLL_COEFF);
275 /* do nothing if NDIV is the same */ 277 /* do nothing if NDIV is the same */
276 if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH))) 278 if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
277 return 0; 279 return 0;
278 280
279 /* setup */ 281 /* setup */
280 nv_mask(priv, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT, 282 nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
281 0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT); 283 0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
282 nv_mask(priv, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT, 284 nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
283 0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT); 285 0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
284 286
285 /* pll slowdown mode */ 287 /* pll slowdown mode */
286 nv_mask(priv, GPCPLL_NDIV_SLOWDOWN, 288 nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
287 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT), 289 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
288 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT)); 290 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
289 291
290 /* new ndiv ready for ramp */ 292 /* new ndiv ready for ramp */
291 val = nv_rd32(priv, GPCPLL_COEFF); 293 val = nvkm_rd32(device, GPCPLL_COEFF);
292 val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT); 294 val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
293 val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT; 295 val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
294 udelay(1); 296 udelay(1);
295 nv_wr32(priv, GPCPLL_COEFF, val); 297 nvkm_wr32(device, GPCPLL_COEFF, val);
296 298
297 /* dynamic ramp to new ndiv */ 299 /* dynamic ramp to new ndiv */
298 val = nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN); 300 val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
299 val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT; 301 val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
300 udelay(1); 302 udelay(1);
301 nv_wr32(priv, GPCPLL_NDIV_SLOWDOWN, val); 303 nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);
302 304
303 for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) { 305 for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
304 udelay(1); 306 udelay(1);
305 val = nv_rd32(priv, GPC_BCAST_NDIV_SLOWDOWN_DEBUG); 307 val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
306 if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) 308 if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
307 break; 309 break;
308 } 310 }
309 311
310 /* exit slowdown mode */ 312 /* exit slowdown mode */
311 nv_mask(priv, GPCPLL_NDIV_SLOWDOWN, 313 nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
312 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) | 314 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
313 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0); 315 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
314 nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN); 316 nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
315 317
316 if (ramp_timeout <= 0) { 318 if (ramp_timeout <= 0) {
317 nv_error(priv, "gpcpll dynamic ramp timeout\n"); 319 nvkm_error(subdev, "gpcpll dynamic ramp timeout\n");
318 return -ETIMEDOUT; 320 return -ETIMEDOUT;
319 } 321 }
320 322
@@ -322,149 +324,147 @@ gk20a_pllg_slide(struct gk20a_clk_priv *priv, u32 n)
322} 324}
323 325
324static void 326static void
325_gk20a_pllg_enable(struct gk20a_clk_priv *priv) 327_gk20a_pllg_enable(struct gk20a_clk *clk)
326{ 328{
327 nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE); 329 struct nvkm_device *device = clk->base.subdev.device;
328 nv_rd32(priv, GPCPLL_CFG); 330 nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
331 nvkm_rd32(device, GPCPLL_CFG);
329} 332}
330 333
331static void 334static void
332_gk20a_pllg_disable(struct gk20a_clk_priv *priv) 335_gk20a_pllg_disable(struct gk20a_clk *clk)
333{ 336{
334 nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0); 337 struct nvkm_device *device = clk->base.subdev.device;
335 nv_rd32(priv, GPCPLL_CFG); 338 nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
339 nvkm_rd32(device, GPCPLL_CFG);
336} 340}
337 341
338static int 342static int
339_gk20a_pllg_program_mnp(struct gk20a_clk_priv *priv, bool allow_slide) 343_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
340{ 344{
345 struct nvkm_subdev *subdev = &clk->base.subdev;
346 struct nvkm_device *device = subdev->device;
341 u32 val, cfg; 347 u32 val, cfg;
342 u32 m_old, pl_old, n_lo; 348 u32 m_old, pl_old, n_lo;
343 349
344 /* get old coefficients */ 350 /* get old coefficients */
345 val = nv_rd32(priv, GPCPLL_COEFF); 351 val = nvkm_rd32(device, GPCPLL_COEFF);
346 m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH); 352 m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
347 pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH); 353 pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
348 354
349 /* do NDIV slide if there is no change in M and PL */ 355 /* do NDIV slide if there is no change in M and PL */
350 cfg = nv_rd32(priv, GPCPLL_CFG); 356 cfg = nvkm_rd32(device, GPCPLL_CFG);
351 if (allow_slide && priv->m == m_old && priv->pl == pl_old && 357 if (allow_slide && clk->m == m_old && clk->pl == pl_old &&
352 (cfg & GPCPLL_CFG_ENABLE)) { 358 (cfg & GPCPLL_CFG_ENABLE)) {
353 return gk20a_pllg_slide(priv, priv->n); 359 return gk20a_pllg_slide(clk, clk->n);
354 } 360 }
355 361
356 /* slide down to NDIV_LO */ 362 /* slide down to NDIV_LO */
357 n_lo = DIV_ROUND_UP(m_old * priv->params->min_vco, 363 n_lo = DIV_ROUND_UP(m_old * clk->params->min_vco,
358 priv->parent_rate / MHZ); 364 clk->parent_rate / MHZ);
359 if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) { 365 if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
360 int ret = gk20a_pllg_slide(priv, n_lo); 366 int ret = gk20a_pllg_slide(clk, n_lo);
361 367
362 if (ret) 368 if (ret)
363 return ret; 369 return ret;
364 } 370 }
365 371
366 /* split FO-to-bypass jump in halfs by setting out divider 1:2 */ 372 /* split FO-to-bypass jump in halfs by setting out divider 1:2 */
367 nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, 373 nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
368 0x2 << GPC2CLK_OUT_VCODIV_SHIFT); 374 0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
369 375
370 /* put PLL in bypass before programming it */ 376 /* put PLL in bypass before programming it */
371 val = nv_rd32(priv, SEL_VCO); 377 val = nvkm_rd32(device, SEL_VCO);
372 val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); 378 val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
373 udelay(2); 379 udelay(2);
374 nv_wr32(priv, SEL_VCO, val); 380 nvkm_wr32(device, SEL_VCO, val);
375 381
376 /* get out from IDDQ */ 382 /* get out from IDDQ */
377 val = nv_rd32(priv, GPCPLL_CFG); 383 val = nvkm_rd32(device, GPCPLL_CFG);
378 if (val & GPCPLL_CFG_IDDQ) { 384 if (val & GPCPLL_CFG_IDDQ) {
379 val &= ~GPCPLL_CFG_IDDQ; 385 val &= ~GPCPLL_CFG_IDDQ;
380 nv_wr32(priv, GPCPLL_CFG, val); 386 nvkm_wr32(device, GPCPLL_CFG, val);
381 nv_rd32(priv, GPCPLL_CFG); 387 nvkm_rd32(device, GPCPLL_CFG);
382 udelay(2); 388 udelay(2);
383 } 389 }
384 390
385 _gk20a_pllg_disable(priv); 391 _gk20a_pllg_disable(clk);
386 392
387 nv_debug(priv, "%s: m=%d n=%d pl=%d\n", __func__, priv->m, priv->n, 393 nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
388 priv->pl); 394 clk->m, clk->n, clk->pl);
389 395
390 n_lo = DIV_ROUND_UP(priv->m * priv->params->min_vco, 396 n_lo = DIV_ROUND_UP(clk->m * clk->params->min_vco,
391 priv->parent_rate / MHZ); 397 clk->parent_rate / MHZ);
392 val = priv->m << GPCPLL_COEFF_M_SHIFT; 398 val = clk->m << GPCPLL_COEFF_M_SHIFT;
393 val |= (allow_slide ? n_lo : priv->n) << GPCPLL_COEFF_N_SHIFT; 399 val |= (allow_slide ? n_lo : clk->n) << GPCPLL_COEFF_N_SHIFT;
394 val |= priv->pl << GPCPLL_COEFF_P_SHIFT; 400 val |= clk->pl << GPCPLL_COEFF_P_SHIFT;
395 nv_wr32(priv, GPCPLL_COEFF, val); 401 nvkm_wr32(device, GPCPLL_COEFF, val);
396 402
397 _gk20a_pllg_enable(priv); 403 _gk20a_pllg_enable(clk);
398 404
399 val = nv_rd32(priv, GPCPLL_CFG); 405 val = nvkm_rd32(device, GPCPLL_CFG);
400 if (val & GPCPLL_CFG_LOCK_DET_OFF) { 406 if (val & GPCPLL_CFG_LOCK_DET_OFF) {
401 val &= ~GPCPLL_CFG_LOCK_DET_OFF; 407 val &= ~GPCPLL_CFG_LOCK_DET_OFF;
402 nv_wr32(priv, GPCPLL_CFG, val); 408 nvkm_wr32(device, GPCPLL_CFG, val);
403 } 409 }
404 410
405 if (!nvkm_timer_wait_eq(priv, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK, 411 if (nvkm_usec(device, 300,
406 GPCPLL_CFG_LOCK)) { 412 if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
407 nv_error(priv, "%s: timeout waiting for pllg lock\n", __func__); 413 break;
414 ) < 0)
408 return -ETIMEDOUT; 415 return -ETIMEDOUT;
409 }
410 416
411 /* switch to VCO mode */ 417 /* switch to VCO mode */
412 nv_mask(priv, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); 418 nvkm_mask(device, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
413 419
414 /* restore out divider 1:1 */ 420 /* restore out divider 1:1 */
415 val = nv_rd32(priv, GPC2CLK_OUT); 421 val = nvkm_rd32(device, GPC2CLK_OUT);
416 val &= ~GPC2CLK_OUT_VCODIV_MASK; 422 val &= ~GPC2CLK_OUT_VCODIV_MASK;
417 udelay(2); 423 udelay(2);
418 nv_wr32(priv, GPC2CLK_OUT, val); 424 nvkm_wr32(device, GPC2CLK_OUT, val);
419 425
420 /* slide up to new NDIV */ 426 /* slide up to new NDIV */
421 return allow_slide ? gk20a_pllg_slide(priv, priv->n) : 0; 427 return allow_slide ? gk20a_pllg_slide(clk, clk->n) : 0;
422} 428}
423 429
424static int 430static int
425gk20a_pllg_program_mnp(struct gk20a_clk_priv *priv) 431gk20a_pllg_program_mnp(struct gk20a_clk *clk)
426{ 432{
427 int err; 433 int err;
428 434
429 err = _gk20a_pllg_program_mnp(priv, true); 435 err = _gk20a_pllg_program_mnp(clk, true);
430 if (err) 436 if (err)
431 err = _gk20a_pllg_program_mnp(priv, false); 437 err = _gk20a_pllg_program_mnp(clk, false);
432 438
433 return err; 439 return err;
434} 440}
435 441
436static void 442static void
437gk20a_pllg_disable(struct gk20a_clk_priv *priv) 443gk20a_pllg_disable(struct gk20a_clk *clk)
438{ 444{
445 struct nvkm_device *device = clk->base.subdev.device;
439 u32 val; 446 u32 val;
440 447
441 /* slide to VCO min */ 448 /* slide to VCO min */
442 val = nv_rd32(priv, GPCPLL_CFG); 449 val = nvkm_rd32(device, GPCPLL_CFG);
443 if (val & GPCPLL_CFG_ENABLE) { 450 if (val & GPCPLL_CFG_ENABLE) {
444 u32 coeff, m, n_lo; 451 u32 coeff, m, n_lo;
445 452
446 coeff = nv_rd32(priv, GPCPLL_COEFF); 453 coeff = nvkm_rd32(device, GPCPLL_COEFF);
447 m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH); 454 m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
448 n_lo = DIV_ROUND_UP(m * priv->params->min_vco, 455 n_lo = DIV_ROUND_UP(m * clk->params->min_vco,
449 priv->parent_rate / MHZ); 456 clk->parent_rate / MHZ);
450 gk20a_pllg_slide(priv, n_lo); 457 gk20a_pllg_slide(clk, n_lo);
451 } 458 }
452 459
453 /* put PLL in bypass before disabling it */ 460 /* put PLL in bypass before disabling it */
454 nv_mask(priv, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0); 461 nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
455 462
456 _gk20a_pllg_disable(priv); 463 _gk20a_pllg_disable(clk);
457} 464}
458 465
459#define GK20A_CLK_GPC_MDIV 1000 466#define GK20A_CLK_GPC_MDIV 1000
460 467
461static struct nvkm_domain
462gk20a_domains[] = {
463 { nv_clk_src_crystal, 0xff },
464 { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
465 { nv_clk_src_max }
466};
467
468static struct nvkm_pstate 468static struct nvkm_pstate
469gk20a_pstates[] = { 469gk20a_pstates[] = {
470 { 470 {
@@ -560,87 +560,99 @@ gk20a_pstates[] = {
560}; 560};
561 561
562static int 562static int
563gk20a_clk_read(struct nvkm_clk *clk, enum nv_clk_src src) 563gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
564{ 564{
565 struct gk20a_clk_priv *priv = (void *)clk; 565 struct gk20a_clk *clk = gk20a_clk(base);
566 struct nvkm_subdev *subdev = &clk->base.subdev;
567 struct nvkm_device *device = subdev->device;
566 568
567 switch (src) { 569 switch (src) {
568 case nv_clk_src_crystal: 570 case nv_clk_src_crystal:
569 return nv_device(clk)->crystal; 571 return device->crystal;
570 case nv_clk_src_gpc: 572 case nv_clk_src_gpc:
571 gk20a_pllg_read_mnp(priv); 573 gk20a_pllg_read_mnp(clk);
572 return gk20a_pllg_calc_rate(priv) / GK20A_CLK_GPC_MDIV; 574 return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
573 default: 575 default:
574 nv_error(clk, "invalid clock source %d\n", src); 576 nvkm_error(subdev, "invalid clock source %d\n", src);
575 return -EINVAL; 577 return -EINVAL;
576 } 578 }
577} 579}
578 580
579static int 581static int
580gk20a_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate) 582gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
581{ 583{
582 struct gk20a_clk_priv *priv = (void *)clk; 584 struct gk20a_clk *clk = gk20a_clk(base);
583 585
584 return gk20a_pllg_calc_mnp(priv, cstate->domain[nv_clk_src_gpc] * 586 return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
585 GK20A_CLK_GPC_MDIV); 587 GK20A_CLK_GPC_MDIV);
586} 588}
587 589
588static int 590static int
589gk20a_clk_prog(struct nvkm_clk *clk) 591gk20a_clk_prog(struct nvkm_clk *base)
590{ 592{
591 struct gk20a_clk_priv *priv = (void *)clk; 593 struct gk20a_clk *clk = gk20a_clk(base);
592 594
593 return gk20a_pllg_program_mnp(priv); 595 return gk20a_pllg_program_mnp(clk);
594} 596}
595 597
596static void 598static void
597gk20a_clk_tidy(struct nvkm_clk *clk) 599gk20a_clk_tidy(struct nvkm_clk *base)
598{ 600{
599} 601}
600 602
601static int 603static void
602gk20a_clk_fini(struct nvkm_object *object, bool suspend) 604gk20a_clk_fini(struct nvkm_clk *base)
603{ 605{
604 struct gk20a_clk_priv *priv = (void *)object; 606 struct gk20a_clk *clk = gk20a_clk(base);
605 int ret; 607 gk20a_pllg_disable(clk);
606
607 ret = nvkm_clk_fini(&priv->base, false);
608
609 gk20a_pllg_disable(priv);
610
611 return ret;
612} 608}
613 609
614static int 610static int
615gk20a_clk_init(struct nvkm_object *object) 611gk20a_clk_init(struct nvkm_clk *base)
616{ 612{
617 struct gk20a_clk_priv *priv = (void *)object; 613 struct gk20a_clk *clk = gk20a_clk(base);
614 struct nvkm_subdev *subdev = &clk->base.subdev;
615 struct nvkm_device *device = subdev->device;
618 int ret; 616 int ret;
619 617
620 nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL); 618 nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
621
622 ret = nvkm_clk_init(&priv->base);
623 if (ret)
624 return ret;
625 619
626 ret = gk20a_clk_prog(&priv->base); 620 ret = gk20a_clk_prog(&clk->base);
627 if (ret) { 621 if (ret) {
628 nv_error(priv, "cannot initialize clock\n"); 622 nvkm_error(subdev, "cannot initialize clock\n");
629 return ret; 623 return ret;
630 } 624 }
631 625
632 return 0; 626 return 0;
633} 627}
634 628
635static int 629static const struct nvkm_clk_func
636gk20a_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 630gk20a_clk = {
637 struct nvkm_oclass *oclass, void *data, u32 size, 631 .init = gk20a_clk_init,
638 struct nvkm_object **pobject) 632 .fini = gk20a_clk_fini,
633 .read = gk20a_clk_read,
634 .calc = gk20a_clk_calc,
635 .prog = gk20a_clk_prog,
636 .tidy = gk20a_clk_tidy,
637 .pstates = gk20a_pstates,
638 .nr_pstates = ARRAY_SIZE(gk20a_pstates),
639 .domains = {
640 { nv_clk_src_crystal, 0xff },
641 { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
642 { nv_clk_src_max }
643 }
644};
645
646int
647gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
639{ 648{
640 struct gk20a_clk_priv *priv; 649 struct nvkm_device_tegra *tdev = device->func->tegra(device);
641 struct nouveau_platform_device *plat; 650 struct gk20a_clk *clk;
642 int ret; 651 int ret, i;
643 int i; 652
653 if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
654 return -ENOMEM;
655 *pclk = &clk->base;
644 656
645 /* Finish initializing the pstates */ 657 /* Finish initializing the pstates */
646 for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) { 658 for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) {
@@ -648,33 +660,11 @@ gk20a_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
648 gk20a_pstates[i].pstate = i + 1; 660 gk20a_pstates[i].pstate = i + 1;
649 } 661 }
650 662
651 ret = nvkm_clk_create(parent, engine, oclass, gk20a_domains, 663 clk->params = &gk20a_pllg_params;
652 gk20a_pstates, ARRAY_SIZE(gk20a_pstates), 664 clk->parent_rate = clk_get_rate(tdev->clk);
653 true, &priv);
654 *pobject = nv_object(priv);
655 if (ret)
656 return ret;
657 665
658 priv->params = &gk20a_pllg_params; 666 ret = nvkm_clk_ctor(&gk20a_clk, device, index, true, &clk->base);
659 667 nvkm_info(&clk->base.subdev, "parent clock rate: %d Mhz\n",
660 plat = nv_device_to_platform(nv_device(parent)); 668 clk->parent_rate / MHZ);
661 priv->parent_rate = clk_get_rate(plat->gpu->clk); 669 return ret;
662 nv_info(priv, "parent clock rate: %d Mhz\n", priv->parent_rate / MHZ);
663
664 priv->base.read = gk20a_clk_read;
665 priv->base.calc = gk20a_clk_calc;
666 priv->base.prog = gk20a_clk_prog;
667 priv->base.tidy = gk20a_clk_tidy;
668 return 0;
669} 670}
670
671struct nvkm_oclass
672gk20a_clk_oclass = {
673 .handle = NV_SUBDEV(CLK, 0xea),
674 .ofuncs = &(struct nvkm_ofuncs) {
675 .ctor = gk20a_clk_ctor,
676 .dtor = _nvkm_subdev_dtor,
677 .init = gk20a_clk_init,
678 .fini = gk20a_clk_fini,
679 },
680};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 065e9f5c8db9..07feae620c8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -22,56 +22,58 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 * Roy Spliet 23 * Roy Spliet
24 */ 24 */
25#define gt215_clk(p) container_of((p), struct gt215_clk, base)
25#include "gt215.h" 26#include "gt215.h"
26#include "pll.h" 27#include "pll.h"
27 28
28#include <core/device.h>
29#include <engine/fifo.h> 29#include <engine/fifo.h>
30#include <subdev/bios.h> 30#include <subdev/bios.h>
31#include <subdev/bios/pll.h> 31#include <subdev/bios/pll.h>
32#include <subdev/timer.h> 32#include <subdev/timer.h>
33 33
34struct gt215_clk_priv { 34struct gt215_clk {
35 struct nvkm_clk base; 35 struct nvkm_clk base;
36 struct gt215_clk_info eng[nv_clk_src_max]; 36 struct gt215_clk_info eng[nv_clk_src_max];
37}; 37};
38 38
39static u32 read_clk(struct gt215_clk_priv *, int, bool); 39static u32 read_clk(struct gt215_clk *, int, bool);
40static u32 read_pll(struct gt215_clk_priv *, int, u32); 40static u32 read_pll(struct gt215_clk *, int, u32);
41 41
42static u32 42static u32
43read_vco(struct gt215_clk_priv *priv, int clk) 43read_vco(struct gt215_clk *clk, int idx)
44{ 44{
45 u32 sctl = nv_rd32(priv, 0x4120 + (clk * 4)); 45 struct nvkm_device *device = clk->base.subdev.device;
46 u32 sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
46 47
47 switch (sctl & 0x00000030) { 48 switch (sctl & 0x00000030) {
48 case 0x00000000: 49 case 0x00000000:
49 return nv_device(priv)->crystal; 50 return device->crystal;
50 case 0x00000020: 51 case 0x00000020:
51 return read_pll(priv, 0x41, 0x00e820); 52 return read_pll(clk, 0x41, 0x00e820);
52 case 0x00000030: 53 case 0x00000030:
53 return read_pll(priv, 0x42, 0x00e8a0); 54 return read_pll(clk, 0x42, 0x00e8a0);
54 default: 55 default:
55 return 0; 56 return 0;
56 } 57 }
57} 58}
58 59
59static u32 60static u32
60read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en) 61read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
61{ 62{
63 struct nvkm_device *device = clk->base.subdev.device;
62 u32 sctl, sdiv, sclk; 64 u32 sctl, sdiv, sclk;
63 65
64 /* refclk for the 0xe8xx plls is a fixed frequency */ 66 /* refclk for the 0xe8xx plls is a fixed frequency */
65 if (clk >= 0x40) { 67 if (idx >= 0x40) {
66 if (nv_device(priv)->chipset == 0xaf) { 68 if (device->chipset == 0xaf) {
67 /* no joke.. seriously.. sigh.. */ 69 /* no joke.. seriously.. sigh.. */
68 return nv_rd32(priv, 0x00471c) * 1000; 70 return nvkm_rd32(device, 0x00471c) * 1000;
69 } 71 }
70 72
71 return nv_device(priv)->crystal; 73 return device->crystal;
72 } 74 }
73 75
74 sctl = nv_rd32(priv, 0x4120 + (clk * 4)); 76 sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
75 if (!ignore_en && !(sctl & 0x00000100)) 77 if (!ignore_en && !(sctl & 0x00000100))
76 return 0; 78 return 0;
77 79
@@ -83,7 +85,7 @@ read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
83 switch (sctl & 0x00003000) { 85 switch (sctl & 0x00003000) {
84 case 0x00000000: 86 case 0x00000000:
85 if (!(sctl & 0x00000200)) 87 if (!(sctl & 0x00000200))
86 return nv_device(priv)->crystal; 88 return device->crystal;
87 return 0; 89 return 0;
88 case 0x00002000: 90 case 0x00002000:
89 if (sctl & 0x00000040) 91 if (sctl & 0x00000040)
@@ -94,7 +96,7 @@ read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
94 if (!(sctl & 0x00000001)) 96 if (!(sctl & 0x00000001))
95 return 0; 97 return 0;
96 98
97 sclk = read_vco(priv, clk); 99 sclk = read_vco(clk, idx);
98 sdiv = ((sctl & 0x003f0000) >> 16) + 2; 100 sdiv = ((sctl & 0x003f0000) >> 16) + 2;
99 return (sclk * 2) / sdiv; 101 return (sclk * 2) / sdiv;
100 default: 102 default:
@@ -103,14 +105,15 @@ read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
103} 105}
104 106
105static u32 107static u32
106read_pll(struct gt215_clk_priv *priv, int clk, u32 pll) 108read_pll(struct gt215_clk *clk, int idx, u32 pll)
107{ 109{
108 u32 ctrl = nv_rd32(priv, pll + 0); 110 struct nvkm_device *device = clk->base.subdev.device;
111 u32 ctrl = nvkm_rd32(device, pll + 0);
109 u32 sclk = 0, P = 1, N = 1, M = 1; 112 u32 sclk = 0, P = 1, N = 1, M = 1;
110 113
111 if (!(ctrl & 0x00000008)) { 114 if (!(ctrl & 0x00000008)) {
112 if (ctrl & 0x00000001) { 115 if (ctrl & 0x00000001) {
113 u32 coef = nv_rd32(priv, pll + 4); 116 u32 coef = nvkm_rd32(device, pll + 4);
114 M = (coef & 0x000000ff) >> 0; 117 M = (coef & 0x000000ff) >> 0;
115 N = (coef & 0x0000ff00) >> 8; 118 N = (coef & 0x0000ff00) >> 8;
116 P = (coef & 0x003f0000) >> 16; 119 P = (coef & 0x003f0000) >> 16;
@@ -121,10 +124,10 @@ read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
121 if ((pll & 0x00ff00) == 0x00e800) 124 if ((pll & 0x00ff00) == 0x00e800)
122 P = 1; 125 P = 1;
123 126
124 sclk = read_clk(priv, 0x00 + clk, false); 127 sclk = read_clk(clk, 0x00 + idx, false);
125 } 128 }
126 } else { 129 } else {
127 sclk = read_clk(priv, 0x10 + clk, false); 130 sclk = read_clk(clk, 0x10 + idx, false);
128 } 131 }
129 132
130 if (M * P) 133 if (M * P)
@@ -134,41 +137,43 @@ read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
134} 137}
135 138
136static int 139static int
137gt215_clk_read(struct nvkm_clk *clk, enum nv_clk_src src) 140gt215_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
138{ 141{
139 struct gt215_clk_priv *priv = (void *)clk; 142 struct gt215_clk *clk = gt215_clk(base);
143 struct nvkm_subdev *subdev = &clk->base.subdev;
144 struct nvkm_device *device = subdev->device;
140 u32 hsrc; 145 u32 hsrc;
141 146
142 switch (src) { 147 switch (src) {
143 case nv_clk_src_crystal: 148 case nv_clk_src_crystal:
144 return nv_device(priv)->crystal; 149 return device->crystal;
145 case nv_clk_src_core: 150 case nv_clk_src_core:
146 case nv_clk_src_core_intm: 151 case nv_clk_src_core_intm:
147 return read_pll(priv, 0x00, 0x4200); 152 return read_pll(clk, 0x00, 0x4200);
148 case nv_clk_src_shader: 153 case nv_clk_src_shader:
149 return read_pll(priv, 0x01, 0x4220); 154 return read_pll(clk, 0x01, 0x4220);
150 case nv_clk_src_mem: 155 case nv_clk_src_mem:
151 return read_pll(priv, 0x02, 0x4000); 156 return read_pll(clk, 0x02, 0x4000);
152 case nv_clk_src_disp: 157 case nv_clk_src_disp:
153 return read_clk(priv, 0x20, false); 158 return read_clk(clk, 0x20, false);
154 case nv_clk_src_vdec: 159 case nv_clk_src_vdec:
155 return read_clk(priv, 0x21, false); 160 return read_clk(clk, 0x21, false);
156 case nv_clk_src_daemon: 161 case nv_clk_src_daemon:
157 return read_clk(priv, 0x25, false); 162 return read_clk(clk, 0x25, false);
158 case nv_clk_src_host: 163 case nv_clk_src_host:
159 hsrc = (nv_rd32(priv, 0xc040) & 0x30000000) >> 28; 164 hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28;
160 switch (hsrc) { 165 switch (hsrc) {
161 case 0: 166 case 0:
162 return read_clk(priv, 0x1d, false); 167 return read_clk(clk, 0x1d, false);
163 case 2: 168 case 2:
164 case 3: 169 case 3:
165 return 277000; 170 return 277000;
166 default: 171 default:
167 nv_error(clk, "unknown HOST clock source %d\n", hsrc); 172 nvkm_error(subdev, "unknown HOST clock source %d\n", hsrc);
168 return -EINVAL; 173 return -EINVAL;
169 } 174 }
170 default: 175 default:
171 nv_error(clk, "invalid clock source %d\n", src); 176 nvkm_error(subdev, "invalid clock source %d\n", src);
172 return -EINVAL; 177 return -EINVAL;
173 } 178 }
174 179
@@ -176,10 +181,10 @@ gt215_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
176} 181}
177 182
178int 183int
179gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz, 184gt215_clk_info(struct nvkm_clk *base, int idx, u32 khz,
180 struct gt215_clk_info *info) 185 struct gt215_clk_info *info)
181{ 186{
182 struct gt215_clk_priv *priv = (void *)clock; 187 struct gt215_clk *clk = gt215_clk(base);
183 u32 oclk, sclk, sdiv; 188 u32 oclk, sclk, sdiv;
184 s32 diff; 189 s32 diff;
185 190
@@ -196,7 +201,7 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
196 info->clk = 0x00002140; 201 info->clk = 0x00002140;
197 return khz; 202 return khz;
198 default: 203 default:
199 sclk = read_vco(priv, clk); 204 sclk = read_vco(clk, idx);
200 sdiv = min((sclk * 2) / khz, (u32)65); 205 sdiv = min((sclk * 2) / khz, (u32)65);
201 oclk = (sclk * 2) / sdiv; 206 oclk = (sclk * 2) / sdiv;
202 diff = ((khz + 3000) - oclk); 207 diff = ((khz + 3000) - oclk);
@@ -224,11 +229,11 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
224} 229}
225 230
226int 231int
227gt215_pll_info(struct nvkm_clk *clock, int clk, u32 pll, u32 khz, 232gt215_pll_info(struct nvkm_clk *base, int idx, u32 pll, u32 khz,
228 struct gt215_clk_info *info) 233 struct gt215_clk_info *info)
229{ 234{
230 struct nvkm_bios *bios = nvkm_bios(clock); 235 struct gt215_clk *clk = gt215_clk(base);
231 struct gt215_clk_priv *priv = (void *)clock; 236 struct nvkm_subdev *subdev = &clk->base.subdev;
232 struct nvbios_pll limits; 237 struct nvbios_pll limits;
233 int P, N, M, diff; 238 int P, N, M, diff;
234 int ret; 239 int ret;
@@ -237,22 +242,22 @@ gt215_pll_info(struct nvkm_clk *clock, int clk, u32 pll, u32 khz,
237 242
238 /* If we can get a within [-2, 3) MHz of a divider, we'll disable the 243 /* If we can get a within [-2, 3) MHz of a divider, we'll disable the
239 * PLL and use the divider instead. */ 244 * PLL and use the divider instead. */
240 ret = gt215_clk_info(clock, clk, khz, info); 245 ret = gt215_clk_info(&clk->base, idx, khz, info);
241 diff = khz - ret; 246 diff = khz - ret;
242 if (!pll || (diff >= -2000 && diff < 3000)) { 247 if (!pll || (diff >= -2000 && diff < 3000)) {
243 goto out; 248 goto out;
244 } 249 }
245 250
246 /* Try with PLL */ 251 /* Try with PLL */
247 ret = nvbios_pll_parse(bios, pll, &limits); 252 ret = nvbios_pll_parse(subdev->device->bios, pll, &limits);
248 if (ret) 253 if (ret)
249 return ret; 254 return ret;
250 255
251 ret = gt215_clk_info(clock, clk - 0x10, limits.refclk, info); 256 ret = gt215_clk_info(&clk->base, idx - 0x10, limits.refclk, info);
252 if (ret != limits.refclk) 257 if (ret != limits.refclk)
253 return -EINVAL; 258 return -EINVAL;
254 259
255 ret = gt215_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P); 260 ret = gt215_pll_calc(subdev, &limits, khz, &N, NULL, &M, &P);
256 if (ret >= 0) { 261 if (ret >= 0) {
257 info->pll = (P << 16) | (N << 8) | M; 262 info->pll = (P << 16) | (N << 8) | M;
258 } 263 }
@@ -263,22 +268,22 @@ out:
263} 268}
264 269
265static int 270static int
266calc_clk(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate, 271calc_clk(struct gt215_clk *clk, struct nvkm_cstate *cstate,
267 int clk, u32 pll, int idx) 272 int idx, u32 pll, int dom)
268{ 273{
269 int ret = gt215_pll_info(&priv->base, clk, pll, cstate->domain[idx], 274 int ret = gt215_pll_info(&clk->base, idx, pll, cstate->domain[dom],
270 &priv->eng[idx]); 275 &clk->eng[dom]);
271 if (ret >= 0) 276 if (ret >= 0)
272 return 0; 277 return 0;
273 return ret; 278 return ret;
274} 279}
275 280
276static int 281static int
277calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate) 282calc_host(struct gt215_clk *clk, struct nvkm_cstate *cstate)
278{ 283{
279 int ret = 0; 284 int ret = 0;
280 u32 kHz = cstate->domain[nv_clk_src_host]; 285 u32 kHz = cstate->domain[nv_clk_src_host];
281 struct gt215_clk_info *info = &priv->eng[nv_clk_src_host]; 286 struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
282 287
283 if (kHz == 277000) { 288 if (kHz == 277000) {
284 info->clk = 0; 289 info->clk = 0;
@@ -288,7 +293,7 @@ calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate)
288 293
289 info->host_out = NVA3_HOST_CLK; 294 info->host_out = NVA3_HOST_CLK;
290 295
291 ret = gt215_clk_info(&priv->base, 0x1d, kHz, info); 296 ret = gt215_clk_info(&clk->base, 0x1d, kHz, info);
292 if (ret >= 0) 297 if (ret >= 0)
293 return 0; 298 return 0;
294 299
@@ -298,21 +303,33 @@ calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate)
298int 303int
299gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags) 304gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
300{ 305{
301 struct nvkm_fifo *pfifo = nvkm_fifo(clk); 306 struct nvkm_device *device = clk->subdev.device;
307 struct nvkm_fifo *fifo = device->fifo;
302 308
303 /* halt and idle execution engines */ 309 /* halt and idle execution engines */
304 nv_mask(clk, 0x020060, 0x00070000, 0x00000000); 310 nvkm_mask(device, 0x020060, 0x00070000, 0x00000000);
305 nv_mask(clk, 0x002504, 0x00000001, 0x00000001); 311 nvkm_mask(device, 0x002504, 0x00000001, 0x00000001);
306 /* Wait until the interrupt handler is finished */ 312 /* Wait until the interrupt handler is finished */
307 if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000)) 313 if (nvkm_msec(device, 2000,
314 if (!nvkm_rd32(device, 0x000100))
315 break;
316 ) < 0)
308 return -EBUSY; 317 return -EBUSY;
309 318
310 if (pfifo) 319 if (fifo)
311 pfifo->pause(pfifo, flags); 320 nvkm_fifo_pause(fifo, flags);
312 321
313 if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010)) 322 if (nvkm_msec(device, 2000,
323 if (nvkm_rd32(device, 0x002504) & 0x00000010)
324 break;
325 ) < 0)
314 return -EIO; 326 return -EIO;
315 if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f)) 327
328 if (nvkm_msec(device, 2000,
329 u32 tmp = nvkm_rd32(device, 0x002504) & 0x0000003f;
330 if (tmp == 0x0000003f)
331 break;
332 ) < 0)
316 return -EIO; 333 return -EIO;
317 334
318 return 0; 335 return 0;
@@ -321,86 +338,94 @@ gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
321void 338void
322gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags) 339gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags)
323{ 340{
324 struct nvkm_fifo *pfifo = nvkm_fifo(clk); 341 struct nvkm_device *device = clk->subdev.device;
342 struct nvkm_fifo *fifo = device->fifo;
325 343
326 if (pfifo && flags) 344 if (fifo && flags)
327 pfifo->start(pfifo, flags); 345 nvkm_fifo_start(fifo, flags);
328 346
329 nv_mask(clk, 0x002504, 0x00000001, 0x00000000); 347 nvkm_mask(device, 0x002504, 0x00000001, 0x00000000);
330 nv_mask(clk, 0x020060, 0x00070000, 0x00040000); 348 nvkm_mask(device, 0x020060, 0x00070000, 0x00040000);
331} 349}
332 350
333static void 351static void
334disable_clk_src(struct gt215_clk_priv *priv, u32 src) 352disable_clk_src(struct gt215_clk *clk, u32 src)
335{ 353{
336 nv_mask(priv, src, 0x00000100, 0x00000000); 354 struct nvkm_device *device = clk->base.subdev.device;
337 nv_mask(priv, src, 0x00000001, 0x00000000); 355 nvkm_mask(device, src, 0x00000100, 0x00000000);
356 nvkm_mask(device, src, 0x00000001, 0x00000000);
338} 357}
339 358
340static void 359static void
341prog_pll(struct gt215_clk_priv *priv, int clk, u32 pll, int idx) 360prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
342{ 361{
343 struct gt215_clk_info *info = &priv->eng[idx]; 362 struct gt215_clk_info *info = &clk->eng[dom];
344 const u32 src0 = 0x004120 + (clk * 4); 363 struct nvkm_device *device = clk->base.subdev.device;
345 const u32 src1 = 0x004160 + (clk * 4); 364 const u32 src0 = 0x004120 + (idx * 4);
365 const u32 src1 = 0x004160 + (idx * 4);
346 const u32 ctrl = pll + 0; 366 const u32 ctrl = pll + 0;
347 const u32 coef = pll + 4; 367 const u32 coef = pll + 4;
348 u32 bypass; 368 u32 bypass;
349 369
350 if (info->pll) { 370 if (info->pll) {
351 /* Always start from a non-PLL clock */ 371 /* Always start from a non-PLL clock */
352 bypass = nv_rd32(priv, ctrl) & 0x00000008; 372 bypass = nvkm_rd32(device, ctrl) & 0x00000008;
353 if (!bypass) { 373 if (!bypass) {
354 nv_mask(priv, src1, 0x00000101, 0x00000101); 374 nvkm_mask(device, src1, 0x00000101, 0x00000101);
355 nv_mask(priv, ctrl, 0x00000008, 0x00000008); 375 nvkm_mask(device, ctrl, 0x00000008, 0x00000008);
356 udelay(20); 376 udelay(20);
357 } 377 }
358 378
359 nv_mask(priv, src0, 0x003f3141, 0x00000101 | info->clk); 379 nvkm_mask(device, src0, 0x003f3141, 0x00000101 | info->clk);
360 nv_wr32(priv, coef, info->pll); 380 nvkm_wr32(device, coef, info->pll);
361 nv_mask(priv, ctrl, 0x00000015, 0x00000015); 381 nvkm_mask(device, ctrl, 0x00000015, 0x00000015);
362 nv_mask(priv, ctrl, 0x00000010, 0x00000000); 382 nvkm_mask(device, ctrl, 0x00000010, 0x00000000);
363 if (!nv_wait(priv, ctrl, 0x00020000, 0x00020000)) { 383 if (nvkm_msec(device, 2000,
364 nv_mask(priv, ctrl, 0x00000010, 0x00000010); 384 if (nvkm_rd32(device, ctrl) & 0x00020000)
365 nv_mask(priv, src0, 0x00000101, 0x00000000); 385 break;
386 ) < 0) {
387 nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
388 nvkm_mask(device, src0, 0x00000101, 0x00000000);
366 return; 389 return;
367 } 390 }
368 nv_mask(priv, ctrl, 0x00000010, 0x00000010); 391 nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
369 nv_mask(priv, ctrl, 0x00000008, 0x00000000); 392 nvkm_mask(device, ctrl, 0x00000008, 0x00000000);
370 disable_clk_src(priv, src1); 393 disable_clk_src(clk, src1);
371 } else { 394 } else {
372 nv_mask(priv, src1, 0x003f3141, 0x00000101 | info->clk); 395 nvkm_mask(device, src1, 0x003f3141, 0x00000101 | info->clk);
373 nv_mask(priv, ctrl, 0x00000018, 0x00000018); 396 nvkm_mask(device, ctrl, 0x00000018, 0x00000018);
374 udelay(20); 397 udelay(20);
375 nv_mask(priv, ctrl, 0x00000001, 0x00000000); 398 nvkm_mask(device, ctrl, 0x00000001, 0x00000000);
376 disable_clk_src(priv, src0); 399 disable_clk_src(clk, src0);
377 } 400 }
378} 401}
379 402
380static void 403static void
381prog_clk(struct gt215_clk_priv *priv, int clk, int idx) 404prog_clk(struct gt215_clk *clk, int idx, int dom)
382{ 405{
383 struct gt215_clk_info *info = &priv->eng[idx]; 406 struct gt215_clk_info *info = &clk->eng[dom];
384 nv_mask(priv, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | info->clk); 407 struct nvkm_device *device = clk->base.subdev.device;
408 nvkm_mask(device, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
385} 409}
386 410
387static void 411static void
388prog_host(struct gt215_clk_priv *priv) 412prog_host(struct gt215_clk *clk)
389{ 413{
390 struct gt215_clk_info *info = &priv->eng[nv_clk_src_host]; 414 struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
391 u32 hsrc = (nv_rd32(priv, 0xc040)); 415 struct nvkm_device *device = clk->base.subdev.device;
416 u32 hsrc = (nvkm_rd32(device, 0xc040));
392 417
393 switch (info->host_out) { 418 switch (info->host_out) {
394 case NVA3_HOST_277: 419 case NVA3_HOST_277:
395 if ((hsrc & 0x30000000) == 0) { 420 if ((hsrc & 0x30000000) == 0) {
396 nv_wr32(priv, 0xc040, hsrc | 0x20000000); 421 nvkm_wr32(device, 0xc040, hsrc | 0x20000000);
397 disable_clk_src(priv, 0x4194); 422 disable_clk_src(clk, 0x4194);
398 } 423 }
399 break; 424 break;
400 case NVA3_HOST_CLK: 425 case NVA3_HOST_CLK:
401 prog_clk(priv, 0x1d, nv_clk_src_host); 426 prog_clk(clk, 0x1d, nv_clk_src_host);
402 if ((hsrc & 0x30000000) >= 0x20000000) { 427 if ((hsrc & 0x30000000) >= 0x20000000) {
403 nv_wr32(priv, 0xc040, hsrc & ~0x30000000); 428 nvkm_wr32(device, 0xc040, hsrc & ~0x30000000);
404 } 429 }
405 break; 430 break;
406 default: 431 default:
@@ -408,44 +433,45 @@ prog_host(struct gt215_clk_priv *priv)
408 } 433 }
409 434
410 /* This seems to be a clock gating factor on idle, always set to 64 */ 435 /* This seems to be a clock gating factor on idle, always set to 64 */
411 nv_wr32(priv, 0xc044, 0x3e); 436 nvkm_wr32(device, 0xc044, 0x3e);
412} 437}
413 438
414static void 439static void
415prog_core(struct gt215_clk_priv *priv, int idx) 440prog_core(struct gt215_clk *clk, int dom)
416{ 441{
417 struct gt215_clk_info *info = &priv->eng[idx]; 442 struct gt215_clk_info *info = &clk->eng[dom];
418 u32 fb_delay = nv_rd32(priv, 0x10002c); 443 struct nvkm_device *device = clk->base.subdev.device;
444 u32 fb_delay = nvkm_rd32(device, 0x10002c);
419 445
420 if (fb_delay < info->fb_delay) 446 if (fb_delay < info->fb_delay)
421 nv_wr32(priv, 0x10002c, info->fb_delay); 447 nvkm_wr32(device, 0x10002c, info->fb_delay);
422 448
423 prog_pll(priv, 0x00, 0x004200, idx); 449 prog_pll(clk, 0x00, 0x004200, dom);
424 450
425 if (fb_delay > info->fb_delay) 451 if (fb_delay > info->fb_delay)
426 nv_wr32(priv, 0x10002c, info->fb_delay); 452 nvkm_wr32(device, 0x10002c, info->fb_delay);
427} 453}
428 454
429static int 455static int
430gt215_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate) 456gt215_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
431{ 457{
432 struct gt215_clk_priv *priv = (void *)clk; 458 struct gt215_clk *clk = gt215_clk(base);
433 struct gt215_clk_info *core = &priv->eng[nv_clk_src_core]; 459 struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
434 int ret; 460 int ret;
435 461
436 if ((ret = calc_clk(priv, cstate, 0x10, 0x4200, nv_clk_src_core)) || 462 if ((ret = calc_clk(clk, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
437 (ret = calc_clk(priv, cstate, 0x11, 0x4220, nv_clk_src_shader)) || 463 (ret = calc_clk(clk, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
438 (ret = calc_clk(priv, cstate, 0x20, 0x0000, nv_clk_src_disp)) || 464 (ret = calc_clk(clk, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
439 (ret = calc_clk(priv, cstate, 0x21, 0x0000, nv_clk_src_vdec)) || 465 (ret = calc_clk(clk, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
440 (ret = calc_host(priv, cstate))) 466 (ret = calc_host(clk, cstate)))
441 return ret; 467 return ret;
442 468
443 /* XXX: Should be reading the highest bit in the VBIOS clock to decide 469 /* XXX: Should be reading the highest bit in the VBIOS clock to decide
444 * whether to use a PLL or not... but using a PLL defeats the purpose */ 470 * whether to use a PLL or not... but using a PLL defeats the purpose */
445 if (core->pll) { 471 if (core->pll) {
446 ret = gt215_clk_info(clk, 0x10, 472 ret = gt215_clk_info(&clk->base, 0x10,
447 cstate->domain[nv_clk_src_core_intm], 473 cstate->domain[nv_clk_src_core_intm],
448 &priv->eng[nv_clk_src_core_intm]); 474 &clk->eng[nv_clk_src_core_intm]);
449 if (ret < 0) 475 if (ret < 0)
450 return ret; 476 return ret;
451 } 477 }
@@ -454,81 +480,67 @@ gt215_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
454} 480}
455 481
456static int 482static int
457gt215_clk_prog(struct nvkm_clk *clk) 483gt215_clk_prog(struct nvkm_clk *base)
458{ 484{
459 struct gt215_clk_priv *priv = (void *)clk; 485 struct gt215_clk *clk = gt215_clk(base);
460 struct gt215_clk_info *core = &priv->eng[nv_clk_src_core]; 486 struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
461 int ret = 0; 487 int ret = 0;
462 unsigned long flags; 488 unsigned long flags;
463 unsigned long *f = &flags; 489 unsigned long *f = &flags;
464 490
465 ret = gt215_clk_pre(clk, f); 491 ret = gt215_clk_pre(&clk->base, f);
466 if (ret) 492 if (ret)
467 goto out; 493 goto out;
468 494
469 if (core->pll) 495 if (core->pll)
470 prog_core(priv, nv_clk_src_core_intm); 496 prog_core(clk, nv_clk_src_core_intm);
471 497
472 prog_core(priv, nv_clk_src_core); 498 prog_core(clk, nv_clk_src_core);
473 prog_pll(priv, 0x01, 0x004220, nv_clk_src_shader); 499 prog_pll(clk, 0x01, 0x004220, nv_clk_src_shader);
474 prog_clk(priv, 0x20, nv_clk_src_disp); 500 prog_clk(clk, 0x20, nv_clk_src_disp);
475 prog_clk(priv, 0x21, nv_clk_src_vdec); 501 prog_clk(clk, 0x21, nv_clk_src_vdec);
476 prog_host(priv); 502 prog_host(clk);
477 503
478out: 504out:
479 if (ret == -EBUSY) 505 if (ret == -EBUSY)
480 f = NULL; 506 f = NULL;
481 507
482 gt215_clk_post(clk, f); 508 gt215_clk_post(&clk->base, f);
483 return ret; 509 return ret;
484} 510}
485 511
486static void 512static void
487gt215_clk_tidy(struct nvkm_clk *clk) 513gt215_clk_tidy(struct nvkm_clk *base)
488{ 514{
489} 515}
490 516
491static struct nvkm_domain 517static const struct nvkm_clk_func
492gt215_domain[] = { 518gt215_clk = {
493 { nv_clk_src_crystal , 0xff }, 519 .read = gt215_clk_read,
494 { nv_clk_src_core , 0x00, 0, "core", 1000 }, 520 .calc = gt215_clk_calc,
495 { nv_clk_src_shader , 0x01, 0, "shader", 1000 }, 521 .prog = gt215_clk_prog,
496 { nv_clk_src_mem , 0x02, 0, "memory", 1000 }, 522 .tidy = gt215_clk_tidy,
497 { nv_clk_src_vdec , 0x03 }, 523 .domains = {
498 { nv_clk_src_disp , 0x04 }, 524 { nv_clk_src_crystal , 0xff },
499 { nv_clk_src_host , 0x05 }, 525 { nv_clk_src_core , 0x00, 0, "core", 1000 },
500 { nv_clk_src_core_intm, 0x06 }, 526 { nv_clk_src_shader , 0x01, 0, "shader", 1000 },
501 { nv_clk_src_max } 527 { nv_clk_src_mem , 0x02, 0, "memory", 1000 },
528 { nv_clk_src_vdec , 0x03 },
529 { nv_clk_src_disp , 0x04 },
530 { nv_clk_src_host , 0x05 },
531 { nv_clk_src_core_intm, 0x06 },
532 { nv_clk_src_max }
533 }
502}; 534};
503 535
504static int 536int
505gt215_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 537gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
506 struct nvkm_oclass *oclass, void *data, u32 size,
507 struct nvkm_object **pobject)
508{ 538{
509 struct gt215_clk_priv *priv; 539 struct gt215_clk *clk;
510 int ret;
511 540
512 ret = nvkm_clk_create(parent, engine, oclass, gt215_domain, 541 if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
513 NULL, 0, true, &priv); 542 return -ENOMEM;
514 *pobject = nv_object(priv); 543 *pclk = &clk->base;
515 if (ret)
516 return ret;
517 544
518 priv->base.read = gt215_clk_read; 545 return nvkm_clk_ctor(&gt215_clk, device, index, true, &clk->base);
519 priv->base.calc = gt215_clk_calc;
520 priv->base.prog = gt215_clk_prog;
521 priv->base.tidy = gt215_clk_tidy;
522 return 0;
523} 546}
524
525struct nvkm_oclass
526gt215_clk_oclass = {
527 .handle = NV_SUBDEV(CLK, 0xa3),
528 .ofuncs = &(struct nvkm_ofuncs) {
529 .ctor = gt215_clk_ctor,
530 .dtor = _nvkm_clk_dtor,
531 .init = _nvkm_clk_init,
532 .fini = _nvkm_clk_fini,
533 },
534};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
index b447d9cd4d37..8865b59fe575 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
@@ -1,6 +1,6 @@
1#ifndef __NVKM_CLK_NVA3_H__ 1#ifndef __NVKM_CLK_NVA3_H__
2#define __NVKM_CLK_NVA3_H__ 2#define __NVKM_CLK_NVA3_H__
3#include <subdev/clk.h> 3#include "priv.h"
4 4
5struct gt215_clk_info { 5struct gt215_clk_info {
6 u32 clk; 6 u32 clk;
@@ -13,6 +13,6 @@ struct gt215_clk_info {
13}; 13};
14 14
15int gt215_pll_info(struct nvkm_clk *, int, u32, u32, struct gt215_clk_info *); 15int gt215_pll_info(struct nvkm_clk *, int, u32, u32, struct gt215_clk_info *);
16int gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags); 16int gt215_clk_pre(struct nvkm_clk *, unsigned long *flags);
17void gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags); 17void gt215_clk_post(struct nvkm_clk *, unsigned long *flags);
18#endif 18#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
index c54417b146c7..1c21b8b53b78 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
@@ -21,15 +21,15 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#define mcp77_clk(p) container_of((p), struct mcp77_clk, base)
24#include "gt215.h" 25#include "gt215.h"
25#include "pll.h" 26#include "pll.h"
26 27
27#include <core/device.h>
28#include <subdev/bios.h> 28#include <subdev/bios.h>
29#include <subdev/bios/pll.h> 29#include <subdev/bios/pll.h>
30#include <subdev/timer.h> 30#include <subdev/timer.h>
31 31
32struct mcp77_clk_priv { 32struct mcp77_clk {
33 struct nvkm_clk base; 33 struct nvkm_clk base;
34 enum nv_clk_src csrc, ssrc, vsrc; 34 enum nv_clk_src csrc, ssrc, vsrc;
35 u32 cctrl, sctrl; 35 u32 cctrl, sctrl;
@@ -39,27 +39,29 @@ struct mcp77_clk_priv {
39}; 39};
40 40
41static u32 41static u32
42read_div(struct nvkm_clk *clk) 42read_div(struct mcp77_clk *clk)
43{ 43{
44 return nv_rd32(clk, 0x004600); 44 struct nvkm_device *device = clk->base.subdev.device;
45 return nvkm_rd32(device, 0x004600);
45} 46}
46 47
47static u32 48static u32
48read_pll(struct nvkm_clk *clk, u32 base) 49read_pll(struct mcp77_clk *clk, u32 base)
49{ 50{
50 u32 ctrl = nv_rd32(clk, base + 0); 51 struct nvkm_device *device = clk->base.subdev.device;
51 u32 coef = nv_rd32(clk, base + 4); 52 u32 ctrl = nvkm_rd32(device, base + 0);
52 u32 ref = clk->read(clk, nv_clk_src_href); 53 u32 coef = nvkm_rd32(device, base + 4);
54 u32 ref = nvkm_clk_read(&clk->base, nv_clk_src_href);
53 u32 post_div = 0; 55 u32 post_div = 0;
54 u32 clock = 0; 56 u32 clock = 0;
55 int N1, M1; 57 int N1, M1;
56 58
57 switch (base){ 59 switch (base){
58 case 0x4020: 60 case 0x4020:
59 post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16); 61 post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
60 break; 62 break;
61 case 0x4028: 63 case 0x4028:
62 post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16; 64 post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
63 break; 65 break;
64 default: 66 default:
65 break; 67 break;
@@ -76,59 +78,61 @@ read_pll(struct nvkm_clk *clk, u32 base)
76} 78}
77 79
78static int 80static int
79mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src) 81mcp77_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
80{ 82{
81 struct mcp77_clk_priv *priv = (void *)clk; 83 struct mcp77_clk *clk = mcp77_clk(base);
82 u32 mast = nv_rd32(clk, 0x00c054); 84 struct nvkm_subdev *subdev = &clk->base.subdev;
85 struct nvkm_device *device = subdev->device;
86 u32 mast = nvkm_rd32(device, 0x00c054);
83 u32 P = 0; 87 u32 P = 0;
84 88
85 switch (src) { 89 switch (src) {
86 case nv_clk_src_crystal: 90 case nv_clk_src_crystal:
87 return nv_device(priv)->crystal; 91 return device->crystal;
88 case nv_clk_src_href: 92 case nv_clk_src_href:
89 return 100000; /* PCIE reference clock */ 93 return 100000; /* PCIE reference clock */
90 case nv_clk_src_hclkm4: 94 case nv_clk_src_hclkm4:
91 return clk->read(clk, nv_clk_src_href) * 4; 95 return nvkm_clk_read(&clk->base, nv_clk_src_href) * 4;
92 case nv_clk_src_hclkm2d3: 96 case nv_clk_src_hclkm2d3:
93 return clk->read(clk, nv_clk_src_href) * 2 / 3; 97 return nvkm_clk_read(&clk->base, nv_clk_src_href) * 2 / 3;
94 case nv_clk_src_host: 98 case nv_clk_src_host:
95 switch (mast & 0x000c0000) { 99 switch (mast & 0x000c0000) {
96 case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3); 100 case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
97 case 0x00040000: break; 101 case 0x00040000: break;
98 case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4); 102 case 0x00080000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
99 case 0x000c0000: return clk->read(clk, nv_clk_src_cclk); 103 case 0x000c0000: return nvkm_clk_read(&clk->base, nv_clk_src_cclk);
100 } 104 }
101 break; 105 break;
102 case nv_clk_src_core: 106 case nv_clk_src_core:
103 P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16; 107 P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
104 108
105 switch (mast & 0x00000003) { 109 switch (mast & 0x00000003) {
106 case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P; 110 case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
107 case 0x00000001: return 0; 111 case 0x00000001: return 0;
108 case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P; 112 case 0x00000002: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4) >> P;
109 case 0x00000003: return read_pll(clk, 0x004028) >> P; 113 case 0x00000003: return read_pll(clk, 0x004028) >> P;
110 } 114 }
111 break; 115 break;
112 case nv_clk_src_cclk: 116 case nv_clk_src_cclk:
113 if ((mast & 0x03000000) != 0x03000000) 117 if ((mast & 0x03000000) != 0x03000000)
114 return clk->read(clk, nv_clk_src_core); 118 return nvkm_clk_read(&clk->base, nv_clk_src_core);
115 119
116 if ((mast & 0x00000200) == 0x00000000) 120 if ((mast & 0x00000200) == 0x00000000)
117 return clk->read(clk, nv_clk_src_core); 121 return nvkm_clk_read(&clk->base, nv_clk_src_core);
118 122
119 switch (mast & 0x00000c00) { 123 switch (mast & 0x00000c00) {
120 case 0x00000000: return clk->read(clk, nv_clk_src_href); 124 case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
121 case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4); 125 case 0x00000400: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
122 case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3); 126 case 0x00000800: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
123 default: return 0; 127 default: return 0;
124 } 128 }
125 case nv_clk_src_shader: 129 case nv_clk_src_shader:
126 P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16; 130 P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
127 switch (mast & 0x00000030) { 131 switch (mast & 0x00000030) {
128 case 0x00000000: 132 case 0x00000000:
129 if (mast & 0x00000040) 133 if (mast & 0x00000040)
130 return clk->read(clk, nv_clk_src_href) >> P; 134 return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
131 return clk->read(clk, nv_clk_src_crystal) >> P; 135 return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
132 case 0x00000010: break; 136 case 0x00000010: break;
133 case 0x00000020: return read_pll(clk, 0x004028) >> P; 137 case 0x00000020: return read_pll(clk, 0x004028) >> P;
134 case 0x00000030: return read_pll(clk, 0x004020) >> P; 138 case 0x00000030: return read_pll(clk, 0x004020) >> P;
@@ -142,7 +146,7 @@ mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
142 146
143 switch (mast & 0x00400000) { 147 switch (mast & 0x00400000) {
144 case 0x00400000: 148 case 0x00400000:
145 return clk->read(clk, nv_clk_src_core) >> P; 149 return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
146 break; 150 break;
147 default: 151 default:
148 return 500000 >> P; 152 return 500000 >> P;
@@ -153,29 +157,28 @@ mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
153 break; 157 break;
154 } 158 }
155 159
156 nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast); 160 nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
157 return 0; 161 return 0;
158} 162}
159 163
160static u32 164static u32
161calc_pll(struct mcp77_clk_priv *priv, u32 reg, 165calc_pll(struct mcp77_clk *clk, u32 reg,
162 u32 clock, int *N, int *M, int *P) 166 u32 clock, int *N, int *M, int *P)
163{ 167{
164 struct nvkm_bios *bios = nvkm_bios(priv); 168 struct nvkm_subdev *subdev = &clk->base.subdev;
165 struct nvbios_pll pll; 169 struct nvbios_pll pll;
166 struct nvkm_clk *clk = &priv->base;
167 int ret; 170 int ret;
168 171
169 ret = nvbios_pll_parse(bios, reg, &pll); 172 ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
170 if (ret) 173 if (ret)
171 return 0; 174 return 0;
172 175
173 pll.vco2.max_freq = 0; 176 pll.vco2.max_freq = 0;
174 pll.refclk = clk->read(clk, nv_clk_src_href); 177 pll.refclk = nvkm_clk_read(&clk->base, nv_clk_src_href);
175 if (!pll.refclk) 178 if (!pll.refclk)
176 return 0; 179 return 0;
177 180
178 return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P); 181 return nv04_pll_calc(subdev, &pll, clock, N, M, NULL, NULL, P);
179} 182}
180 183
181static inline u32 184static inline u32
@@ -197,26 +200,27 @@ calc_P(u32 src, u32 target, int *div)
197} 200}
198 201
199static int 202static int
200mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate) 203mcp77_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
201{ 204{
202 struct mcp77_clk_priv *priv = (void *)clk; 205 struct mcp77_clk *clk = mcp77_clk(base);
203 const int shader = cstate->domain[nv_clk_src_shader]; 206 const int shader = cstate->domain[nv_clk_src_shader];
204 const int core = cstate->domain[nv_clk_src_core]; 207 const int core = cstate->domain[nv_clk_src_core];
205 const int vdec = cstate->domain[nv_clk_src_vdec]; 208 const int vdec = cstate->domain[nv_clk_src_vdec];
209 struct nvkm_subdev *subdev = &clk->base.subdev;
206 u32 out = 0, clock = 0; 210 u32 out = 0, clock = 0;
207 int N, M, P1, P2 = 0; 211 int N, M, P1, P2 = 0;
208 int divs = 0; 212 int divs = 0;
209 213
210 /* cclk: find suitable source, disable PLL if we can */ 214 /* cclk: find suitable source, disable PLL if we can */
211 if (core < clk->read(clk, nv_clk_src_hclkm4)) 215 if (core < nvkm_clk_read(&clk->base, nv_clk_src_hclkm4))
212 out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs); 216 out = calc_P(nvkm_clk_read(&clk->base, nv_clk_src_hclkm4), core, &divs);
213 217
214 /* Calculate clock * 2, so shader clock can use it too */ 218 /* Calculate clock * 2, so shader clock can use it too */
215 clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1); 219 clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);
216 220
217 if (abs(core - out) <= abs(core - (clock >> 1))) { 221 if (abs(core - out) <= abs(core - (clock >> 1))) {
218 priv->csrc = nv_clk_src_hclkm4; 222 clk->csrc = nv_clk_src_hclkm4;
219 priv->cctrl = divs << 16; 223 clk->cctrl = divs << 16;
220 } else { 224 } else {
221 /* NVCTRL is actually used _after_ NVPOST, and after what we 225 /* NVCTRL is actually used _after_ NVPOST, and after what we
222 * call NVPLL. To make matters worse, NVPOST is an integer 226 * call NVPLL. To make matters worse, NVPOST is an integer
@@ -226,31 +230,31 @@ mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
226 P1 = 2; 230 P1 = 2;
227 } 231 }
228 232
229 priv->csrc = nv_clk_src_core; 233 clk->csrc = nv_clk_src_core;
230 priv->ccoef = (N << 8) | M; 234 clk->ccoef = (N << 8) | M;
231 235
232 priv->cctrl = (P2 + 1) << 16; 236 clk->cctrl = (P2 + 1) << 16;
233 priv->cpost = (1 << P1) << 16; 237 clk->cpost = (1 << P1) << 16;
234 } 238 }
235 239
236 /* sclk: nvpll + divisor, href or spll */ 240 /* sclk: nvpll + divisor, href or spll */
237 out = 0; 241 out = 0;
238 if (shader == clk->read(clk, nv_clk_src_href)) { 242 if (shader == nvkm_clk_read(&clk->base, nv_clk_src_href)) {
239 priv->ssrc = nv_clk_src_href; 243 clk->ssrc = nv_clk_src_href;
240 } else { 244 } else {
241 clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1); 245 clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
242 if (priv->csrc == nv_clk_src_core) 246 if (clk->csrc == nv_clk_src_core)
243 out = calc_P((core << 1), shader, &divs); 247 out = calc_P((core << 1), shader, &divs);
244 248
245 if (abs(shader - out) <= 249 if (abs(shader - out) <=
246 abs(shader - clock) && 250 abs(shader - clock) &&
247 (divs + P2) <= 7) { 251 (divs + P2) <= 7) {
248 priv->ssrc = nv_clk_src_core; 252 clk->ssrc = nv_clk_src_core;
249 priv->sctrl = (divs + P2) << 16; 253 clk->sctrl = (divs + P2) << 16;
250 } else { 254 } else {
251 priv->ssrc = nv_clk_src_shader; 255 clk->ssrc = nv_clk_src_shader;
252 priv->scoef = (N << 8) | M; 256 clk->scoef = (N << 8) | M;
253 priv->sctrl = P1 << 16; 257 clk->sctrl = P1 << 16;
254 } 258 }
255 } 259 }
256 260
@@ -258,172 +262,162 @@ mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
258 out = calc_P(core, vdec, &divs); 262 out = calc_P(core, vdec, &divs);
259 clock = calc_P(500000, vdec, &P1); 263 clock = calc_P(500000, vdec, &P1);
260 if(abs(vdec - out) <= abs(vdec - clock)) { 264 if(abs(vdec - out) <= abs(vdec - clock)) {
261 priv->vsrc = nv_clk_src_cclk; 265 clk->vsrc = nv_clk_src_cclk;
262 priv->vdiv = divs << 16; 266 clk->vdiv = divs << 16;
263 } else { 267 } else {
264 priv->vsrc = nv_clk_src_vdec; 268 clk->vsrc = nv_clk_src_vdec;
265 priv->vdiv = P1 << 16; 269 clk->vdiv = P1 << 16;
266 } 270 }
267 271
268 /* Print strategy! */ 272 /* Print strategy! */
269 nv_debug(priv, "nvpll: %08x %08x %08x\n", 273 nvkm_debug(subdev, "nvpll: %08x %08x %08x\n",
270 priv->ccoef, priv->cpost, priv->cctrl); 274 clk->ccoef, clk->cpost, clk->cctrl);
271 nv_debug(priv, " spll: %08x %08x %08x\n", 275 nvkm_debug(subdev, " spll: %08x %08x %08x\n",
272 priv->scoef, priv->spost, priv->sctrl); 276 clk->scoef, clk->spost, clk->sctrl);
273 nv_debug(priv, " vdiv: %08x\n", priv->vdiv); 277 nvkm_debug(subdev, " vdiv: %08x\n", clk->vdiv);
274 if (priv->csrc == nv_clk_src_hclkm4) 278 if (clk->csrc == nv_clk_src_hclkm4)
275 nv_debug(priv, "core: hrefm4\n"); 279 nvkm_debug(subdev, "core: hrefm4\n");
276 else 280 else
277 nv_debug(priv, "core: nvpll\n"); 281 nvkm_debug(subdev, "core: nvpll\n");
278 282
279 if (priv->ssrc == nv_clk_src_hclkm4) 283 if (clk->ssrc == nv_clk_src_hclkm4)
280 nv_debug(priv, "shader: hrefm4\n"); 284 nvkm_debug(subdev, "shader: hrefm4\n");
281 else if (priv->ssrc == nv_clk_src_core) 285 else if (clk->ssrc == nv_clk_src_core)
282 nv_debug(priv, "shader: nvpll\n"); 286 nvkm_debug(subdev, "shader: nvpll\n");
283 else 287 else
284 nv_debug(priv, "shader: spll\n"); 288 nvkm_debug(subdev, "shader: spll\n");
285 289
286 if (priv->vsrc == nv_clk_src_hclkm4) 290 if (clk->vsrc == nv_clk_src_hclkm4)
287 nv_debug(priv, "vdec: 500MHz\n"); 291 nvkm_debug(subdev, "vdec: 500MHz\n");
288 else 292 else
289 nv_debug(priv, "vdec: core\n"); 293 nvkm_debug(subdev, "vdec: core\n");
290 294
291 return 0; 295 return 0;
292} 296}
293 297
294static int 298static int
295mcp77_clk_prog(struct nvkm_clk *clk) 299mcp77_clk_prog(struct nvkm_clk *base)
296{ 300{
297 struct mcp77_clk_priv *priv = (void *)clk; 301 struct mcp77_clk *clk = mcp77_clk(base);
302 struct nvkm_subdev *subdev = &clk->base.subdev;
303 struct nvkm_device *device = subdev->device;
298 u32 pllmask = 0, mast; 304 u32 pllmask = 0, mast;
299 unsigned long flags; 305 unsigned long flags;
300 unsigned long *f = &flags; 306 unsigned long *f = &flags;
301 int ret = 0; 307 int ret = 0;
302 308
303 ret = gt215_clk_pre(clk, f); 309 ret = gt215_clk_pre(&clk->base, f);
304 if (ret) 310 if (ret)
305 goto out; 311 goto out;
306 312
307 /* First switch to safe clocks: href */ 313 /* First switch to safe clocks: href */
308 mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640); 314 mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
309 mast &= ~0x00400e73; 315 mast &= ~0x00400e73;
310 mast |= 0x03000000; 316 mast |= 0x03000000;
311 317
312 switch (priv->csrc) { 318 switch (clk->csrc) {
313 case nv_clk_src_hclkm4: 319 case nv_clk_src_hclkm4:
314 nv_mask(clk, 0x4028, 0x00070000, priv->cctrl); 320 nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
315 mast |= 0x00000002; 321 mast |= 0x00000002;
316 break; 322 break;
317 case nv_clk_src_core: 323 case nv_clk_src_core:
318 nv_wr32(clk, 0x402c, priv->ccoef); 324 nvkm_wr32(device, 0x402c, clk->ccoef);
319 nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl); 325 nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
320 nv_wr32(clk, 0x4040, priv->cpost); 326 nvkm_wr32(device, 0x4040, clk->cpost);
321 pllmask |= (0x3 << 8); 327 pllmask |= (0x3 << 8);
322 mast |= 0x00000003; 328 mast |= 0x00000003;
323 break; 329 break;
324 default: 330 default:
325 nv_warn(priv,"Reclocking failed: unknown core clock\n"); 331 nvkm_warn(subdev, "Reclocking failed: unknown core clock\n");
326 goto resume; 332 goto resume;
327 } 333 }
328 334
329 switch (priv->ssrc) { 335 switch (clk->ssrc) {
330 case nv_clk_src_href: 336 case nv_clk_src_href:
331 nv_mask(clk, 0x4020, 0x00070000, 0x00000000); 337 nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
332 /* mast |= 0x00000000; */ 338 /* mast |= 0x00000000; */
333 break; 339 break;
334 case nv_clk_src_core: 340 case nv_clk_src_core:
335 nv_mask(clk, 0x4020, 0x00070000, priv->sctrl); 341 nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
336 mast |= 0x00000020; 342 mast |= 0x00000020;
337 break; 343 break;
338 case nv_clk_src_shader: 344 case nv_clk_src_shader:
339 nv_wr32(clk, 0x4024, priv->scoef); 345 nvkm_wr32(device, 0x4024, clk->scoef);
340 nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl); 346 nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
341 nv_wr32(clk, 0x4070, priv->spost); 347 nvkm_wr32(device, 0x4070, clk->spost);
342 pllmask |= (0x3 << 12); 348 pllmask |= (0x3 << 12);
343 mast |= 0x00000030; 349 mast |= 0x00000030;
344 break; 350 break;
345 default: 351 default:
346 nv_warn(priv,"Reclocking failed: unknown sclk clock\n"); 352 nvkm_warn(subdev, "Reclocking failed: unknown sclk clock\n");
347 goto resume; 353 goto resume;
348 } 354 }
349 355
350 if (!nv_wait(clk, 0x004080, pllmask, pllmask)) { 356 if (nvkm_msec(device, 2000,
351 nv_warn(priv,"Reclocking failed: unstable PLLs\n"); 357 u32 tmp = nvkm_rd32(device, 0x004080) & pllmask;
358 if (tmp == pllmask)
359 break;
360 ) < 0)
352 goto resume; 361 goto resume;
353 }
354 362
355 switch (priv->vsrc) { 363 switch (clk->vsrc) {
356 case nv_clk_src_cclk: 364 case nv_clk_src_cclk:
357 mast |= 0x00400000; 365 mast |= 0x00400000;
358 default: 366 default:
359 nv_wr32(clk, 0x4600, priv->vdiv); 367 nvkm_wr32(device, 0x4600, clk->vdiv);
360 } 368 }
361 369
362 nv_wr32(clk, 0xc054, mast); 370 nvkm_wr32(device, 0xc054, mast);
363 371
364resume: 372resume:
365 /* Disable some PLLs and dividers when unused */ 373 /* Disable some PLLs and dividers when unused */
366 if (priv->csrc != nv_clk_src_core) { 374 if (clk->csrc != nv_clk_src_core) {
367 nv_wr32(clk, 0x4040, 0x00000000); 375 nvkm_wr32(device, 0x4040, 0x00000000);
368 nv_mask(clk, 0x4028, 0x80000000, 0x00000000); 376 nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
369 } 377 }
370 378
371 if (priv->ssrc != nv_clk_src_shader) { 379 if (clk->ssrc != nv_clk_src_shader) {
372 nv_wr32(clk, 0x4070, 0x00000000); 380 nvkm_wr32(device, 0x4070, 0x00000000);
373 nv_mask(clk, 0x4020, 0x80000000, 0x00000000); 381 nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
374 } 382 }
375 383
376out: 384out:
377 if (ret == -EBUSY) 385 if (ret == -EBUSY)
378 f = NULL; 386 f = NULL;
379 387
380 gt215_clk_post(clk, f); 388 gt215_clk_post(&clk->base, f);
381 return ret; 389 return ret;
382} 390}
383 391
384static void 392static void
385mcp77_clk_tidy(struct nvkm_clk *clk) 393mcp77_clk_tidy(struct nvkm_clk *base)
386{ 394{
387} 395}
388 396
389static struct nvkm_domain 397static const struct nvkm_clk_func
390mcp77_domains[] = { 398mcp77_clk = {
391 { nv_clk_src_crystal, 0xff }, 399 .read = mcp77_clk_read,
392 { nv_clk_src_href , 0xff }, 400 .calc = mcp77_clk_calc,
393 { nv_clk_src_core , 0xff, 0, "core", 1000 }, 401 .prog = mcp77_clk_prog,
394 { nv_clk_src_shader , 0xff, 0, "shader", 1000 }, 402 .tidy = mcp77_clk_tidy,
395 { nv_clk_src_vdec , 0xff, 0, "vdec", 1000 }, 403 .domains = {
396 { nv_clk_src_max } 404 { nv_clk_src_crystal, 0xff },
405 { nv_clk_src_href , 0xff },
406 { nv_clk_src_core , 0xff, 0, "core", 1000 },
407 { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
408 { nv_clk_src_vdec , 0xff, 0, "vdec", 1000 },
409 { nv_clk_src_max }
410 }
397}; 411};
398 412
399static int 413int
400mcp77_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 414mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
401 struct nvkm_oclass *oclass, void *data, u32 size,
402 struct nvkm_object **pobject)
403{ 415{
404 struct mcp77_clk_priv *priv; 416 struct mcp77_clk *clk;
405 int ret;
406 417
407 ret = nvkm_clk_create(parent, engine, oclass, mcp77_domains, 418 if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
408 NULL, 0, true, &priv); 419 return -ENOMEM;
409 *pobject = nv_object(priv); 420 *pclk = &clk->base;
410 if (ret)
411 return ret;
412 421
413 priv->base.read = mcp77_clk_read; 422 return nvkm_clk_ctor(&mcp77_clk, device, index, true, &clk->base);
414 priv->base.calc = mcp77_clk_calc;
415 priv->base.prog = mcp77_clk_prog;
416 priv->base.tidy = mcp77_clk_tidy;
417 return 0;
418} 423}
419
420struct nvkm_oclass *
421mcp77_clk_oclass = &(struct nvkm_oclass) {
422 .handle = NV_SUBDEV(CLK, 0xaa),
423 .ofuncs = &(struct nvkm_ofuncs) {
424 .ctor = mcp77_clk_ctor,
425 .dtor = _nvkm_clk_dtor,
426 .init = _nvkm_clk_init,
427 .fini = _nvkm_clk_fini,
428 },
429};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
index 63dbbb575228..b280f85e8827 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
@@ -21,23 +21,19 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <subdev/clk.h> 24#include "priv.h"
25#include "pll.h" 25#include "pll.h"
26 26
27#include <subdev/bios.h> 27#include <subdev/bios.h>
28#include <subdev/bios/pll.h> 28#include <subdev/bios/pll.h>
29#include <subdev/devinit/nv04.h> 29#include <subdev/devinit/nv04.h>
30 30
31struct nv04_clk_priv {
32 struct nvkm_clk base;
33};
34
35int 31int
36nv04_clk_pll_calc(struct nvkm_clk *clock, struct nvbios_pll *info, 32nv04_clk_pll_calc(struct nvkm_clk *clock, struct nvbios_pll *info,
37 int clk, struct nvkm_pll_vals *pv) 33 int clk, struct nvkm_pll_vals *pv)
38{ 34{
39 int N1, M1, N2, M2, P; 35 int N1, M1, N2, M2, P;
40 int ret = nv04_pll_calc(nv_subdev(clock), info, clk, &N1, &M1, &N2, &M2, &P); 36 int ret = nv04_pll_calc(&clock->subdev, info, clk, &N1, &M1, &N2, &M2, &P);
41 if (ret) { 37 if (ret) {
42 pv->refclk = info->refclk; 38 pv->refclk = info->refclk;
43 pv->N1 = N1; 39 pv->N1 = N1;
@@ -52,8 +48,9 @@ nv04_clk_pll_calc(struct nvkm_clk *clock, struct nvbios_pll *info,
52int 48int
53nv04_clk_pll_prog(struct nvkm_clk *clk, u32 reg1, struct nvkm_pll_vals *pv) 49nv04_clk_pll_prog(struct nvkm_clk *clk, u32 reg1, struct nvkm_pll_vals *pv)
54{ 50{
55 struct nvkm_devinit *devinit = nvkm_devinit(clk); 51 struct nvkm_device *device = clk->subdev.device;
56 int cv = nvkm_bios(clk)->version.chip; 52 struct nvkm_devinit *devinit = device->devinit;
53 int cv = device->bios->version.chip;
57 54
58 if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 || 55 if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
59 cv >= 0x40) { 56 cv >= 0x40) {
@@ -67,37 +64,20 @@ nv04_clk_pll_prog(struct nvkm_clk *clk, u32 reg1, struct nvkm_pll_vals *pv)
67 return 0; 64 return 0;
68} 65}
69 66
70static struct nvkm_domain 67static const struct nvkm_clk_func
71nv04_domain[] = { 68nv04_clk = {
72 { nv_clk_src_max } 69 .domains = {
70 { nv_clk_src_max }
71 }
73}; 72};
74 73
75static int 74int
76nv04_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 75nv04_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
77 struct nvkm_oclass *oclass, void *data, u32 size,
78 struct nvkm_object **pobject)
79{ 76{
80 struct nv04_clk_priv *priv; 77 int ret = nvkm_clk_new_(&nv04_clk, device, index, false, pclk);
81 int ret; 78 if (ret == 0) {
82 79 (*pclk)->pll_calc = nv04_clk_pll_calc;
83 ret = nvkm_clk_create(parent, engine, oclass, nv04_domain, 80 (*pclk)->pll_prog = nv04_clk_pll_prog;
84 NULL, 0, false, &priv); 81 }
85 *pobject = nv_object(priv); 82 return ret;
86 if (ret)
87 return ret;
88
89 priv->base.pll_calc = nv04_clk_pll_calc;
90 priv->base.pll_prog = nv04_clk_pll_prog;
91 return 0;
92} 83}
93
94struct nvkm_oclass
95nv04_clk_oclass = {
96 .handle = NV_SUBDEV(CLK, 0x04),
97 .ofuncs = &(struct nvkm_ofuncs) {
98 .ctor = nv04_clk_ctor,
99 .dtor = _nvkm_clk_dtor,
100 .init = _nvkm_clk_init,
101 .fini = _nvkm_clk_fini,
102 },
103};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
index ed838130c89d..2ab9b9b84018 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
@@ -21,14 +21,14 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <subdev/clk.h> 24#define nv40_clk(p) container_of((p), struct nv40_clk, base)
25#include "priv.h"
25#include "pll.h" 26#include "pll.h"
26 27
27#include <core/device.h>
28#include <subdev/bios.h> 28#include <subdev/bios.h>
29#include <subdev/bios/pll.h> 29#include <subdev/bios/pll.h>
30 30
31struct nv40_clk_priv { 31struct nv40_clk {
32 struct nvkm_clk base; 32 struct nvkm_clk base;
33 u32 ctrl; 33 u32 ctrl;
34 u32 npll_ctrl; 34 u32 npll_ctrl;
@@ -36,64 +36,56 @@ struct nv40_clk_priv {
36 u32 spll; 36 u32 spll;
37}; 37};
38 38
39static struct nvkm_domain
40nv40_domain[] = {
41 { nv_clk_src_crystal, 0xff },
42 { nv_clk_src_href , 0xff },
43 { nv_clk_src_core , 0xff, 0, "core", 1000 },
44 { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
45 { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
46 { nv_clk_src_max }
47};
48
49static u32 39static u32
50read_pll_1(struct nv40_clk_priv *priv, u32 reg) 40read_pll_1(struct nv40_clk *clk, u32 reg)
51{ 41{
52 u32 ctrl = nv_rd32(priv, reg + 0x00); 42 struct nvkm_device *device = clk->base.subdev.device;
43 u32 ctrl = nvkm_rd32(device, reg + 0x00);
53 int P = (ctrl & 0x00070000) >> 16; 44 int P = (ctrl & 0x00070000) >> 16;
54 int N = (ctrl & 0x0000ff00) >> 8; 45 int N = (ctrl & 0x0000ff00) >> 8;
55 int M = (ctrl & 0x000000ff) >> 0; 46 int M = (ctrl & 0x000000ff) >> 0;
56 u32 ref = 27000, clk = 0; 47 u32 ref = 27000, khz = 0;
57 48
58 if (ctrl & 0x80000000) 49 if (ctrl & 0x80000000)
59 clk = ref * N / M; 50 khz = ref * N / M;
60 51
61 return clk >> P; 52 return khz >> P;
62} 53}
63 54
64static u32 55static u32
65read_pll_2(struct nv40_clk_priv *priv, u32 reg) 56read_pll_2(struct nv40_clk *clk, u32 reg)
66{ 57{
67 u32 ctrl = nv_rd32(priv, reg + 0x00); 58 struct nvkm_device *device = clk->base.subdev.device;
68 u32 coef = nv_rd32(priv, reg + 0x04); 59 u32 ctrl = nvkm_rd32(device, reg + 0x00);
60 u32 coef = nvkm_rd32(device, reg + 0x04);
69 int N2 = (coef & 0xff000000) >> 24; 61 int N2 = (coef & 0xff000000) >> 24;
70 int M2 = (coef & 0x00ff0000) >> 16; 62 int M2 = (coef & 0x00ff0000) >> 16;
71 int N1 = (coef & 0x0000ff00) >> 8; 63 int N1 = (coef & 0x0000ff00) >> 8;
72 int M1 = (coef & 0x000000ff) >> 0; 64 int M1 = (coef & 0x000000ff) >> 0;
73 int P = (ctrl & 0x00070000) >> 16; 65 int P = (ctrl & 0x00070000) >> 16;
74 u32 ref = 27000, clk = 0; 66 u32 ref = 27000, khz = 0;
75 67
76 if ((ctrl & 0x80000000) && M1) { 68 if ((ctrl & 0x80000000) && M1) {
77 clk = ref * N1 / M1; 69 khz = ref * N1 / M1;
78 if ((ctrl & 0x40000100) == 0x40000000) { 70 if ((ctrl & 0x40000100) == 0x40000000) {
79 if (M2) 71 if (M2)
80 clk = clk * N2 / M2; 72 khz = khz * N2 / M2;
81 else 73 else
82 clk = 0; 74 khz = 0;
83 } 75 }
84 } 76 }
85 77
86 return clk >> P; 78 return khz >> P;
87} 79}
88 80
89static u32 81static u32
90read_clk(struct nv40_clk_priv *priv, u32 src) 82read_clk(struct nv40_clk *clk, u32 src)
91{ 83{
92 switch (src) { 84 switch (src) {
93 case 3: 85 case 3:
94 return read_pll_2(priv, 0x004000); 86 return read_pll_2(clk, 0x004000);
95 case 2: 87 case 2:
96 return read_pll_1(priv, 0x004008); 88 return read_pll_1(clk, 0x004008);
97 default: 89 default:
98 break; 90 break;
99 } 91 }
@@ -102,46 +94,48 @@ read_clk(struct nv40_clk_priv *priv, u32 src)
102} 94}
103 95
104static int 96static int
105nv40_clk_read(struct nvkm_clk *clk, enum nv_clk_src src) 97nv40_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
106{ 98{
107 struct nv40_clk_priv *priv = (void *)clk; 99 struct nv40_clk *clk = nv40_clk(base);
108 u32 mast = nv_rd32(priv, 0x00c040); 100 struct nvkm_subdev *subdev = &clk->base.subdev;
101 struct nvkm_device *device = subdev->device;
102 u32 mast = nvkm_rd32(device, 0x00c040);
109 103
110 switch (src) { 104 switch (src) {
111 case nv_clk_src_crystal: 105 case nv_clk_src_crystal:
112 return nv_device(priv)->crystal; 106 return device->crystal;
113 case nv_clk_src_href: 107 case nv_clk_src_href:
114 return 100000; /*XXX: PCIE/AGP differ*/ 108 return 100000; /*XXX: PCIE/AGP differ*/
115 case nv_clk_src_core: 109 case nv_clk_src_core:
116 return read_clk(priv, (mast & 0x00000003) >> 0); 110 return read_clk(clk, (mast & 0x00000003) >> 0);
117 case nv_clk_src_shader: 111 case nv_clk_src_shader:
118 return read_clk(priv, (mast & 0x00000030) >> 4); 112 return read_clk(clk, (mast & 0x00000030) >> 4);
119 case nv_clk_src_mem: 113 case nv_clk_src_mem:
120 return read_pll_2(priv, 0x4020); 114 return read_pll_2(clk, 0x4020);
121 default: 115 default:
122 break; 116 break;
123 } 117 }
124 118
125 nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast); 119 nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
126 return -EINVAL; 120 return -EINVAL;
127} 121}
128 122
129static int 123static int
130nv40_clk_calc_pll(struct nv40_clk_priv *priv, u32 reg, u32 clk, 124nv40_clk_calc_pll(struct nv40_clk *clk, u32 reg, u32 khz,
131 int *N1, int *M1, int *N2, int *M2, int *log2P) 125 int *N1, int *M1, int *N2, int *M2, int *log2P)
132{ 126{
133 struct nvkm_bios *bios = nvkm_bios(priv); 127 struct nvkm_subdev *subdev = &clk->base.subdev;
134 struct nvbios_pll pll; 128 struct nvbios_pll pll;
135 int ret; 129 int ret;
136 130
137 ret = nvbios_pll_parse(bios, reg, &pll); 131 ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
138 if (ret) 132 if (ret)
139 return ret; 133 return ret;
140 134
141 if (clk < pll.vco1.max_freq) 135 if (khz < pll.vco1.max_freq)
142 pll.vco2.max_freq = 0; 136 pll.vco2.max_freq = 0;
143 137
144 ret = nv04_pll_calc(nv_subdev(priv), &pll, clk, N1, M1, N2, M2, log2P); 138 ret = nv04_pll_calc(subdev, &pll, khz, N1, M1, N2, M2, log2P);
145 if (ret == 0) 139 if (ret == 0)
146 return -ERANGE; 140 return -ERANGE;
147 141
@@ -149,93 +143,90 @@ nv40_clk_calc_pll(struct nv40_clk_priv *priv, u32 reg, u32 clk,
149} 143}
150 144
151static int 145static int
152nv40_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate) 146nv40_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
153{ 147{
154 struct nv40_clk_priv *priv = (void *)clk; 148 struct nv40_clk *clk = nv40_clk(base);
155 int gclk = cstate->domain[nv_clk_src_core]; 149 int gclk = cstate->domain[nv_clk_src_core];
156 int sclk = cstate->domain[nv_clk_src_shader]; 150 int sclk = cstate->domain[nv_clk_src_shader];
157 int N1, M1, N2, M2, log2P; 151 int N1, M1, N2, M2, log2P;
158 int ret; 152 int ret;
159 153
160 /* core/geometric clock */ 154 /* core/geometric clock */
161 ret = nv40_clk_calc_pll(priv, 0x004000, gclk, 155 ret = nv40_clk_calc_pll(clk, 0x004000, gclk,
162 &N1, &M1, &N2, &M2, &log2P); 156 &N1, &M1, &N2, &M2, &log2P);
163 if (ret < 0) 157 if (ret < 0)
164 return ret; 158 return ret;
165 159
166 if (N2 == M2) { 160 if (N2 == M2) {
167 priv->npll_ctrl = 0x80000100 | (log2P << 16); 161 clk->npll_ctrl = 0x80000100 | (log2P << 16);
168 priv->npll_coef = (N1 << 8) | M1; 162 clk->npll_coef = (N1 << 8) | M1;
169 } else { 163 } else {
170 priv->npll_ctrl = 0xc0000000 | (log2P << 16); 164 clk->npll_ctrl = 0xc0000000 | (log2P << 16);
171 priv->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1; 165 clk->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
172 } 166 }
173 167
174 /* use the second pll for shader/rop clock, if it differs from core */ 168 /* use the second pll for shader/rop clock, if it differs from core */
175 if (sclk && sclk != gclk) { 169 if (sclk && sclk != gclk) {
176 ret = nv40_clk_calc_pll(priv, 0x004008, sclk, 170 ret = nv40_clk_calc_pll(clk, 0x004008, sclk,
177 &N1, &M1, NULL, NULL, &log2P); 171 &N1, &M1, NULL, NULL, &log2P);
178 if (ret < 0) 172 if (ret < 0)
179 return ret; 173 return ret;
180 174
181 priv->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1; 175 clk->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
182 priv->ctrl = 0x00000223; 176 clk->ctrl = 0x00000223;
183 } else { 177 } else {
184 priv->spll = 0x00000000; 178 clk->spll = 0x00000000;
185 priv->ctrl = 0x00000333; 179 clk->ctrl = 0x00000333;
186 } 180 }
187 181
188 return 0; 182 return 0;
189} 183}
190 184
191static int 185static int
192nv40_clk_prog(struct nvkm_clk *clk) 186nv40_clk_prog(struct nvkm_clk *base)
193{ 187{
194 struct nv40_clk_priv *priv = (void *)clk; 188 struct nv40_clk *clk = nv40_clk(base);
195 nv_mask(priv, 0x00c040, 0x00000333, 0x00000000); 189 struct nvkm_device *device = clk->base.subdev.device;
196 nv_wr32(priv, 0x004004, priv->npll_coef); 190 nvkm_mask(device, 0x00c040, 0x00000333, 0x00000000);
197 nv_mask(priv, 0x004000, 0xc0070100, priv->npll_ctrl); 191 nvkm_wr32(device, 0x004004, clk->npll_coef);
198 nv_mask(priv, 0x004008, 0xc007ffff, priv->spll); 192 nvkm_mask(device, 0x004000, 0xc0070100, clk->npll_ctrl);
193 nvkm_mask(device, 0x004008, 0xc007ffff, clk->spll);
199 mdelay(5); 194 mdelay(5);
200 nv_mask(priv, 0x00c040, 0x00000333, priv->ctrl); 195 nvkm_mask(device, 0x00c040, 0x00000333, clk->ctrl);
201 return 0; 196 return 0;
202} 197}
203 198
204static void 199static void
205nv40_clk_tidy(struct nvkm_clk *clk) 200nv40_clk_tidy(struct nvkm_clk *obj)
206{ 201{
207} 202}
208 203
209static int 204static const struct nvkm_clk_func
210nv40_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 205nv40_clk = {
211 struct nvkm_oclass *oclass, void *data, u32 size, 206 .read = nv40_clk_read,
212 struct nvkm_object **pobject) 207 .calc = nv40_clk_calc,
208 .prog = nv40_clk_prog,
209 .tidy = nv40_clk_tidy,
210 .domains = {
211 { nv_clk_src_crystal, 0xff },
212 { nv_clk_src_href , 0xff },
213 { nv_clk_src_core , 0xff, 0, "core", 1000 },
214 { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
215 { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
216 { nv_clk_src_max }
217 }
218};
219
220int
221nv40_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
213{ 222{
214 struct nv40_clk_priv *priv; 223 struct nv40_clk *clk;
215 int ret;
216 224
217 ret = nvkm_clk_create(parent, engine, oclass, nv40_domain, 225 if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
218 NULL, 0, true, &priv); 226 return -ENOMEM;
219 *pobject = nv_object(priv); 227 clk->base.pll_calc = nv04_clk_pll_calc;
220 if (ret) 228 clk->base.pll_prog = nv04_clk_pll_prog;
221 return ret; 229 *pclk = &clk->base;
222 230
223 priv->base.pll_calc = nv04_clk_pll_calc; 231 return nvkm_clk_ctor(&nv40_clk, device, index, true, &clk->base);
224 priv->base.pll_prog = nv04_clk_pll_prog;
225 priv->base.read = nv40_clk_read;
226 priv->base.calc = nv40_clk_calc;
227 priv->base.prog = nv40_clk_prog;
228 priv->base.tidy = nv40_clk_tidy;
229 return 0;
230} 232}
231
232struct nvkm_oclass
233nv40_clk_oclass = {
234 .handle = NV_SUBDEV(CLK, 0x40),
235 .ofuncs = &(struct nvkm_ofuncs) {
236 .ctor = nv40_clk_ctor,
237 .dtor = _nvkm_clk_dtor,
238 .init = _nvkm_clk_init,
239 .fini = _nvkm_clk_fini,
240 },
241};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
index 9b4ffd6347ce..5841f297973c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
@@ -25,38 +25,39 @@
25#include "pll.h" 25#include "pll.h"
26#include "seq.h" 26#include "seq.h"
27 27
28#include <core/device.h>
29#include <subdev/bios.h> 28#include <subdev/bios.h>
30#include <subdev/bios/pll.h> 29#include <subdev/bios/pll.h>
31 30
32static u32 31static u32
33read_div(struct nv50_clk_priv *priv) 32read_div(struct nv50_clk *clk)
34{ 33{
35 switch (nv_device(priv)->chipset) { 34 struct nvkm_device *device = clk->base.subdev.device;
35 switch (device->chipset) {
36 case 0x50: /* it exists, but only has bit 31, not the dividers.. */ 36 case 0x50: /* it exists, but only has bit 31, not the dividers.. */
37 case 0x84: 37 case 0x84:
38 case 0x86: 38 case 0x86:
39 case 0x98: 39 case 0x98:
40 case 0xa0: 40 case 0xa0:
41 return nv_rd32(priv, 0x004700); 41 return nvkm_rd32(device, 0x004700);
42 case 0x92: 42 case 0x92:
43 case 0x94: 43 case 0x94:
44 case 0x96: 44 case 0x96:
45 return nv_rd32(priv, 0x004800); 45 return nvkm_rd32(device, 0x004800);
46 default: 46 default:
47 return 0x00000000; 47 return 0x00000000;
48 } 48 }
49} 49}
50 50
51static u32 51static u32
52read_pll_src(struct nv50_clk_priv *priv, u32 base) 52read_pll_src(struct nv50_clk *clk, u32 base)
53{ 53{
54 struct nvkm_clk *clk = &priv->base; 54 struct nvkm_subdev *subdev = &clk->base.subdev;
55 u32 coef, ref = clk->read(clk, nv_clk_src_crystal); 55 struct nvkm_device *device = subdev->device;
56 u32 rsel = nv_rd32(priv, 0x00e18c); 56 u32 coef, ref = nvkm_clk_read(&clk->base, nv_clk_src_crystal);
57 u32 rsel = nvkm_rd32(device, 0x00e18c);
57 int P, N, M, id; 58 int P, N, M, id;
58 59
59 switch (nv_device(priv)->chipset) { 60 switch (device->chipset) {
60 case 0x50: 61 case 0x50:
61 case 0xa0: 62 case 0xa0:
62 switch (base) { 63 switch (base) {
@@ -65,11 +66,11 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
65 case 0x4008: id = !!(rsel & 0x00000008); break; 66 case 0x4008: id = !!(rsel & 0x00000008); break;
66 case 0x4030: id = 0; break; 67 case 0x4030: id = 0; break;
67 default: 68 default:
68 nv_error(priv, "ref: bad pll 0x%06x\n", base); 69 nvkm_error(subdev, "ref: bad pll %06x\n", base);
69 return 0; 70 return 0;
70 } 71 }
71 72
72 coef = nv_rd32(priv, 0x00e81c + (id * 0x0c)); 73 coef = nvkm_rd32(device, 0x00e81c + (id * 0x0c));
73 ref *= (coef & 0x01000000) ? 2 : 4; 74 ref *= (coef & 0x01000000) ? 2 : 4;
74 P = (coef & 0x00070000) >> 16; 75 P = (coef & 0x00070000) >> 16;
75 N = ((coef & 0x0000ff00) >> 8) + 1; 76 N = ((coef & 0x0000ff00) >> 8) + 1;
@@ -78,7 +79,7 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
78 case 0x84: 79 case 0x84:
79 case 0x86: 80 case 0x86:
80 case 0x92: 81 case 0x92:
81 coef = nv_rd32(priv, 0x00e81c); 82 coef = nvkm_rd32(device, 0x00e81c);
82 P = (coef & 0x00070000) >> 16; 83 P = (coef & 0x00070000) >> 16;
83 N = (coef & 0x0000ff00) >> 8; 84 N = (coef & 0x0000ff00) >> 8;
84 M = (coef & 0x000000ff) >> 0; 85 M = (coef & 0x000000ff) >> 0;
@@ -86,26 +87,26 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
86 case 0x94: 87 case 0x94:
87 case 0x96: 88 case 0x96:
88 case 0x98: 89 case 0x98:
89 rsel = nv_rd32(priv, 0x00c050); 90 rsel = nvkm_rd32(device, 0x00c050);
90 switch (base) { 91 switch (base) {
91 case 0x4020: rsel = (rsel & 0x00000003) >> 0; break; 92 case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
92 case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break; 93 case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
93 case 0x4028: rsel = (rsel & 0x00001800) >> 11; break; 94 case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
94 case 0x4030: rsel = 3; break; 95 case 0x4030: rsel = 3; break;
95 default: 96 default:
96 nv_error(priv, "ref: bad pll 0x%06x\n", base); 97 nvkm_error(subdev, "ref: bad pll %06x\n", base);
97 return 0; 98 return 0;
98 } 99 }
99 100
100 switch (rsel) { 101 switch (rsel) {
101 case 0: id = 1; break; 102 case 0: id = 1; break;
102 case 1: return clk->read(clk, nv_clk_src_crystal); 103 case 1: return nvkm_clk_read(&clk->base, nv_clk_src_crystal);
103 case 2: return clk->read(clk, nv_clk_src_href); 104 case 2: return nvkm_clk_read(&clk->base, nv_clk_src_href);
104 case 3: id = 0; break; 105 case 3: id = 0; break;
105 } 106 }
106 107
107 coef = nv_rd32(priv, 0x00e81c + (id * 0x28)); 108 coef = nvkm_rd32(device, 0x00e81c + (id * 0x28));
108 P = (nv_rd32(priv, 0x00e824 + (id * 0x28)) >> 16) & 7; 109 P = (nvkm_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
109 P += (coef & 0x00070000) >> 16; 110 P += (coef & 0x00070000) >> 16;
110 N = (coef & 0x0000ff00) >> 8; 111 N = (coef & 0x0000ff00) >> 8;
111 M = (coef & 0x000000ff) >> 0; 112 M = (coef & 0x000000ff) >> 0;
@@ -121,10 +122,11 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
121} 122}
122 123
123static u32 124static u32
124read_pll_ref(struct nv50_clk_priv *priv, u32 base) 125read_pll_ref(struct nv50_clk *clk, u32 base)
125{ 126{
126 struct nvkm_clk *clk = &priv->base; 127 struct nvkm_subdev *subdev = &clk->base.subdev;
127 u32 src, mast = nv_rd32(priv, 0x00c040); 128 struct nvkm_device *device = subdev->device;
129 u32 src, mast = nvkm_rd32(device, 0x00c040);
128 130
129 switch (base) { 131 switch (base) {
130 case 0x004028: 132 case 0x004028:
@@ -140,33 +142,33 @@ read_pll_ref(struct nv50_clk_priv *priv, u32 base)
140 src = !!(mast & 0x02000000); 142 src = !!(mast & 0x02000000);
141 break; 143 break;
142 case 0x00e810: 144 case 0x00e810:
143 return clk->read(clk, nv_clk_src_crystal); 145 return nvkm_clk_read(&clk->base, nv_clk_src_crystal);
144 default: 146 default:
145 nv_error(priv, "bad pll 0x%06x\n", base); 147 nvkm_error(subdev, "bad pll %06x\n", base);
146 return 0; 148 return 0;
147 } 149 }
148 150
149 if (src) 151 if (src)
150 return clk->read(clk, nv_clk_src_href); 152 return nvkm_clk_read(&clk->base, nv_clk_src_href);
151 153
152 return read_pll_src(priv, base); 154 return read_pll_src(clk, base);
153} 155}
154 156
155static u32 157static u32
156read_pll(struct nv50_clk_priv *priv, u32 base) 158read_pll(struct nv50_clk *clk, u32 base)
157{ 159{
158 struct nvkm_clk *clk = &priv->base; 160 struct nvkm_device *device = clk->base.subdev.device;
159 u32 mast = nv_rd32(priv, 0x00c040); 161 u32 mast = nvkm_rd32(device, 0x00c040);
160 u32 ctrl = nv_rd32(priv, base + 0); 162 u32 ctrl = nvkm_rd32(device, base + 0);
161 u32 coef = nv_rd32(priv, base + 4); 163 u32 coef = nvkm_rd32(device, base + 4);
162 u32 ref = read_pll_ref(priv, base); 164 u32 ref = read_pll_ref(clk, base);
163 u32 freq = 0; 165 u32 freq = 0;
164 int N1, N2, M1, M2; 166 int N1, N2, M1, M2;
165 167
166 if (base == 0x004028 && (mast & 0x00100000)) { 168 if (base == 0x004028 && (mast & 0x00100000)) {
167 /* wtf, appears to only disable post-divider on gt200 */ 169 /* wtf, appears to only disable post-divider on gt200 */
168 if (nv_device(priv)->chipset != 0xa0) 170 if (device->chipset != 0xa0)
169 return clk->read(clk, nv_clk_src_dom6); 171 return nvkm_clk_read(&clk->base, nv_clk_src_dom6);
170 } 172 }
171 173
172 N2 = (coef & 0xff000000) >> 24; 174 N2 = (coef & 0xff000000) >> 24;
@@ -186,71 +188,73 @@ read_pll(struct nv50_clk_priv *priv, u32 base)
186 return freq; 188 return freq;
187} 189}
188 190
189static int 191int
190nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src) 192nv50_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
191{ 193{
192 struct nv50_clk_priv *priv = (void *)clk; 194 struct nv50_clk *clk = nv50_clk(base);
193 u32 mast = nv_rd32(priv, 0x00c040); 195 struct nvkm_subdev *subdev = &clk->base.subdev;
196 struct nvkm_device *device = subdev->device;
197 u32 mast = nvkm_rd32(device, 0x00c040);
194 u32 P = 0; 198 u32 P = 0;
195 199
196 switch (src) { 200 switch (src) {
197 case nv_clk_src_crystal: 201 case nv_clk_src_crystal:
198 return nv_device(priv)->crystal; 202 return device->crystal;
199 case nv_clk_src_href: 203 case nv_clk_src_href:
200 return 100000; /* PCIE reference clock */ 204 return 100000; /* PCIE reference clock */
201 case nv_clk_src_hclk: 205 case nv_clk_src_hclk:
202 return div_u64((u64)clk->read(clk, nv_clk_src_href) * 27778, 10000); 206 return div_u64((u64)nvkm_clk_read(&clk->base, nv_clk_src_href) * 27778, 10000);
203 case nv_clk_src_hclkm3: 207 case nv_clk_src_hclkm3:
204 return clk->read(clk, nv_clk_src_hclk) * 3; 208 return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3;
205 case nv_clk_src_hclkm3d2: 209 case nv_clk_src_hclkm3d2:
206 return clk->read(clk, nv_clk_src_hclk) * 3 / 2; 210 return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3 / 2;
207 case nv_clk_src_host: 211 case nv_clk_src_host:
208 switch (mast & 0x30000000) { 212 switch (mast & 0x30000000) {
209 case 0x00000000: return clk->read(clk, nv_clk_src_href); 213 case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
210 case 0x10000000: break; 214 case 0x10000000: break;
211 case 0x20000000: /* !0x50 */ 215 case 0x20000000: /* !0x50 */
212 case 0x30000000: return clk->read(clk, nv_clk_src_hclk); 216 case 0x30000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk);
213 } 217 }
214 break; 218 break;
215 case nv_clk_src_core: 219 case nv_clk_src_core:
216 if (!(mast & 0x00100000)) 220 if (!(mast & 0x00100000))
217 P = (nv_rd32(priv, 0x004028) & 0x00070000) >> 16; 221 P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
218 switch (mast & 0x00000003) { 222 switch (mast & 0x00000003) {
219 case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P; 223 case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
220 case 0x00000001: return clk->read(clk, nv_clk_src_dom6); 224 case 0x00000001: return nvkm_clk_read(&clk->base, nv_clk_src_dom6);
221 case 0x00000002: return read_pll(priv, 0x004020) >> P; 225 case 0x00000002: return read_pll(clk, 0x004020) >> P;
222 case 0x00000003: return read_pll(priv, 0x004028) >> P; 226 case 0x00000003: return read_pll(clk, 0x004028) >> P;
223 } 227 }
224 break; 228 break;
225 case nv_clk_src_shader: 229 case nv_clk_src_shader:
226 P = (nv_rd32(priv, 0x004020) & 0x00070000) >> 16; 230 P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
227 switch (mast & 0x00000030) { 231 switch (mast & 0x00000030) {
228 case 0x00000000: 232 case 0x00000000:
229 if (mast & 0x00000080) 233 if (mast & 0x00000080)
230 return clk->read(clk, nv_clk_src_host) >> P; 234 return nvkm_clk_read(&clk->base, nv_clk_src_host) >> P;
231 return clk->read(clk, nv_clk_src_crystal) >> P; 235 return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
232 case 0x00000010: break; 236 case 0x00000010: break;
233 case 0x00000020: return read_pll(priv, 0x004028) >> P; 237 case 0x00000020: return read_pll(clk, 0x004028) >> P;
234 case 0x00000030: return read_pll(priv, 0x004020) >> P; 238 case 0x00000030: return read_pll(clk, 0x004020) >> P;
235 } 239 }
236 break; 240 break;
237 case nv_clk_src_mem: 241 case nv_clk_src_mem:
238 P = (nv_rd32(priv, 0x004008) & 0x00070000) >> 16; 242 P = (nvkm_rd32(device, 0x004008) & 0x00070000) >> 16;
239 if (nv_rd32(priv, 0x004008) & 0x00000200) { 243 if (nvkm_rd32(device, 0x004008) & 0x00000200) {
240 switch (mast & 0x0000c000) { 244 switch (mast & 0x0000c000) {
241 case 0x00000000: 245 case 0x00000000:
242 return clk->read(clk, nv_clk_src_crystal) >> P; 246 return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
243 case 0x00008000: 247 case 0x00008000:
244 case 0x0000c000: 248 case 0x0000c000:
245 return clk->read(clk, nv_clk_src_href) >> P; 249 return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
246 } 250 }
247 } else { 251 } else {
248 return read_pll(priv, 0x004008) >> P; 252 return read_pll(clk, 0x004008) >> P;
249 } 253 }
250 break; 254 break;
251 case nv_clk_src_vdec: 255 case nv_clk_src_vdec:
252 P = (read_div(priv) & 0x00000700) >> 8; 256 P = (read_div(clk) & 0x00000700) >> 8;
253 switch (nv_device(priv)->chipset) { 257 switch (device->chipset) {
254 case 0x84: 258 case 0x84:
255 case 0x86: 259 case 0x86:
256 case 0x92: 260 case 0x92:
@@ -259,51 +263,51 @@ nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
259 case 0xa0: 263 case 0xa0:
260 switch (mast & 0x00000c00) { 264 switch (mast & 0x00000c00) {
261 case 0x00000000: 265 case 0x00000000:
262 if (nv_device(priv)->chipset == 0xa0) /* wtf?? */ 266 if (device->chipset == 0xa0) /* wtf?? */
263 return clk->read(clk, nv_clk_src_core) >> P; 267 return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
264 return clk->read(clk, nv_clk_src_crystal) >> P; 268 return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
265 case 0x00000400: 269 case 0x00000400:
266 return 0; 270 return 0;
267 case 0x00000800: 271 case 0x00000800:
268 if (mast & 0x01000000) 272 if (mast & 0x01000000)
269 return read_pll(priv, 0x004028) >> P; 273 return read_pll(clk, 0x004028) >> P;
270 return read_pll(priv, 0x004030) >> P; 274 return read_pll(clk, 0x004030) >> P;
271 case 0x00000c00: 275 case 0x00000c00:
272 return clk->read(clk, nv_clk_src_core) >> P; 276 return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
273 } 277 }
274 break; 278 break;
275 case 0x98: 279 case 0x98:
276 switch (mast & 0x00000c00) { 280 switch (mast & 0x00000c00) {
277 case 0x00000000: 281 case 0x00000000:
278 return clk->read(clk, nv_clk_src_core) >> P; 282 return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
279 case 0x00000400: 283 case 0x00000400:
280 return 0; 284 return 0;
281 case 0x00000800: 285 case 0x00000800:
282 return clk->read(clk, nv_clk_src_hclkm3d2) >> P; 286 return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2) >> P;
283 case 0x00000c00: 287 case 0x00000c00:
284 return clk->read(clk, nv_clk_src_mem) >> P; 288 return nvkm_clk_read(&clk->base, nv_clk_src_mem) >> P;
285 } 289 }
286 break; 290 break;
287 } 291 }
288 break; 292 break;
289 case nv_clk_src_dom6: 293 case nv_clk_src_dom6:
290 switch (nv_device(priv)->chipset) { 294 switch (device->chipset) {
291 case 0x50: 295 case 0x50:
292 case 0xa0: 296 case 0xa0:
293 return read_pll(priv, 0x00e810) >> 2; 297 return read_pll(clk, 0x00e810) >> 2;
294 case 0x84: 298 case 0x84:
295 case 0x86: 299 case 0x86:
296 case 0x92: 300 case 0x92:
297 case 0x94: 301 case 0x94:
298 case 0x96: 302 case 0x96:
299 case 0x98: 303 case 0x98:
300 P = (read_div(priv) & 0x00000007) >> 0; 304 P = (read_div(clk) & 0x00000007) >> 0;
301 switch (mast & 0x0c000000) { 305 switch (mast & 0x0c000000) {
302 case 0x00000000: return clk->read(clk, nv_clk_src_href); 306 case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
303 case 0x04000000: break; 307 case 0x04000000: break;
304 case 0x08000000: return clk->read(clk, nv_clk_src_hclk); 308 case 0x08000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk);
305 case 0x0c000000: 309 case 0x0c000000:
306 return clk->read(clk, nv_clk_src_hclkm3) >> P; 310 return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3) >> P;
307 } 311 }
308 break; 312 break;
309 default: 313 default:
@@ -313,27 +317,27 @@ nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
313 break; 317 break;
314 } 318 }
315 319
316 nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast); 320 nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
317 return -EINVAL; 321 return -EINVAL;
318} 322}
319 323
320static u32 324static u32
321calc_pll(struct nv50_clk_priv *priv, u32 reg, u32 clk, int *N, int *M, int *P) 325calc_pll(struct nv50_clk *clk, u32 reg, u32 idx, int *N, int *M, int *P)
322{ 326{
323 struct nvkm_bios *bios = nvkm_bios(priv); 327 struct nvkm_subdev *subdev = &clk->base.subdev;
324 struct nvbios_pll pll; 328 struct nvbios_pll pll;
325 int ret; 329 int ret;
326 330
327 ret = nvbios_pll_parse(bios, reg, &pll); 331 ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
328 if (ret) 332 if (ret)
329 return 0; 333 return 0;
330 334
331 pll.vco2.max_freq = 0; 335 pll.vco2.max_freq = 0;
332 pll.refclk = read_pll_ref(priv, reg); 336 pll.refclk = read_pll_ref(clk, reg);
333 if (!pll.refclk) 337 if (!pll.refclk)
334 return 0; 338 return 0;
335 339
336 return nv04_pll_calc(nv_subdev(priv), &pll, clk, N, M, NULL, NULL, P); 340 return nv04_pll_calc(subdev, &pll, idx, N, M, NULL, NULL, P);
337} 341}
338 342
339static inline u32 343static inline u32
@@ -360,11 +364,13 @@ clk_same(u32 a, u32 b)
360 return ((a / 1000) == (b / 1000)); 364 return ((a / 1000) == (b / 1000));
361} 365}
362 366
363static int 367int
364nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate) 368nv50_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
365{ 369{
366 struct nv50_clk_priv *priv = (void *)clk; 370 struct nv50_clk *clk = nv50_clk(base);
367 struct nv50_clk_hwsq *hwsq = &priv->hwsq; 371 struct nv50_clk_hwsq *hwsq = &clk->hwsq;
372 struct nvkm_subdev *subdev = &clk->base.subdev;
373 struct nvkm_device *device = subdev->device;
368 const int shader = cstate->domain[nv_clk_src_shader]; 374 const int shader = cstate->domain[nv_clk_src_shader];
369 const int core = cstate->domain[nv_clk_src_core]; 375 const int core = cstate->domain[nv_clk_src_core];
370 const int vdec = cstate->domain[nv_clk_src_vdec]; 376 const int vdec = cstate->domain[nv_clk_src_vdec];
@@ -375,7 +381,7 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
375 int freq, out; 381 int freq, out;
376 382
377 /* prepare a hwsq script from which we'll perform the reclock */ 383 /* prepare a hwsq script from which we'll perform the reclock */
378 out = clk_init(hwsq, nv_subdev(clk)); 384 out = clk_init(hwsq, subdev);
379 if (out) 385 if (out)
380 return out; 386 return out;
381 387
@@ -393,15 +399,15 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
393 freq = calc_div(core, vdec, &P1); 399 freq = calc_div(core, vdec, &P1);
394 400
395 /* see how close we can get using xpll/hclk as a source */ 401 /* see how close we can get using xpll/hclk as a source */
396 if (nv_device(priv)->chipset != 0x98) 402 if (device->chipset != 0x98)
397 out = read_pll(priv, 0x004030); 403 out = read_pll(clk, 0x004030);
398 else 404 else
399 out = clk->read(clk, nv_clk_src_hclkm3d2); 405 out = nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2);
400 out = calc_div(out, vdec, &P2); 406 out = calc_div(out, vdec, &P2);
401 407
402 /* select whichever gets us closest */ 408 /* select whichever gets us closest */
403 if (abs(vdec - freq) <= abs(vdec - out)) { 409 if (abs(vdec - freq) <= abs(vdec - out)) {
404 if (nv_device(priv)->chipset != 0x98) 410 if (device->chipset != 0x98)
405 mastv |= 0x00000c00; 411 mastv |= 0x00000c00;
406 divsv |= P1 << 8; 412 divsv |= P1 << 8;
407 } else { 413 } else {
@@ -417,14 +423,14 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
417 * of the host clock frequency 423 * of the host clock frequency
418 */ 424 */
419 if (dom6) { 425 if (dom6) {
420 if (clk_same(dom6, clk->read(clk, nv_clk_src_href))) { 426 if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_href))) {
421 mastv |= 0x00000000; 427 mastv |= 0x00000000;
422 } else 428 } else
423 if (clk_same(dom6, clk->read(clk, nv_clk_src_hclk))) { 429 if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_hclk))) {
424 mastv |= 0x08000000; 430 mastv |= 0x08000000;
425 } else { 431 } else {
426 freq = clk->read(clk, nv_clk_src_hclk) * 3; 432 freq = nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3;
427 freq = calc_div(freq, dom6, &P1); 433 calc_div(freq, dom6, &P1);
428 434
429 mastv |= 0x0c000000; 435 mastv |= 0x0c000000;
430 divsv |= P1; 436 divsv |= P1;
@@ -444,13 +450,13 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
444 /* core/shader: disconnect nvclk/sclk from their PLLs (nvclk to dom6, 450 /* core/shader: disconnect nvclk/sclk from their PLLs (nvclk to dom6,
445 * sclk to hclk) before reprogramming 451 * sclk to hclk) before reprogramming
446 */ 452 */
447 if (nv_device(priv)->chipset < 0x92) 453 if (device->chipset < 0x92)
448 clk_mask(hwsq, mast, 0x001000b0, 0x00100080); 454 clk_mask(hwsq, mast, 0x001000b0, 0x00100080);
449 else 455 else
450 clk_mask(hwsq, mast, 0x000000b3, 0x00000081); 456 clk_mask(hwsq, mast, 0x000000b3, 0x00000081);
451 457
452 /* core: for the moment at least, always use nvpll */ 458 /* core: for the moment at least, always use nvpll */
453 freq = calc_pll(priv, 0x4028, core, &N, &M, &P1); 459 freq = calc_pll(clk, 0x4028, core, &N, &M, &P1);
454 if (freq == 0) 460 if (freq == 0)
455 return -ERANGE; 461 return -ERANGE;
456 462
@@ -468,7 +474,7 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
468 clk_mask(hwsq, spll[0], 0xc03f0100, (P1 << 19) | (P1 << 16)); 474 clk_mask(hwsq, spll[0], 0xc03f0100, (P1 << 19) | (P1 << 16));
469 clk_mask(hwsq, mast, 0x00100033, 0x00000023); 475 clk_mask(hwsq, mast, 0x00100033, 0x00000023);
470 } else { 476 } else {
471 freq = calc_pll(priv, 0x4020, shader, &N, &M, &P1); 477 freq = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
472 if (freq == 0) 478 if (freq == 0)
473 return -ERANGE; 479 return -ERANGE;
474 480
@@ -485,77 +491,71 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
485 return 0; 491 return 0;
486} 492}
487 493
488static int 494int
489nv50_clk_prog(struct nvkm_clk *clk) 495nv50_clk_prog(struct nvkm_clk *base)
490{ 496{
491 struct nv50_clk_priv *priv = (void *)clk; 497 struct nv50_clk *clk = nv50_clk(base);
492 return clk_exec(&priv->hwsq, true); 498 return clk_exec(&clk->hwsq, true);
493} 499}
494 500
495static void 501void
496nv50_clk_tidy(struct nvkm_clk *clk) 502nv50_clk_tidy(struct nvkm_clk *base)
497{ 503{
498 struct nv50_clk_priv *priv = (void *)clk; 504 struct nv50_clk *clk = nv50_clk(base);
499 clk_exec(&priv->hwsq, false); 505 clk_exec(&clk->hwsq, false);
500} 506}
501 507
502int 508int
503nv50_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 509nv50_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
504 struct nvkm_oclass *oclass, void *data, u32 size, 510 int index, bool allow_reclock, struct nvkm_clk **pclk)
505 struct nvkm_object **pobject)
506{ 511{
507 struct nv50_clk_oclass *pclass = (void *)oclass; 512 struct nv50_clk *clk;
508 struct nv50_clk_priv *priv;
509 int ret; 513 int ret;
510 514
511 ret = nvkm_clk_create(parent, engine, oclass, pclass->domains, 515 if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
512 NULL, 0, false, &priv); 516 return -ENOMEM;
513 *pobject = nv_object(priv); 517 ret = nvkm_clk_ctor(func, device, index, allow_reclock, &clk->base);
518 *pclk = &clk->base;
514 if (ret) 519 if (ret)
515 return ret; 520 return ret;
516 521
517 priv->hwsq.r_fifo = hwsq_reg(0x002504); 522 clk->hwsq.r_fifo = hwsq_reg(0x002504);
518 priv->hwsq.r_spll[0] = hwsq_reg(0x004020); 523 clk->hwsq.r_spll[0] = hwsq_reg(0x004020);
519 priv->hwsq.r_spll[1] = hwsq_reg(0x004024); 524 clk->hwsq.r_spll[1] = hwsq_reg(0x004024);
520 priv->hwsq.r_nvpll[0] = hwsq_reg(0x004028); 525 clk->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
521 priv->hwsq.r_nvpll[1] = hwsq_reg(0x00402c); 526 clk->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
522 switch (nv_device(priv)->chipset) { 527 switch (device->chipset) {
523 case 0x92: 528 case 0x92:
524 case 0x94: 529 case 0x94:
525 case 0x96: 530 case 0x96:
526 priv->hwsq.r_divs = hwsq_reg(0x004800); 531 clk->hwsq.r_divs = hwsq_reg(0x004800);
527 break; 532 break;
528 default: 533 default:
529 priv->hwsq.r_divs = hwsq_reg(0x004700); 534 clk->hwsq.r_divs = hwsq_reg(0x004700);
530 break; 535 break;
531 } 536 }
532 priv->hwsq.r_mast = hwsq_reg(0x00c040); 537 clk->hwsq.r_mast = hwsq_reg(0x00c040);
533
534 priv->base.read = nv50_clk_read;
535 priv->base.calc = nv50_clk_calc;
536 priv->base.prog = nv50_clk_prog;
537 priv->base.tidy = nv50_clk_tidy;
538 return 0; 538 return 0;
539} 539}
540 540
541static struct nvkm_domain 541static const struct nvkm_clk_func
542nv50_domains[] = { 542nv50_clk = {
543 { nv_clk_src_crystal, 0xff }, 543 .read = nv50_clk_read,
544 { nv_clk_src_href , 0xff }, 544 .calc = nv50_clk_calc,
545 { nv_clk_src_core , 0xff, 0, "core", 1000 }, 545 .prog = nv50_clk_prog,
546 { nv_clk_src_shader , 0xff, 0, "shader", 1000 }, 546 .tidy = nv50_clk_tidy,
547 { nv_clk_src_mem , 0xff, 0, "memory", 1000 }, 547 .domains = {
548 { nv_clk_src_max } 548 { nv_clk_src_crystal, 0xff },
549 { nv_clk_src_href , 0xff },
550 { nv_clk_src_core , 0xff, 0, "core", 1000 },
551 { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
552 { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
553 { nv_clk_src_max }
554 }
549}; 555};
550 556
551struct nvkm_oclass * 557int
552nv50_clk_oclass = &(struct nv50_clk_oclass) { 558nv50_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
553 .base.handle = NV_SUBDEV(CLK, 0x50), 559{
554 .base.ofuncs = &(struct nvkm_ofuncs) { 560 return nv50_clk_new_(&nv50_clk, device, index, false, pclk);
555 .ctor = nv50_clk_ctor, 561}
556 .dtor = _nvkm_clk_dtor,
557 .init = _nvkm_clk_init,
558 .fini = _nvkm_clk_fini,
559 },
560 .domains = nv50_domains,
561}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
index 0ead76a32f10..d3c7fb6efa16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
@@ -1,7 +1,9 @@
1#ifndef __NVKM_CLK_NV50_H__ 1#ifndef __NV50_CLK_H__
2#define __NVKM_CLK_NV50_H__ 2#define __NV50_CLK_H__
3#define nv50_clk(p) container_of((p), struct nv50_clk, base)
4#include "priv.h"
5
3#include <subdev/bus/hwsq.h> 6#include <subdev/bus/hwsq.h>
4#include <subdev/clk.h>
5 7
6struct nv50_clk_hwsq { 8struct nv50_clk_hwsq {
7 struct hwsq base; 9 struct hwsq base;
@@ -12,17 +14,15 @@ struct nv50_clk_hwsq {
12 struct hwsq_reg r_mast; 14 struct hwsq_reg r_mast;
13}; 15};
14 16
15struct nv50_clk_priv { 17struct nv50_clk {
16 struct nvkm_clk base; 18 struct nvkm_clk base;
17 struct nv50_clk_hwsq hwsq; 19 struct nv50_clk_hwsq hwsq;
18}; 20};
19 21
20int nv50_clk_ctor(struct nvkm_object *, struct nvkm_object *, 22int nv50_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
21 struct nvkm_oclass *, void *, u32, 23 bool, struct nvkm_clk **);
22 struct nvkm_object **); 24int nv50_clk_read(struct nvkm_clk *, enum nv_clk_src);
23 25int nv50_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
24struct nv50_clk_oclass { 26int nv50_clk_prog(struct nvkm_clk *);
25 struct nvkm_oclass base; 27void nv50_clk_tidy(struct nvkm_clk *);
26 struct nvkm_domain *domains;
27};
28#endif 28#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
index 783a3e78d632..c6fccd600db4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
@@ -79,7 +79,7 @@ gt215_pll_calc(struct nvkm_subdev *subdev, struct nvbios_pll *info,
79 } 79 }
80 80
81 if (unlikely(best_err == ~0)) { 81 if (unlikely(best_err == ~0)) {
82 nv_error(subdev, "unable to find matching pll values\n"); 82 nvkm_error(subdev, "unable to find matching pll values\n");
83 return -EINVAL; 83 return -EINVAL;
84 } 84 }
85 85
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
index f2292895a1a8..5ad67879e703 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
@@ -37,7 +37,7 @@ getMNP_single(struct nvkm_subdev *subdev, struct nvbios_pll *info, int clk,
37 * "clk" parameter in kHz 37 * "clk" parameter in kHz
38 * returns calculated clock 38 * returns calculated clock
39 */ 39 */
40 struct nvkm_bios *bios = nvkm_bios(subdev); 40 struct nvkm_bios *bios = subdev->device->bios;
41 int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq; 41 int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
42 int minM = info->vco1.min_m, maxM = info->vco1.max_m; 42 int minM = info->vco1.min_m, maxM = info->vco1.max_m;
43 int minN = info->vco1.min_n, maxN = info->vco1.max_n; 43 int minN = info->vco1.min_n, maxN = info->vco1.max_n;
@@ -136,7 +136,7 @@ getMNP_double(struct nvkm_subdev *subdev, struct nvbios_pll *info, int clk,
136 * "clk" parameter in kHz 136 * "clk" parameter in kHz
137 * returns calculated clock 137 * returns calculated clock
138 */ 138 */
139 int chip_version = nvkm_bios(subdev)->version.chip; 139 int chip_version = subdev->device->bios->version.chip;
140 int minvco1 = info->vco1.min_freq, maxvco1 = info->vco1.max_freq; 140 int minvco1 = info->vco1.min_freq, maxvco1 = info->vco1.max_freq;
141 int minvco2 = info->vco2.min_freq, maxvco2 = info->vco2.max_freq; 141 int minvco2 = info->vco2.min_freq, maxvco2 = info->vco2.max_freq;
142 int minU1 = info->vco1.min_inputfreq, minU2 = info->vco2.min_inputfreq; 142 int minU1 = info->vco1.min_inputfreq, minU2 = info->vco2.min_inputfreq;
@@ -240,6 +240,6 @@ nv04_pll_calc(struct nvkm_subdev *subdev, struct nvbios_pll *info, u32 freq,
240 } 240 }
241 241
242 if (!ret) 242 if (!ret)
243 nv_error(subdev, "unable to compute acceptable pll values\n"); 243 nvkm_error(subdev, "unable to compute acceptable pll values\n");
244 return ret; 244 return ret;
245} 245}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
new file mode 100644
index 000000000000..51eafc00c8b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
@@ -0,0 +1,26 @@
1#ifndef __NVKM_CLK_PRIV_H__
2#define __NVKM_CLK_PRIV_H__
3#define nvkm_clk(p) container_of((p), struct nvkm_clk, subdev)
4#include <subdev/clk.h>
5
6struct nvkm_clk_func {
7 int (*init)(struct nvkm_clk *);
8 void (*fini)(struct nvkm_clk *);
9 int (*read)(struct nvkm_clk *, enum nv_clk_src);
10 int (*calc)(struct nvkm_clk *, struct nvkm_cstate *);
11 int (*prog)(struct nvkm_clk *);
12 void (*tidy)(struct nvkm_clk *);
13 struct nvkm_pstate *pstates;
14 int nr_pstates;
15 struct nvkm_domain domains[];
16};
17
18int nvkm_clk_ctor(const struct nvkm_clk_func *, struct nvkm_device *, int,
19 bool allow_reclock, struct nvkm_clk *);
20int nvkm_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
21 bool allow_reclock, struct nvkm_clk **);
22
23int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk,
24 struct nvkm_pll_vals *);
25int nv04_clk_pll_prog(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *);
26#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
index b0d7c5f40db1..5f25402f6b09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
@@ -23,74 +23,108 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26#include <core/device.h>
27#include <core/option.h> 26#include <core/option.h>
28#include <subdev/vga.h> 27#include <subdev/vga.h>
29 28
30int 29u32
31_nvkm_devinit_fini(struct nvkm_object *object, bool suspend) 30nvkm_devinit_mmio(struct nvkm_devinit *init, u32 addr)
32{ 31{
33 struct nvkm_devinit *devinit = (void *)object; 32 if (init->func->mmio)
33 addr = init->func->mmio(init, addr);
34 return addr;
35}
34 36
35 /* force full reinit on resume */ 37int
36 if (suspend) 38nvkm_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 khz)
37 devinit->post = true; 39{
40 return init->func->pll_set(init, type, khz);
41}
38 42
39 /* unlock the extended vga crtc regs */ 43void
40 nv_lockvgac(devinit, false); 44nvkm_devinit_meminit(struct nvkm_devinit *init)
45{
46 if (init->func->meminit)
47 init->func->meminit(init);
48}
41 49
42 return nvkm_subdev_fini(&devinit->base, suspend); 50u64
51nvkm_devinit_disable(struct nvkm_devinit *init)
52{
53 if (init && init->func->disable)
54 return init->func->disable(init);
55 return 0;
43} 56}
44 57
45int 58int
46_nvkm_devinit_init(struct nvkm_object *object) 59nvkm_devinit_post(struct nvkm_devinit *init, u64 *disable)
47{ 60{
48 struct nvkm_devinit_impl *impl = (void *)object->oclass; 61 int ret = 0;
49 struct nvkm_devinit *devinit = (void *)object; 62 if (init && init->func->post)
50 int ret; 63 ret = init->func->post(init, init->post);
64 *disable = nvkm_devinit_disable(init);
65 return ret;
66}
51 67
52 ret = nvkm_subdev_init(&devinit->base); 68static int
53 if (ret) 69nvkm_devinit_fini(struct nvkm_subdev *subdev, bool suspend)
54 return ret; 70{
71 struct nvkm_devinit *init = nvkm_devinit(subdev);
72 /* force full reinit on resume */
73 if (suspend)
74 init->post = true;
75 return 0;
76}
77
78static int
79nvkm_devinit_preinit(struct nvkm_subdev *subdev)
80{
81 struct nvkm_devinit *init = nvkm_devinit(subdev);
55 82
56 ret = impl->post(&devinit->base, devinit->post); 83 if (init->func->preinit)
57 if (ret) 84 init->func->preinit(init);
58 return ret;
59 85
60 if (impl->disable) 86 /* unlock the extended vga crtc regs */
61 nv_device(devinit)->disable_mask |= impl->disable(devinit); 87 nvkm_lockvgac(subdev->device, false);
62 return 0; 88 return 0;
63} 89}
64 90
65void 91static int
66_nvkm_devinit_dtor(struct nvkm_object *object) 92nvkm_devinit_init(struct nvkm_subdev *subdev)
93{
94 struct nvkm_devinit *init = nvkm_devinit(subdev);
95 if (init->func->init)
96 init->func->init(init);
97 return 0;
98}
99
100static void *
101nvkm_devinit_dtor(struct nvkm_subdev *subdev)
67{ 102{
68 struct nvkm_devinit *devinit = (void *)object; 103 struct nvkm_devinit *init = nvkm_devinit(subdev);
104 void *data = init;
69 105
70 /* lock crtc regs */ 106 if (init->func->dtor)
71 nv_lockvgac(devinit, true); 107 data = init->func->dtor(init);
72 108
73 nvkm_subdev_destroy(&devinit->base); 109 /* lock crtc regs */
110 nvkm_lockvgac(subdev->device, true);
111 return data;
74} 112}
75 113
76int 114static const struct nvkm_subdev_func
77nvkm_devinit_create_(struct nvkm_object *parent, struct nvkm_object *engine, 115nvkm_devinit = {
78 struct nvkm_oclass *oclass, int size, void **pobject) 116 .dtor = nvkm_devinit_dtor,
117 .preinit = nvkm_devinit_preinit,
118 .init = nvkm_devinit_init,
119 .fini = nvkm_devinit_fini,
120};
121
122void
123nvkm_devinit_ctor(const struct nvkm_devinit_func *func,
124 struct nvkm_device *device, int index,
125 struct nvkm_devinit *init)
79{ 126{
80 struct nvkm_devinit_impl *impl = (void *)oclass; 127 nvkm_subdev_ctor(&nvkm_devinit, device, index, 0, &init->subdev);
81 struct nvkm_device *device = nv_device(parent); 128 init->func = func;
82 struct nvkm_devinit *devinit; 129 init->post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
83 int ret;
84
85 ret = nvkm_subdev_create_(parent, engine, oclass, 0, "DEVINIT",
86 "init", size, pobject);
87 devinit = *pobject;
88 if (ret)
89 return ret;
90
91 devinit->post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
92 devinit->meminit = impl->meminit;
93 devinit->pll_set = impl->pll_set;
94 devinit->mmio = impl->mmio;
95 return 0;
96} 130}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
index 36684c3f9e9c..6c5bbff12eb4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
@@ -23,7 +23,6 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26#include <core/device.h>
27#include <subdev/fb/regsnv04.h> 26#include <subdev/fb/regsnv04.h>
28 27
29#define NV04_PFB_DEBUG_0 0x00100080 28#define NV04_PFB_DEBUG_0 0x00100080
@@ -48,8 +47,8 @@
48static inline struct io_mapping * 47static inline struct io_mapping *
49fbmem_init(struct nvkm_device *dev) 48fbmem_init(struct nvkm_device *dev)
50{ 49{
51 return io_mapping_create_wc(nv_device_resource_start(dev, 1), 50 return io_mapping_create_wc(dev->func->resource_addr(dev, 1),
52 nv_device_resource_len(dev, 1)); 51 dev->func->resource_size(dev, 1));
53} 52}
54 53
55static inline void 54static inline void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
index ca776ce75f4f..e895289bf3c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
@@ -27,40 +27,42 @@
27#include <subdev/bios/init.h> 27#include <subdev/bios/init.h>
28 28
29static u64 29static u64
30g84_devinit_disable(struct nvkm_devinit *devinit) 30g84_devinit_disable(struct nvkm_devinit *init)
31{ 31{
32 struct nv50_devinit_priv *priv = (void *)devinit; 32 struct nvkm_device *device = init->subdev.device;
33 u32 r001540 = nv_rd32(priv, 0x001540); 33 u32 r001540 = nvkm_rd32(device, 0x001540);
34 u32 r00154c = nv_rd32(priv, 0x00154c); 34 u32 r00154c = nvkm_rd32(device, 0x00154c);
35 u64 disable = 0ULL; 35 u64 disable = 0ULL;
36 36
37 if (!(r001540 & 0x40000000)) { 37 if (!(r001540 & 0x40000000)) {
38 disable |= (1ULL << NVDEV_ENGINE_MPEG); 38 disable |= (1ULL << NVKM_ENGINE_MPEG);
39 disable |= (1ULL << NVDEV_ENGINE_VP); 39 disable |= (1ULL << NVKM_ENGINE_VP);
40 disable |= (1ULL << NVDEV_ENGINE_BSP); 40 disable |= (1ULL << NVKM_ENGINE_BSP);
41 disable |= (1ULL << NVDEV_ENGINE_CIPHER); 41 disable |= (1ULL << NVKM_ENGINE_CIPHER);
42 } 42 }
43 43
44 if (!(r00154c & 0x00000004)) 44 if (!(r00154c & 0x00000004))
45 disable |= (1ULL << NVDEV_ENGINE_DISP); 45 disable |= (1ULL << NVKM_ENGINE_DISP);
46 if (!(r00154c & 0x00000020)) 46 if (!(r00154c & 0x00000020))
47 disable |= (1ULL << NVDEV_ENGINE_BSP); 47 disable |= (1ULL << NVKM_ENGINE_BSP);
48 if (!(r00154c & 0x00000040)) 48 if (!(r00154c & 0x00000040))
49 disable |= (1ULL << NVDEV_ENGINE_CIPHER); 49 disable |= (1ULL << NVKM_ENGINE_CIPHER);
50 50
51 return disable; 51 return disable;
52} 52}
53 53
54struct nvkm_oclass * 54static const struct nvkm_devinit_func
55g84_devinit_oclass = &(struct nvkm_devinit_impl) { 55g84_devinit = {
56 .base.handle = NV_SUBDEV(DEVINIT, 0x84), 56 .preinit = nv50_devinit_preinit,
57 .base.ofuncs = &(struct nvkm_ofuncs) { 57 .init = nv50_devinit_init,
58 .ctor = nv50_devinit_ctor, 58 .post = nv04_devinit_post,
59 .dtor = _nvkm_devinit_dtor,
60 .init = nv50_devinit_init,
61 .fini = _nvkm_devinit_fini,
62 },
63 .pll_set = nv50_devinit_pll_set, 59 .pll_set = nv50_devinit_pll_set,
64 .disable = g84_devinit_disable, 60 .disable = g84_devinit_disable,
65 .post = nvbios_init, 61};
66}.base; 62
63int
64g84_devinit_new(struct nvkm_device *device, int index,
65 struct nvkm_devinit **pinit)
66{
67 return nv50_devinit_new_(&g84_devinit, device, index, pinit);
68}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
index d29bacee65ee..a9d45844df5a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
@@ -27,39 +27,41 @@
27#include <subdev/bios/init.h> 27#include <subdev/bios/init.h>
28 28
29static u64 29static u64
30g98_devinit_disable(struct nvkm_devinit *devinit) 30g98_devinit_disable(struct nvkm_devinit *init)
31{ 31{
32 struct nv50_devinit_priv *priv = (void *)devinit; 32 struct nvkm_device *device = init->subdev.device;
33 u32 r001540 = nv_rd32(priv, 0x001540); 33 u32 r001540 = nvkm_rd32(device, 0x001540);
34 u32 r00154c = nv_rd32(priv, 0x00154c); 34 u32 r00154c = nvkm_rd32(device, 0x00154c);
35 u64 disable = 0ULL; 35 u64 disable = 0ULL;
36 36
37 if (!(r001540 & 0x40000000)) { 37 if (!(r001540 & 0x40000000)) {
38 disable |= (1ULL << NVDEV_ENGINE_MSPDEC); 38 disable |= (1ULL << NVKM_ENGINE_MSPDEC);
39 disable |= (1ULL << NVDEV_ENGINE_MSVLD); 39 disable |= (1ULL << NVKM_ENGINE_MSVLD);
40 disable |= (1ULL << NVDEV_ENGINE_MSPPP); 40 disable |= (1ULL << NVKM_ENGINE_MSPPP);
41 } 41 }
42 42
43 if (!(r00154c & 0x00000004)) 43 if (!(r00154c & 0x00000004))
44 disable |= (1ULL << NVDEV_ENGINE_DISP); 44 disable |= (1ULL << NVKM_ENGINE_DISP);
45 if (!(r00154c & 0x00000020)) 45 if (!(r00154c & 0x00000020))
46 disable |= (1ULL << NVDEV_ENGINE_MSVLD); 46 disable |= (1ULL << NVKM_ENGINE_MSVLD);
47 if (!(r00154c & 0x00000040)) 47 if (!(r00154c & 0x00000040))
48 disable |= (1ULL << NVDEV_ENGINE_SEC); 48 disable |= (1ULL << NVKM_ENGINE_SEC);
49 49
50 return disable; 50 return disable;
51} 51}
52 52
53struct nvkm_oclass * 53static const struct nvkm_devinit_func
54g98_devinit_oclass = &(struct nvkm_devinit_impl) { 54g98_devinit = {
55 .base.handle = NV_SUBDEV(DEVINIT, 0x98), 55 .preinit = nv50_devinit_preinit,
56 .base.ofuncs = &(struct nvkm_ofuncs) { 56 .init = nv50_devinit_init,
57 .ctor = nv50_devinit_ctor, 57 .post = nv04_devinit_post,
58 .dtor = _nvkm_devinit_dtor,
59 .init = nv50_devinit_init,
60 .fini = _nvkm_devinit_fini,
61 },
62 .pll_set = nv50_devinit_pll_set, 58 .pll_set = nv50_devinit_pll_set,
63 .disable = g98_devinit_disable, 59 .disable = g98_devinit_disable,
64 .post = nvbios_init, 60};
65}.base; 61
62int
63g98_devinit_new(struct nvkm_device *device, int index,
64 struct nvkm_devinit **pinit)
65{
66 return nv50_devinit_new_(&g98_devinit, device, index, pinit);
67}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
index c61102f70805..22b0140e28c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
@@ -29,19 +29,19 @@
29#include <subdev/clk/pll.h> 29#include <subdev/clk/pll.h>
30 30
31int 31int
32gf100_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq) 32gf100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
33{ 33{
34 struct nv50_devinit_priv *priv = (void *)devinit; 34 struct nvkm_subdev *subdev = &init->subdev;
35 struct nvkm_bios *bios = nvkm_bios(priv); 35 struct nvkm_device *device = subdev->device;
36 struct nvbios_pll info; 36 struct nvbios_pll info;
37 int N, fN, M, P; 37 int N, fN, M, P;
38 int ret; 38 int ret;
39 39
40 ret = nvbios_pll_parse(bios, type, &info); 40 ret = nvbios_pll_parse(device->bios, type, &info);
41 if (ret) 41 if (ret)
42 return ret; 42 return ret;
43 43
44 ret = gt215_pll_calc(nv_subdev(devinit), &info, freq, &N, &fN, &M, &P); 44 ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
45 if (ret < 0) 45 if (ret < 0)
46 return ret; 46 return ret;
47 47
@@ -50,12 +50,12 @@ gf100_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
50 case PLL_VPLL1: 50 case PLL_VPLL1:
51 case PLL_VPLL2: 51 case PLL_VPLL2:
52 case PLL_VPLL3: 52 case PLL_VPLL3:
53 nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100); 53 nvkm_mask(device, info.reg + 0x0c, 0x00000000, 0x00000100);
54 nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M); 54 nvkm_wr32(device, info.reg + 0x04, (P << 16) | (N << 8) | M);
55 nv_wr32(priv, info.reg + 0x10, fN << 16); 55 nvkm_wr32(device, info.reg + 0x10, fN << 16);
56 break; 56 break;
57 default: 57 default:
58 nv_warn(priv, "0x%08x/%dKhz unimplemented\n", type, freq); 58 nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
59 ret = -EINVAL; 59 ret = -EINVAL;
60 break; 60 break;
61 } 61 }
@@ -64,64 +64,44 @@ gf100_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
64} 64}
65 65
66static u64 66static u64
67gf100_devinit_disable(struct nvkm_devinit *devinit) 67gf100_devinit_disable(struct nvkm_devinit *init)
68{ 68{
69 struct nv50_devinit_priv *priv = (void *)devinit; 69 struct nvkm_device *device = init->subdev.device;
70 u32 r022500 = nv_rd32(priv, 0x022500); 70 u32 r022500 = nvkm_rd32(device, 0x022500);
71 u64 disable = 0ULL; 71 u64 disable = 0ULL;
72 72
73 if (r022500 & 0x00000001) 73 if (r022500 & 0x00000001)
74 disable |= (1ULL << NVDEV_ENGINE_DISP); 74 disable |= (1ULL << NVKM_ENGINE_DISP);
75 75
76 if (r022500 & 0x00000002) { 76 if (r022500 & 0x00000002) {
77 disable |= (1ULL << NVDEV_ENGINE_MSPDEC); 77 disable |= (1ULL << NVKM_ENGINE_MSPDEC);
78 disable |= (1ULL << NVDEV_ENGINE_MSPPP); 78 disable |= (1ULL << NVKM_ENGINE_MSPPP);
79 } 79 }
80 80
81 if (r022500 & 0x00000004) 81 if (r022500 & 0x00000004)
82 disable |= (1ULL << NVDEV_ENGINE_MSVLD); 82 disable |= (1ULL << NVKM_ENGINE_MSVLD);
83 if (r022500 & 0x00000008) 83 if (r022500 & 0x00000008)
84 disable |= (1ULL << NVDEV_ENGINE_MSENC); 84 disable |= (1ULL << NVKM_ENGINE_MSENC);
85 if (r022500 & 0x00000100) 85 if (r022500 & 0x00000100)
86 disable |= (1ULL << NVDEV_ENGINE_CE0); 86 disable |= (1ULL << NVKM_ENGINE_CE0);
87 if (r022500 & 0x00000200) 87 if (r022500 & 0x00000200)
88 disable |= (1ULL << NVDEV_ENGINE_CE1); 88 disable |= (1ULL << NVKM_ENGINE_CE1);
89 89
90 return disable; 90 return disable;
91} 91}
92 92
93static const struct nvkm_devinit_func
94gf100_devinit = {
95 .preinit = nv50_devinit_preinit,
96 .init = nv50_devinit_init,
97 .post = nv04_devinit_post,
98 .pll_set = gf100_devinit_pll_set,
99 .disable = gf100_devinit_disable,
100};
101
93int 102int
94gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 103gf100_devinit_new(struct nvkm_device *device, int index,
95 struct nvkm_oclass *oclass, void *data, u32 size, 104 struct nvkm_devinit **pinit)
96 struct nvkm_object **pobject)
97{ 105{
98 struct nvkm_devinit_impl *impl = (void *)oclass; 106 return nv50_devinit_new_(&gf100_devinit, device, index, pinit);
99 struct nv50_devinit_priv *priv;
100 u64 disable;
101 int ret;
102
103 ret = nvkm_devinit_create(parent, engine, oclass, &priv);
104 *pobject = nv_object(priv);
105 if (ret)
106 return ret;
107
108 disable = impl->disable(&priv->base);
109 if (disable & (1ULL << NVDEV_ENGINE_DISP))
110 priv->base.post = true;
111
112 return 0;
113} 107}
114
115struct nvkm_oclass *
116gf100_devinit_oclass = &(struct nvkm_devinit_impl) {
117 .base.handle = NV_SUBDEV(DEVINIT, 0xc0),
118 .base.ofuncs = &(struct nvkm_ofuncs) {
119 .ctor = gf100_devinit_ctor,
120 .dtor = _nvkm_devinit_dtor,
121 .init = nv50_devinit_init,
122 .fini = _nvkm_devinit_fini,
123 },
124 .pll_set = gf100_devinit_pll_set,
125 .disable = gf100_devinit_disable,
126 .post = nvbios_init,
127}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 87ca0ece37b4..2be98bd78214 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -27,33 +27,35 @@
27#include <subdev/bios/init.h> 27#include <subdev/bios/init.h>
28 28
29u64 29u64
30gm107_devinit_disable(struct nvkm_devinit *devinit) 30gm107_devinit_disable(struct nvkm_devinit *init)
31{ 31{
32 struct nv50_devinit_priv *priv = (void *)devinit; 32 struct nvkm_device *device = init->subdev.device;
33 u32 r021c00 = nv_rd32(priv, 0x021c00); 33 u32 r021c00 = nvkm_rd32(device, 0x021c00);
34 u32 r021c04 = nv_rd32(priv, 0x021c04); 34 u32 r021c04 = nvkm_rd32(device, 0x021c04);
35 u64 disable = 0ULL; 35 u64 disable = 0ULL;
36 36
37 if (r021c00 & 0x00000001) 37 if (r021c00 & 0x00000001)
38 disable |= (1ULL << NVDEV_ENGINE_CE0); 38 disable |= (1ULL << NVKM_ENGINE_CE0);
39 if (r021c00 & 0x00000004) 39 if (r021c00 & 0x00000004)
40 disable |= (1ULL << NVDEV_ENGINE_CE2); 40 disable |= (1ULL << NVKM_ENGINE_CE2);
41 if (r021c04 & 0x00000001) 41 if (r021c04 & 0x00000001)
42 disable |= (1ULL << NVDEV_ENGINE_DISP); 42 disable |= (1ULL << NVKM_ENGINE_DISP);
43 43
44 return disable; 44 return disable;
45} 45}
46 46
47struct nvkm_oclass * 47static const struct nvkm_devinit_func
48gm107_devinit_oclass = &(struct nvkm_devinit_impl) { 48gm107_devinit = {
49 .base.handle = NV_SUBDEV(DEVINIT, 0x07), 49 .preinit = nv50_devinit_preinit,
50 .base.ofuncs = &(struct nvkm_ofuncs) { 50 .init = nv50_devinit_init,
51 .ctor = gf100_devinit_ctor, 51 .post = nv04_devinit_post,
52 .dtor = _nvkm_devinit_dtor,
53 .init = nv50_devinit_init,
54 .fini = _nvkm_devinit_fini,
55 },
56 .pll_set = gf100_devinit_pll_set, 52 .pll_set = gf100_devinit_pll_set,
57 .disable = gm107_devinit_disable, 53 .disable = gm107_devinit_disable,
58 .post = nvbios_init, 54};
59}.base; 55
56int
57gm107_devinit_new(struct nvkm_device *device, int index,
58 struct nvkm_devinit **pinit)
59{
60 return nv50_devinit_new_(&gm107_devinit, device, index, pinit);
61}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
index 1076fcf0d716..2b9c3f11b7a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
@@ -28,69 +28,74 @@
28#include <subdev/bios/pmu.h> 28#include <subdev/bios/pmu.h>
29 29
30static void 30static void
31pmu_code(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len, bool sec) 31pmu_code(struct nv50_devinit *init, u32 pmu, u32 img, u32 len, bool sec)
32{ 32{
33 struct nvkm_bios *bios = nvkm_bios(priv); 33 struct nvkm_device *device = init->base.subdev.device;
34 struct nvkm_bios *bios = device->bios;
34 int i; 35 int i;
35 36
36 nv_wr32(priv, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu); 37 nvkm_wr32(device, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
37 for (i = 0; i < len; i += 4) { 38 for (i = 0; i < len; i += 4) {
38 if ((i & 0xff) == 0) 39 if ((i & 0xff) == 0)
39 nv_wr32(priv, 0x10a188, (pmu + i) >> 8); 40 nvkm_wr32(device, 0x10a188, (pmu + i) >> 8);
40 nv_wr32(priv, 0x10a184, nv_ro32(bios, img + i)); 41 nvkm_wr32(device, 0x10a184, nvbios_rd32(bios, img + i));
41 } 42 }
42 43
43 while (i & 0xff) { 44 while (i & 0xff) {
44 nv_wr32(priv, 0x10a184, 0x00000000); 45 nvkm_wr32(device, 0x10a184, 0x00000000);
45 i += 4; 46 i += 4;
46 } 47 }
47} 48}
48 49
49static void 50static void
50pmu_data(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len) 51pmu_data(struct nv50_devinit *init, u32 pmu, u32 img, u32 len)
51{ 52{
52 struct nvkm_bios *bios = nvkm_bios(priv); 53 struct nvkm_device *device = init->base.subdev.device;
54 struct nvkm_bios *bios = device->bios;
53 int i; 55 int i;
54 56
55 nv_wr32(priv, 0x10a1c0, 0x01000000 | pmu); 57 nvkm_wr32(device, 0x10a1c0, 0x01000000 | pmu);
56 for (i = 0; i < len; i += 4) 58 for (i = 0; i < len; i += 4)
57 nv_wr32(priv, 0x10a1c4, nv_ro32(bios, img + i)); 59 nvkm_wr32(device, 0x10a1c4, nvbios_rd32(bios, img + i));
58} 60}
59 61
60static u32 62static u32
61pmu_args(struct nv50_devinit_priv *priv, u32 argp, u32 argi) 63pmu_args(struct nv50_devinit *init, u32 argp, u32 argi)
62{ 64{
63 nv_wr32(priv, 0x10a1c0, argp); 65 struct nvkm_device *device = init->base.subdev.device;
64 nv_wr32(priv, 0x10a1c0, nv_rd32(priv, 0x10a1c4) + argi); 66 nvkm_wr32(device, 0x10a1c0, argp);
65 return nv_rd32(priv, 0x10a1c4); 67 nvkm_wr32(device, 0x10a1c0, nvkm_rd32(device, 0x10a1c4) + argi);
68 return nvkm_rd32(device, 0x10a1c4);
66} 69}
67 70
68static void 71static void
69pmu_exec(struct nv50_devinit_priv *priv, u32 init_addr) 72pmu_exec(struct nv50_devinit *init, u32 init_addr)
70{ 73{
71 nv_wr32(priv, 0x10a104, init_addr); 74 struct nvkm_device *device = init->base.subdev.device;
72 nv_wr32(priv, 0x10a10c, 0x00000000); 75 nvkm_wr32(device, 0x10a104, init_addr);
73 nv_wr32(priv, 0x10a100, 0x00000002); 76 nvkm_wr32(device, 0x10a10c, 0x00000000);
77 nvkm_wr32(device, 0x10a100, 0x00000002);
74} 78}
75 79
76static int 80static int
77pmu_load(struct nv50_devinit_priv *priv, u8 type, bool post, 81pmu_load(struct nv50_devinit *init, u8 type, bool post,
78 u32 *init_addr_pmu, u32 *args_addr_pmu) 82 u32 *init_addr_pmu, u32 *args_addr_pmu)
79{ 83{
80 struct nvkm_bios *bios = nvkm_bios(priv); 84 struct nvkm_subdev *subdev = &init->base.subdev;
85 struct nvkm_bios *bios = subdev->device->bios;
81 struct nvbios_pmuR pmu; 86 struct nvbios_pmuR pmu;
82 87
83 if (!nvbios_pmuRm(bios, type, &pmu)) { 88 if (!nvbios_pmuRm(bios, type, &pmu)) {
84 nv_error(priv, "VBIOS PMU fuc %02x not found\n", type); 89 nvkm_error(subdev, "VBIOS PMU fuc %02x not found\n", type);
85 return -EINVAL; 90 return -EINVAL;
86 } 91 }
87 92
88 if (!post) 93 if (!post)
89 return 0; 94 return 0;
90 95
91 pmu_code(priv, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false); 96 pmu_code(init, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);
92 pmu_code(priv, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true); 97 pmu_code(init, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true);
93 pmu_data(priv, pmu.data_addr_pmu, pmu.data_addr, pmu.data_size); 98 pmu_data(init, pmu.data_addr_pmu, pmu.data_addr, pmu.data_size);
94 99
95 if (init_addr_pmu) { 100 if (init_addr_pmu) {
96 *init_addr_pmu = pmu.init_addr_pmu; 101 *init_addr_pmu = pmu.init_addr_pmu;
@@ -98,75 +103,79 @@ pmu_load(struct nv50_devinit_priv *priv, u8 type, bool post,
98 return 0; 103 return 0;
99 } 104 }
100 105
101 return pmu_exec(priv, pmu.init_addr_pmu), 0; 106 return pmu_exec(init, pmu.init_addr_pmu), 0;
102} 107}
103 108
104static int 109static int
105gm204_devinit_post(struct nvkm_subdev *subdev, bool post) 110gm204_devinit_post(struct nvkm_devinit *base, bool post)
106{ 111{
107 struct nv50_devinit_priv *priv = (void *)nvkm_devinit(subdev); 112 struct nv50_devinit *init = nv50_devinit(base);
108 struct nvkm_bios *bios = nvkm_bios(priv); 113 struct nvkm_subdev *subdev = &init->base.subdev;
114 struct nvkm_device *device = subdev->device;
115 struct nvkm_bios *bios = device->bios;
109 struct bit_entry bit_I; 116 struct bit_entry bit_I;
110 u32 init, args; 117 u32 exec, args;
111 int ret; 118 int ret;
112 119
113 if (bit_entry(bios, 'I', &bit_I) || bit_I.version != 1 || 120 if (bit_entry(bios, 'I', &bit_I) || bit_I.version != 1 ||
114 bit_I.length < 0x1c) { 121 bit_I.length < 0x1c) {
115 nv_error(priv, "VBIOS PMU init data not found\n"); 122 nvkm_error(subdev, "VBIOS PMU init data not found\n");
116 return -EINVAL; 123 return -EINVAL;
117 } 124 }
118 125
119 /* reset PMU and load init table parser ucode */ 126 /* reset PMU and load init table parser ucode */
120 if (post) { 127 if (post) {
121 nv_mask(priv, 0x000200, 0x00002000, 0x00000000); 128 nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
122 nv_mask(priv, 0x000200, 0x00002000, 0x00002000); 129 nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
123 nv_rd32(priv, 0x000200); 130 nvkm_rd32(device, 0x000200);
124 while (nv_rd32(priv, 0x10a10c) & 0x00000006) { 131 while (nvkm_rd32(device, 0x10a10c) & 0x00000006) {
125 } 132 }
126 } 133 }
127 134
128 ret = pmu_load(priv, 0x04, post, &init, &args); 135 ret = pmu_load(init, 0x04, post, &exec, &args);
129 if (ret) 136 if (ret)
130 return ret; 137 return ret;
131 138
132 /* upload first chunk of init data */ 139 /* upload first chunk of init data */
133 if (post) { 140 if (post) {
134 u32 pmu = pmu_args(priv, args + 0x08, 0x08); 141 u32 pmu = pmu_args(init, args + 0x08, 0x08);
135 u32 img = nv_ro16(bios, bit_I.offset + 0x14); 142 u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
136 u32 len = nv_ro16(bios, bit_I.offset + 0x16); 143 u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
137 pmu_data(priv, pmu, img, len); 144 pmu_data(init, pmu, img, len);
138 } 145 }
139 146
140 /* upload second chunk of init data */ 147 /* upload second chunk of init data */
141 if (post) { 148 if (post) {
142 u32 pmu = pmu_args(priv, args + 0x08, 0x10); 149 u32 pmu = pmu_args(init, args + 0x08, 0x10);
143 u32 img = nv_ro16(bios, bit_I.offset + 0x18); 150 u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
144 u32 len = nv_ro16(bios, bit_I.offset + 0x1a); 151 u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
145 pmu_data(priv, pmu, img, len); 152 pmu_data(init, pmu, img, len);
146 } 153 }
147 154
148 /* execute init tables */ 155 /* execute init tables */
149 if (post) { 156 if (post) {
150 nv_wr32(priv, 0x10a040, 0x00005000); 157 nvkm_wr32(device, 0x10a040, 0x00005000);
151 pmu_exec(priv, init); 158 pmu_exec(init, exec);
152 while (!(nv_rd32(priv, 0x10a040) & 0x00002000)) { 159 while (!(nvkm_rd32(device, 0x10a040) & 0x00002000)) {
153 } 160 }
154 } 161 }
155 162
156 /* load and execute some other ucode image (bios therm?) */ 163 /* load and execute some other ucode image (bios therm?) */
157 return pmu_load(priv, 0x01, post, NULL, NULL); 164 return pmu_load(init, 0x01, post, NULL, NULL);
158} 165}
159 166
160struct nvkm_oclass * 167static const struct nvkm_devinit_func
161gm204_devinit_oclass = &(struct nvkm_devinit_impl) { 168gm204_devinit = {
162 .base.handle = NV_SUBDEV(DEVINIT, 0x07), 169 .preinit = nv50_devinit_preinit,
163 .base.ofuncs = &(struct nvkm_ofuncs) { 170 .init = nv50_devinit_init,
164 .ctor = gf100_devinit_ctor, 171 .post = gm204_devinit_post,
165 .dtor = _nvkm_devinit_dtor,
166 .init = nv50_devinit_init,
167 .fini = _nvkm_devinit_fini,
168 },
169 .pll_set = gf100_devinit_pll_set, 172 .pll_set = gf100_devinit_pll_set,
170 .disable = gm107_devinit_disable, 173 .disable = gm107_devinit_disable,
171 .post = gm204_devinit_post, 174};
172}.base; 175
176int
177gm204_devinit_new(struct nvkm_device *device, int index,
178 struct nvkm_devinit **pinit)
179{
180 return nv50_devinit_new_(&gm204_devinit, device, index, pinit);
181}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
index 6a3e8d4efed7..9a8522fa9c65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
@@ -29,32 +29,32 @@
29#include <subdev/clk/pll.h> 29#include <subdev/clk/pll.h>
30 30
31int 31int
32gt215_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq) 32gt215_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
33{ 33{
34 struct nv50_devinit_priv *priv = (void *)devinit; 34 struct nvkm_subdev *subdev = &init->subdev;
35 struct nvkm_bios *bios = nvkm_bios(priv); 35 struct nvkm_device *device = subdev->device;
36 struct nvbios_pll info; 36 struct nvbios_pll info;
37 int N, fN, M, P; 37 int N, fN, M, P;
38 int ret; 38 int ret;
39 39
40 ret = nvbios_pll_parse(bios, type, &info); 40 ret = nvbios_pll_parse(device->bios, type, &info);
41 if (ret) 41 if (ret)
42 return ret; 42 return ret;
43 43
44 ret = gt215_pll_calc(nv_subdev(devinit), &info, freq, &N, &fN, &M, &P); 44 ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
45 if (ret < 0) 45 if (ret < 0)
46 return ret; 46 return ret;
47 47
48 switch (info.type) { 48 switch (info.type) {
49 case PLL_VPLL0: 49 case PLL_VPLL0:
50 case PLL_VPLL1: 50 case PLL_VPLL1:
51 nv_wr32(priv, info.reg + 0, 0x50000610); 51 nvkm_wr32(device, info.reg + 0, 0x50000610);
52 nv_mask(priv, info.reg + 4, 0x003fffff, 52 nvkm_mask(device, info.reg + 4, 0x003fffff,
53 (P << 16) | (M << 8) | N); 53 (P << 16) | (M << 8) | N);
54 nv_wr32(priv, info.reg + 8, fN); 54 nvkm_wr32(device, info.reg + 8, fN);
55 break; 55 break;
56 default: 56 default:
57 nv_warn(priv, "0x%08x/%dKhz unimplemented\n", type, freq); 57 nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
58 ret = -EINVAL; 58 ret = -EINVAL;
59 break; 59 break;
60 } 60 }
@@ -63,24 +63,24 @@ gt215_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
63} 63}
64 64
65static u64 65static u64
66gt215_devinit_disable(struct nvkm_devinit *devinit) 66gt215_devinit_disable(struct nvkm_devinit *init)
67{ 67{
68 struct nv50_devinit_priv *priv = (void *)devinit; 68 struct nvkm_device *device = init->subdev.device;
69 u32 r001540 = nv_rd32(priv, 0x001540); 69 u32 r001540 = nvkm_rd32(device, 0x001540);
70 u32 r00154c = nv_rd32(priv, 0x00154c); 70 u32 r00154c = nvkm_rd32(device, 0x00154c);
71 u64 disable = 0ULL; 71 u64 disable = 0ULL;
72 72
73 if (!(r001540 & 0x40000000)) { 73 if (!(r001540 & 0x40000000)) {
74 disable |= (1ULL << NVDEV_ENGINE_MSPDEC); 74 disable |= (1ULL << NVKM_ENGINE_MSPDEC);
75 disable |= (1ULL << NVDEV_ENGINE_MSPPP); 75 disable |= (1ULL << NVKM_ENGINE_MSPPP);
76 } 76 }
77 77
78 if (!(r00154c & 0x00000004)) 78 if (!(r00154c & 0x00000004))
79 disable |= (1ULL << NVDEV_ENGINE_DISP); 79 disable |= (1ULL << NVKM_ENGINE_DISP);
80 if (!(r00154c & 0x00000020)) 80 if (!(r00154c & 0x00000020))
81 disable |= (1ULL << NVDEV_ENGINE_MSVLD); 81 disable |= (1ULL << NVKM_ENGINE_MSVLD);
82 if (!(r00154c & 0x00000200)) 82 if (!(r00154c & 0x00000200))
83 disable |= (1ULL << NVDEV_ENGINE_CE0); 83 disable |= (1ULL << NVKM_ENGINE_CE0);
84 84
85 return disable; 85 return disable;
86} 86}
@@ -99,9 +99,10 @@ gt215_devinit_mmio_part[] = {
99}; 99};
100 100
101static u32 101static u32
102gt215_devinit_mmio(struct nvkm_devinit *devinit, u32 addr) 102gt215_devinit_mmio(struct nvkm_devinit *base, u32 addr)
103{ 103{
104 struct nv50_devinit_priv *priv = (void *)devinit; 104 struct nv50_devinit *init = nv50_devinit(base);
105 struct nvkm_device *device = init->base.subdev.device;
105 u32 *mmio = gt215_devinit_mmio_part; 106 u32 *mmio = gt215_devinit_mmio_part;
106 107
107 /* the init tables on some boards have INIT_RAM_RESTRICT_ZM_REG_GROUP 108 /* the init tables on some boards have INIT_RAM_RESTRICT_ZM_REG_GROUP
@@ -113,7 +114,7 @@ gt215_devinit_mmio(struct nvkm_devinit *devinit, u32 addr)
113 * 114 *
114 * the binary driver avoids touching these registers at all, however, 115 * the binary driver avoids touching these registers at all, however,
115 * the video bios doesn't care and does what the scripts say. it's 116 * the video bios doesn't care and does what the scripts say. it's
116 * presumed that the io-port access to priv registers isn't effected 117 * presumed that the io-port access to init registers isn't effected
117 * by the screw-up bug mentioned above. 118 * by the screw-up bug mentioned above.
118 * 119 *
119 * really, a new opcode should've been invented to handle these 120 * really, a new opcode should've been invented to handle these
@@ -122,9 +123,9 @@ gt215_devinit_mmio(struct nvkm_devinit *devinit, u32 addr)
122 while (mmio[0]) { 123 while (mmio[0]) {
123 if (addr >= mmio[0] && addr <= mmio[1]) { 124 if (addr >= mmio[0] && addr <= mmio[1]) {
124 u32 part = (addr / mmio[2]) & 7; 125 u32 part = (addr / mmio[2]) & 7;
125 if (!priv->r001540) 126 if (!init->r001540)
126 priv->r001540 = nv_rd32(priv, 0x001540); 127 init->r001540 = nvkm_rd32(device, 0x001540);
127 if (part >= hweight8((priv->r001540 >> 16) & 0xff)) 128 if (part >= hweight8((init->r001540 >> 16) & 0xff))
128 return ~0; 129 return ~0;
129 return addr; 130 return addr;
130 } 131 }
@@ -134,17 +135,19 @@ gt215_devinit_mmio(struct nvkm_devinit *devinit, u32 addr)
134 return addr; 135 return addr;
135} 136}
136 137
137struct nvkm_oclass * 138static const struct nvkm_devinit_func
138gt215_devinit_oclass = &(struct nvkm_devinit_impl) { 139gt215_devinit = {
139 .base.handle = NV_SUBDEV(DEVINIT, 0xa3), 140 .preinit = nv50_devinit_preinit,
140 .base.ofuncs = &(struct nvkm_ofuncs) { 141 .init = nv50_devinit_init,
141 .ctor = nv50_devinit_ctor, 142 .post = nv04_devinit_post,
142 .dtor = _nvkm_devinit_dtor, 143 .mmio = gt215_devinit_mmio,
143 .init = nv50_devinit_init,
144 .fini = _nvkm_devinit_fini,
145 },
146 .pll_set = gt215_devinit_pll_set, 144 .pll_set = gt215_devinit_pll_set,
147 .disable = gt215_devinit_disable, 145 .disable = gt215_devinit_disable,
148 .mmio = gt215_devinit_mmio, 146};
149 .post = nvbios_init, 147
150}.base; 148int
149gt215_devinit_new(struct nvkm_device *device, int index,
150 struct nvkm_devinit **pinit)
151{
152 return nv50_devinit_new_(&gt215_devinit, device, index, pinit);
153}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
index 55cf48bbca1c..ce4f718e98a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
@@ -27,40 +27,42 @@
27#include <subdev/bios/init.h> 27#include <subdev/bios/init.h>
28 28
29static u64 29static u64
30mcp89_devinit_disable(struct nvkm_devinit *devinit) 30mcp89_devinit_disable(struct nvkm_devinit *init)
31{ 31{
32 struct nv50_devinit_priv *priv = (void *)devinit; 32 struct nvkm_device *device = init->subdev.device;
33 u32 r001540 = nv_rd32(priv, 0x001540); 33 u32 r001540 = nvkm_rd32(device, 0x001540);
34 u32 r00154c = nv_rd32(priv, 0x00154c); 34 u32 r00154c = nvkm_rd32(device, 0x00154c);
35 u64 disable = 0; 35 u64 disable = 0;
36 36
37 if (!(r001540 & 0x40000000)) { 37 if (!(r001540 & 0x40000000)) {
38 disable |= (1ULL << NVDEV_ENGINE_MSPDEC); 38 disable |= (1ULL << NVKM_ENGINE_MSPDEC);
39 disable |= (1ULL << NVDEV_ENGINE_MSPPP); 39 disable |= (1ULL << NVKM_ENGINE_MSPPP);
40 } 40 }
41 41
42 if (!(r00154c & 0x00000004)) 42 if (!(r00154c & 0x00000004))
43 disable |= (1ULL << NVDEV_ENGINE_DISP); 43 disable |= (1ULL << NVKM_ENGINE_DISP);
44 if (!(r00154c & 0x00000020)) 44 if (!(r00154c & 0x00000020))
45 disable |= (1ULL << NVDEV_ENGINE_MSVLD); 45 disable |= (1ULL << NVKM_ENGINE_MSVLD);
46 if (!(r00154c & 0x00000040)) 46 if (!(r00154c & 0x00000040))
47 disable |= (1ULL << NVDEV_ENGINE_VIC); 47 disable |= (1ULL << NVKM_ENGINE_VIC);
48 if (!(r00154c & 0x00000200)) 48 if (!(r00154c & 0x00000200))
49 disable |= (1ULL << NVDEV_ENGINE_CE0); 49 disable |= (1ULL << NVKM_ENGINE_CE0);
50 50
51 return disable; 51 return disable;
52} 52}
53 53
54struct nvkm_oclass * 54static const struct nvkm_devinit_func
55mcp89_devinit_oclass = &(struct nvkm_devinit_impl) { 55mcp89_devinit = {
56 .base.handle = NV_SUBDEV(DEVINIT, 0xaf), 56 .preinit = nv50_devinit_preinit,
57 .base.ofuncs = &(struct nvkm_ofuncs) { 57 .init = nv50_devinit_init,
58 .ctor = nv50_devinit_ctor, 58 .post = nv04_devinit_post,
59 .dtor = _nvkm_devinit_dtor,
60 .init = nv50_devinit_init,
61 .fini = _nvkm_devinit_fini,
62 },
63 .pll_set = gt215_devinit_pll_set, 59 .pll_set = gt215_devinit_pll_set,
64 .disable = mcp89_devinit_disable, 60 .disable = mcp89_devinit_disable,
65 .post = nvbios_init, 61};
66}.base; 62
63int
64mcp89_devinit_new(struct nvkm_device *device, int index,
65 struct nvkm_devinit **pinit)
66{
67 return nv50_devinit_new_(&mcp89_devinit, device, index, pinit);
68}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
index 03a0da834244..c8d455346fcd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
@@ -33,25 +33,26 @@
33#include <subdev/vga.h> 33#include <subdev/vga.h>
34 34
35static void 35static void
36nv04_devinit_meminit(struct nvkm_devinit *devinit) 36nv04_devinit_meminit(struct nvkm_devinit *init)
37{ 37{
38 struct nv04_devinit_priv *priv = (void *)devinit; 38 struct nvkm_subdev *subdev = &init->subdev;
39 struct nvkm_device *device = subdev->device;
39 u32 patt = 0xdeadbeef; 40 u32 patt = 0xdeadbeef;
40 struct io_mapping *fb; 41 struct io_mapping *fb;
41 int i; 42 int i;
42 43
43 /* Map the framebuffer aperture */ 44 /* Map the framebuffer aperture */
44 fb = fbmem_init(nv_device(priv)); 45 fb = fbmem_init(device);
45 if (!fb) { 46 if (!fb) {
46 nv_error(priv, "failed to map fb\n"); 47 nvkm_error(subdev, "failed to map fb\n");
47 return; 48 return;
48 } 49 }
49 50
50 /* Sequencer and refresh off */ 51 /* Sequencer and refresh off */
51 nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20); 52 nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) | 0x20);
52 nv_mask(priv, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF); 53 nvkm_mask(device, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
53 54
54 nv_mask(priv, NV04_PFB_BOOT_0, ~0, 55 nvkm_mask(device, NV04_PFB_BOOT_0, ~0,
55 NV04_PFB_BOOT_0_RAM_AMOUNT_16MB | 56 NV04_PFB_BOOT_0_RAM_AMOUNT_16MB |
56 NV04_PFB_BOOT_0_RAM_WIDTH_128 | 57 NV04_PFB_BOOT_0_RAM_WIDTH_128 |
57 NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT); 58 NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT);
@@ -62,49 +63,49 @@ nv04_devinit_meminit(struct nvkm_devinit *devinit)
62 fbmem_poke(fb, 0x400000, patt + 1); 63 fbmem_poke(fb, 0x400000, patt + 1);
63 64
64 if (fbmem_peek(fb, 0) == patt + 1) { 65 if (fbmem_peek(fb, 0) == patt + 1) {
65 nv_mask(priv, NV04_PFB_BOOT_0, 66 nvkm_mask(device, NV04_PFB_BOOT_0,
66 NV04_PFB_BOOT_0_RAM_TYPE, 67 NV04_PFB_BOOT_0_RAM_TYPE,
67 NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT); 68 NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT);
68 nv_mask(priv, NV04_PFB_DEBUG_0, 69 nvkm_mask(device, NV04_PFB_DEBUG_0,
69 NV04_PFB_DEBUG_0_REFRESH_OFF, 0); 70 NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
70 71
71 for (i = 0; i < 4; i++) 72 for (i = 0; i < 4; i++)
72 fbmem_poke(fb, 4 * i, patt); 73 fbmem_poke(fb, 4 * i, patt);
73 74
74 if ((fbmem_peek(fb, 0xc) & 0xffff) != (patt & 0xffff)) 75 if ((fbmem_peek(fb, 0xc) & 0xffff) != (patt & 0xffff))
75 nv_mask(priv, NV04_PFB_BOOT_0, 76 nvkm_mask(device, NV04_PFB_BOOT_0,
76 NV04_PFB_BOOT_0_RAM_WIDTH_128 | 77 NV04_PFB_BOOT_0_RAM_WIDTH_128 |
77 NV04_PFB_BOOT_0_RAM_AMOUNT, 78 NV04_PFB_BOOT_0_RAM_AMOUNT,
78 NV04_PFB_BOOT_0_RAM_AMOUNT_8MB); 79 NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
79 } else 80 } else
80 if ((fbmem_peek(fb, 0xc) & 0xffff0000) != (patt & 0xffff0000)) { 81 if ((fbmem_peek(fb, 0xc) & 0xffff0000) != (patt & 0xffff0000)) {
81 nv_mask(priv, NV04_PFB_BOOT_0, 82 nvkm_mask(device, NV04_PFB_BOOT_0,
82 NV04_PFB_BOOT_0_RAM_WIDTH_128 | 83 NV04_PFB_BOOT_0_RAM_WIDTH_128 |
83 NV04_PFB_BOOT_0_RAM_AMOUNT, 84 NV04_PFB_BOOT_0_RAM_AMOUNT,
84 NV04_PFB_BOOT_0_RAM_AMOUNT_4MB); 85 NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
85 } else 86 } else
86 if (fbmem_peek(fb, 0) != patt) { 87 if (fbmem_peek(fb, 0) != patt) {
87 if (fbmem_readback(fb, 0x800000, patt)) 88 if (fbmem_readback(fb, 0x800000, patt))
88 nv_mask(priv, NV04_PFB_BOOT_0, 89 nvkm_mask(device, NV04_PFB_BOOT_0,
89 NV04_PFB_BOOT_0_RAM_AMOUNT, 90 NV04_PFB_BOOT_0_RAM_AMOUNT,
90 NV04_PFB_BOOT_0_RAM_AMOUNT_8MB); 91 NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
91 else 92 else
92 nv_mask(priv, NV04_PFB_BOOT_0, 93 nvkm_mask(device, NV04_PFB_BOOT_0,
93 NV04_PFB_BOOT_0_RAM_AMOUNT, 94 NV04_PFB_BOOT_0_RAM_AMOUNT,
94 NV04_PFB_BOOT_0_RAM_AMOUNT_4MB); 95 NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
95 96
96 nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE, 97 nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
97 NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT); 98 NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT);
98 } else 99 } else
99 if (!fbmem_readback(fb, 0x800000, patt)) { 100 if (!fbmem_readback(fb, 0x800000, patt)) {
100 nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT, 101 nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
101 NV04_PFB_BOOT_0_RAM_AMOUNT_8MB); 102 NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
102 103
103 } 104 }
104 105
105 /* Refresh on, sequencer on */ 106 /* Refresh on, sequencer on */
106 nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0); 107 nvkm_mask(device, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
107 nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20); 108 nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) & ~0x20);
108 fbmem_fini(fb); 109 fbmem_fini(fb);
109} 110}
110 111
@@ -139,11 +140,12 @@ powerctrl_1_shift(int chip_version, int reg)
139} 140}
140 141
141void 142void
142setPLL_single(struct nvkm_devinit *devinit, u32 reg, 143setPLL_single(struct nvkm_devinit *init, u32 reg,
143 struct nvkm_pll_vals *pv) 144 struct nvkm_pll_vals *pv)
144{ 145{
145 int chip_version = nvkm_bios(devinit)->version.chip; 146 struct nvkm_device *device = init->subdev.device;
146 uint32_t oldpll = nv_rd32(devinit, reg); 147 int chip_version = device->bios->version.chip;
148 uint32_t oldpll = nvkm_rd32(device, reg);
147 int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff; 149 int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
148 uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1; 150 uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
149 uint32_t saved_powerctrl_1 = 0; 151 uint32_t saved_powerctrl_1 = 0;
@@ -153,30 +155,30 @@ setPLL_single(struct nvkm_devinit *devinit, u32 reg,
153 return; /* already set */ 155 return; /* already set */
154 156
155 if (shift_powerctrl_1 >= 0) { 157 if (shift_powerctrl_1 >= 0) {
156 saved_powerctrl_1 = nv_rd32(devinit, 0x001584); 158 saved_powerctrl_1 = nvkm_rd32(device, 0x001584);
157 nv_wr32(devinit, 0x001584, 159 nvkm_wr32(device, 0x001584,
158 (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) | 160 (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
159 1 << shift_powerctrl_1); 161 1 << shift_powerctrl_1);
160 } 162 }
161 163
162 if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1)) 164 if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
163 /* upclock -- write new post divider first */ 165 /* upclock -- write new post divider first */
164 nv_wr32(devinit, reg, pv->log2P << 16 | (oldpll & 0xffff)); 166 nvkm_wr32(device, reg, pv->log2P << 16 | (oldpll & 0xffff));
165 else 167 else
166 /* downclock -- write new NM first */ 168 /* downclock -- write new NM first */
167 nv_wr32(devinit, reg, (oldpll & 0xffff0000) | pv->NM1); 169 nvkm_wr32(device, reg, (oldpll & 0xffff0000) | pv->NM1);
168 170
169 if ((chip_version < 0x17 || chip_version == 0x1a) && 171 if ((chip_version < 0x17 || chip_version == 0x1a) &&
170 chip_version != 0x11) 172 chip_version != 0x11)
171 /* wait a bit on older chips */ 173 /* wait a bit on older chips */
172 msleep(64); 174 msleep(64);
173 nv_rd32(devinit, reg); 175 nvkm_rd32(device, reg);
174 176
175 /* then write the other half as well */ 177 /* then write the other half as well */
176 nv_wr32(devinit, reg, pll); 178 nvkm_wr32(device, reg, pll);
177 179
178 if (shift_powerctrl_1 >= 0) 180 if (shift_powerctrl_1 >= 0)
179 nv_wr32(devinit, 0x001584, saved_powerctrl_1); 181 nvkm_wr32(device, 0x001584, saved_powerctrl_1);
180} 182}
181 183
182static uint32_t 184static uint32_t
@@ -193,14 +195,15 @@ new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
193} 195}
194 196
195void 197void
196setPLL_double_highregs(struct nvkm_devinit *devinit, u32 reg1, 198setPLL_double_highregs(struct nvkm_devinit *init, u32 reg1,
197 struct nvkm_pll_vals *pv) 199 struct nvkm_pll_vals *pv)
198{ 200{
199 int chip_version = nvkm_bios(devinit)->version.chip; 201 struct nvkm_device *device = init->subdev.device;
202 int chip_version = device->bios->version.chip;
200 bool nv3035 = chip_version == 0x30 || chip_version == 0x35; 203 bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
201 uint32_t reg2 = reg1 + ((reg1 == 0x680520) ? 0x5c : 0x70); 204 uint32_t reg2 = reg1 + ((reg1 == 0x680520) ? 0x5c : 0x70);
202 uint32_t oldpll1 = nv_rd32(devinit, reg1); 205 uint32_t oldpll1 = nvkm_rd32(device, reg1);
203 uint32_t oldpll2 = !nv3035 ? nv_rd32(devinit, reg2) : 0; 206 uint32_t oldpll2 = !nv3035 ? nvkm_rd32(device, reg2) : 0;
204 uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1; 207 uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
205 uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2; 208 uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
206 uint32_t oldramdac580 = 0, ramdac580 = 0; 209 uint32_t oldramdac580 = 0, ramdac580 = 0;
@@ -215,7 +218,7 @@ setPLL_double_highregs(struct nvkm_devinit *devinit, u32 reg1,
215 pll2 = 0; 218 pll2 = 0;
216 } 219 }
217 if (chip_version > 0x40 && reg1 >= 0x680508) { /* !nv40 */ 220 if (chip_version > 0x40 && reg1 >= 0x680508) { /* !nv40 */
218 oldramdac580 = nv_rd32(devinit, 0x680580); 221 oldramdac580 = nvkm_rd32(device, 0x680580);
219 ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580); 222 ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
220 if (oldramdac580 != ramdac580) 223 if (oldramdac580 != ramdac580)
221 oldpll1 = ~0; /* force mismatch */ 224 oldpll1 = ~0; /* force mismatch */
@@ -231,8 +234,8 @@ setPLL_double_highregs(struct nvkm_devinit *devinit, u32 reg1,
231 return; /* already set */ 234 return; /* already set */
232 235
233 if (shift_powerctrl_1 >= 0) { 236 if (shift_powerctrl_1 >= 0) {
234 saved_powerctrl_1 = nv_rd32(devinit, 0x001584); 237 saved_powerctrl_1 = nvkm_rd32(device, 0x001584);
235 nv_wr32(devinit, 0x001584, 238 nvkm_wr32(device, 0x001584,
236 (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) | 239 (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
237 1 << shift_powerctrl_1); 240 1 << shift_powerctrl_1);
238 } 241 }
@@ -251,26 +254,26 @@ setPLL_double_highregs(struct nvkm_devinit *devinit, u32 reg1,
251 shift_c040 += 2; 254 shift_c040 += 2;
252 } 255 }
253 256
254 savedc040 = nv_rd32(devinit, 0xc040); 257 savedc040 = nvkm_rd32(device, 0xc040);
255 if (shift_c040 != 14) 258 if (shift_c040 != 14)
256 nv_wr32(devinit, 0xc040, savedc040 & ~(3 << shift_c040)); 259 nvkm_wr32(device, 0xc040, savedc040 & ~(3 << shift_c040));
257 } 260 }
258 261
259 if (oldramdac580 != ramdac580) 262 if (oldramdac580 != ramdac580)
260 nv_wr32(devinit, 0x680580, ramdac580); 263 nvkm_wr32(device, 0x680580, ramdac580);
261 264
262 if (!nv3035) 265 if (!nv3035)
263 nv_wr32(devinit, reg2, pll2); 266 nvkm_wr32(device, reg2, pll2);
264 nv_wr32(devinit, reg1, pll1); 267 nvkm_wr32(device, reg1, pll1);
265 268
266 if (shift_powerctrl_1 >= 0) 269 if (shift_powerctrl_1 >= 0)
267 nv_wr32(devinit, 0x001584, saved_powerctrl_1); 270 nvkm_wr32(device, 0x001584, saved_powerctrl_1);
268 if (chip_version >= 0x40) 271 if (chip_version >= 0x40)
269 nv_wr32(devinit, 0xc040, savedc040); 272 nvkm_wr32(device, 0xc040, savedc040);
270} 273}
271 274
272void 275void
273setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg, 276setPLL_double_lowregs(struct nvkm_devinit *init, u32 NMNMreg,
274 struct nvkm_pll_vals *pv) 277 struct nvkm_pll_vals *pv)
275{ 278{
276 /* When setting PLLs, there is a merry game of disabling and enabling 279 /* When setting PLLs, there is a merry game of disabling and enabling
@@ -280,10 +283,10 @@ setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
280 * combined herein. Without luck it deviates from each card's formula 283 * combined herein. Without luck it deviates from each card's formula
281 * so as to not work on any :) 284 * so as to not work on any :)
282 */ 285 */
283 286 struct nvkm_device *device = init->subdev.device;
284 uint32_t Preg = NMNMreg - 4; 287 uint32_t Preg = NMNMreg - 4;
285 bool mpll = Preg == 0x4020; 288 bool mpll = Preg == 0x4020;
286 uint32_t oldPval = nv_rd32(devinit, Preg); 289 uint32_t oldPval = nvkm_rd32(device, Preg);
287 uint32_t NMNM = pv->NM2 << 16 | pv->NM1; 290 uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
288 uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) | 291 uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
289 0xc << 28 | pv->log2P << 16; 292 0xc << 28 | pv->log2P << 16;
@@ -292,7 +295,7 @@ setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
292 uint32_t maskc040 = ~(3 << 14), savedc040; 295 uint32_t maskc040 = ~(3 << 14), savedc040;
293 bool single_stage = !pv->NM2 || pv->N2 == pv->M2; 296 bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
294 297
295 if (nv_rd32(devinit, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval) 298 if (nvkm_rd32(device, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
296 return; 299 return;
297 300
298 if (Preg == 0x4000) 301 if (Preg == 0x4000)
@@ -304,7 +307,7 @@ setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
304 struct nvbios_pll info; 307 struct nvbios_pll info;
305 uint8_t Pval2; 308 uint8_t Pval2;
306 309
307 if (nvbios_pll_parse(nvkm_bios(devinit), Preg, &info)) 310 if (nvbios_pll_parse(device->bios, Preg, &info))
308 return; 311 return;
309 312
310 Pval2 = pv->log2P + info.bias_p; 313 Pval2 = pv->log2P + info.bias_p;
@@ -312,47 +315,48 @@ setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
312 Pval2 = info.max_p; 315 Pval2 = info.max_p;
313 Pval |= 1 << 28 | Pval2 << 20; 316 Pval |= 1 << 28 | Pval2 << 20;
314 317
315 saved4600 = nv_rd32(devinit, 0x4600); 318 saved4600 = nvkm_rd32(device, 0x4600);
316 nv_wr32(devinit, 0x4600, saved4600 | 8 << 28); 319 nvkm_wr32(device, 0x4600, saved4600 | 8 << 28);
317 } 320 }
318 if (single_stage) 321 if (single_stage)
319 Pval |= mpll ? 1 << 12 : 1 << 8; 322 Pval |= mpll ? 1 << 12 : 1 << 8;
320 323
321 nv_wr32(devinit, Preg, oldPval | 1 << 28); 324 nvkm_wr32(device, Preg, oldPval | 1 << 28);
322 nv_wr32(devinit, Preg, Pval & ~(4 << 28)); 325 nvkm_wr32(device, Preg, Pval & ~(4 << 28));
323 if (mpll) { 326 if (mpll) {
324 Pval |= 8 << 20; 327 Pval |= 8 << 20;
325 nv_wr32(devinit, 0x4020, Pval & ~(0xc << 28)); 328 nvkm_wr32(device, 0x4020, Pval & ~(0xc << 28));
326 nv_wr32(devinit, 0x4038, Pval & ~(0xc << 28)); 329 nvkm_wr32(device, 0x4038, Pval & ~(0xc << 28));
327 } 330 }
328 331
329 savedc040 = nv_rd32(devinit, 0xc040); 332 savedc040 = nvkm_rd32(device, 0xc040);
330 nv_wr32(devinit, 0xc040, savedc040 & maskc040); 333 nvkm_wr32(device, 0xc040, savedc040 & maskc040);
331 334
332 nv_wr32(devinit, NMNMreg, NMNM); 335 nvkm_wr32(device, NMNMreg, NMNM);
333 if (NMNMreg == 0x4024) 336 if (NMNMreg == 0x4024)
334 nv_wr32(devinit, 0x403c, NMNM); 337 nvkm_wr32(device, 0x403c, NMNM);
335 338
336 nv_wr32(devinit, Preg, Pval); 339 nvkm_wr32(device, Preg, Pval);
337 if (mpll) { 340 if (mpll) {
338 Pval &= ~(8 << 20); 341 Pval &= ~(8 << 20);
339 nv_wr32(devinit, 0x4020, Pval); 342 nvkm_wr32(device, 0x4020, Pval);
340 nv_wr32(devinit, 0x4038, Pval); 343 nvkm_wr32(device, 0x4038, Pval);
341 nv_wr32(devinit, 0x4600, saved4600); 344 nvkm_wr32(device, 0x4600, saved4600);
342 } 345 }
343 346
344 nv_wr32(devinit, 0xc040, savedc040); 347 nvkm_wr32(device, 0xc040, savedc040);
345 348
346 if (mpll) { 349 if (mpll) {
347 nv_wr32(devinit, 0x4020, Pval & ~(1 << 28)); 350 nvkm_wr32(device, 0x4020, Pval & ~(1 << 28));
348 nv_wr32(devinit, 0x4038, Pval & ~(1 << 28)); 351 nvkm_wr32(device, 0x4038, Pval & ~(1 << 28));
349 } 352 }
350} 353}
351 354
352int 355int
353nv04_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq) 356nv04_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
354{ 357{
355 struct nvkm_bios *bios = nvkm_bios(devinit); 358 struct nvkm_subdev *subdev = &devinit->subdev;
359 struct nvkm_bios *bios = subdev->device->bios;
356 struct nvkm_pll_vals pv; 360 struct nvkm_pll_vals pv;
357 struct nvbios_pll info; 361 struct nvbios_pll info;
358 int cv = bios->version.chip; 362 int cv = bios->version.chip;
@@ -363,8 +367,7 @@ nv04_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
363 if (ret) 367 if (ret)
364 return ret; 368 return ret;
365 369
366 ret = nv04_pll_calc(nv_subdev(devinit), &info, freq, 370 ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P);
367 &N1, &M1, &N2, &M2, &P);
368 if (!ret) 371 if (!ret)
369 return -EINVAL; 372 return -EINVAL;
370 373
@@ -388,83 +391,76 @@ nv04_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
388} 391}
389 392
390int 393int
391nv04_devinit_fini(struct nvkm_object *object, bool suspend) 394nv04_devinit_post(struct nvkm_devinit *init, bool execute)
392{ 395{
393 struct nv04_devinit_priv *priv = (void *)object; 396 return nvbios_init(&init->subdev, execute);
394 int ret; 397}
395 398
396 /* make i2c busses accessible */ 399void
397 nv_mask(priv, 0x000200, 0x00000001, 0x00000001); 400nv04_devinit_preinit(struct nvkm_devinit *base)
401{
402 struct nv04_devinit *init = nv04_devinit(base);
403 struct nvkm_subdev *subdev = &init->base.subdev;
404 struct nvkm_device *device = subdev->device;
398 405
399 ret = nvkm_devinit_fini(&priv->base, suspend); 406 /* make i2c busses accessible */
400 if (ret) 407 nvkm_mask(device, 0x000200, 0x00000001, 0x00000001);
401 return ret;
402 408
403 /* unslave crtcs */ 409 /* unslave crtcs */
404 if (priv->owner < 0) 410 if (init->owner < 0)
405 priv->owner = nv_rdvgaowner(priv); 411 init->owner = nvkm_rdvgaowner(device);
406 nv_wrvgaowner(priv, 0); 412 nvkm_wrvgaowner(device, 0);
407 return 0; 413
408} 414 if (!init->base.post) {
409 415 u32 htotal = nvkm_rdvgac(device, 0, 0x06);
410int 416 htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x01) << 8;
411nv04_devinit_init(struct nvkm_object *object) 417 htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x20) << 4;
412{ 418 htotal |= (nvkm_rdvgac(device, 0, 0x25) & 0x01) << 10;
413 struct nv04_devinit_priv *priv = (void *)object; 419 htotal |= (nvkm_rdvgac(device, 0, 0x41) & 0x01) << 11;
414
415 if (!priv->base.post) {
416 u32 htotal = nv_rdvgac(priv, 0, 0x06);
417 htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x01) << 8;
418 htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x20) << 4;
419 htotal |= (nv_rdvgac(priv, 0, 0x25) & 0x01) << 10;
420 htotal |= (nv_rdvgac(priv, 0, 0x41) & 0x01) << 11;
421 if (!htotal) { 420 if (!htotal) {
422 nv_info(priv, "adaptor not initialised\n"); 421 nvkm_debug(subdev, "adaptor not initialised\n");
423 priv->base.post = true; 422 init->base.post = true;
424 } 423 }
425 } 424 }
426
427 return nvkm_devinit_init(&priv->base);
428} 425}
429 426
430void 427void *
431nv04_devinit_dtor(struct nvkm_object *object) 428nv04_devinit_dtor(struct nvkm_devinit *base)
432{ 429{
433 struct nv04_devinit_priv *priv = (void *)object; 430 struct nv04_devinit *init = nv04_devinit(base);
434
435 /* restore vga owner saved at first init */ 431 /* restore vga owner saved at first init */
436 nv_wrvgaowner(priv, priv->owner); 432 nvkm_wrvgaowner(init->base.subdev.device, init->owner);
437 433 return init;
438 nvkm_devinit_destroy(&priv->base);
439} 434}
440 435
441int 436int
442nv04_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 437nv04_devinit_new_(const struct nvkm_devinit_func *func,
443 struct nvkm_oclass *oclass, void *data, u32 size, 438 struct nvkm_device *device, int index,
444 struct nvkm_object **pobject) 439 struct nvkm_devinit **pinit)
445{ 440{
446 struct nv04_devinit_priv *priv; 441 struct nv04_devinit *init;
447 int ret;
448 442
449 ret = nvkm_devinit_create(parent, engine, oclass, &priv); 443 if (!(init = kzalloc(sizeof(*init), GFP_KERNEL)))
450 *pobject = nv_object(priv); 444 return -ENOMEM;
451 if (ret) 445 *pinit = &init->base;
452 return ret;
453 446
454 priv->owner = -1; 447 nvkm_devinit_ctor(func, device, index, &init->base);
448 init->owner = -1;
455 return 0; 449 return 0;
456} 450}
457 451
458struct nvkm_oclass * 452static const struct nvkm_devinit_func
459nv04_devinit_oclass = &(struct nvkm_devinit_impl) { 453nv04_devinit = {
460 .base.handle = NV_SUBDEV(DEVINIT, 0x04), 454 .dtor = nv04_devinit_dtor,
461 .base.ofuncs = &(struct nvkm_ofuncs) { 455 .preinit = nv04_devinit_preinit,
462 .ctor = nv04_devinit_ctor, 456 .post = nv04_devinit_post,
463 .dtor = nv04_devinit_dtor,
464 .init = nv04_devinit_init,
465 .fini = nv04_devinit_fini,
466 },
467 .meminit = nv04_devinit_meminit, 457 .meminit = nv04_devinit_meminit,
468 .pll_set = nv04_devinit_pll_set, 458 .pll_set = nv04_devinit_pll_set,
469 .post = nvbios_init, 459};
470}.base; 460
461int
462nv04_devinit_new(struct nvkm_device *device, int index,
463 struct nvkm_devinit **pinit)
464{
465 return nv04_devinit_new_(&nv04_devinit, device, index, pinit);
466}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
index 7c63abf11e22..4a87c8c2bce8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
@@ -1,19 +1,19 @@
1#ifndef __NVKM_DEVINIT_NV04_H__ 1#ifndef __NV04_DEVINIT_H__
2#define __NVKM_DEVINIT_NV04_H__ 2#define __NV04_DEVINIT_H__
3#define nv04_devinit(p) container_of((p), struct nv04_devinit, base)
3#include "priv.h" 4#include "priv.h"
4struct nvkm_pll_vals; 5struct nvkm_pll_vals;
5 6
6struct nv04_devinit_priv { 7struct nv04_devinit {
7 struct nvkm_devinit base; 8 struct nvkm_devinit base;
8 int owner; 9 int owner;
9}; 10};
10 11
11int nv04_devinit_ctor(struct nvkm_object *, struct nvkm_object *, 12int nv04_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *,
12 struct nvkm_oclass *, void *, u32, 13 int, struct nvkm_devinit **);
13 struct nvkm_object **); 14void *nv04_devinit_dtor(struct nvkm_devinit *);
14void nv04_devinit_dtor(struct nvkm_object *); 15void nv04_devinit_preinit(struct nvkm_devinit *);
15int nv04_devinit_init(struct nvkm_object *); 16void nv04_devinit_fini(struct nvkm_devinit *);
16int nv04_devinit_fini(struct nvkm_object *, bool);
17int nv04_devinit_pll_set(struct nvkm_devinit *, u32, u32); 17int nv04_devinit_pll_set(struct nvkm_devinit *, u32, u32);
18 18
19void setPLL_single(struct nvkm_devinit *, u32, struct nvkm_pll_vals *); 19void setPLL_single(struct nvkm_devinit *, u32, struct nvkm_pll_vals *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
index def8649216c2..9891eadca1ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
@@ -32,7 +32,7 @@
32#include <subdev/vga.h> 32#include <subdev/vga.h>
33 33
34static void 34static void
35nv05_devinit_meminit(struct nvkm_devinit *devinit) 35nv05_devinit_meminit(struct nvkm_devinit *init)
36{ 36{
37 static const u8 default_config_tab[][2] = { 37 static const u8 default_config_tab[][2] = {
38 { 0x24, 0x00 }, 38 { 0x24, 0x00 },
@@ -44,8 +44,9 @@ nv05_devinit_meminit(struct nvkm_devinit *devinit)
44 { 0x06, 0x00 }, 44 { 0x06, 0x00 },
45 { 0x00, 0x00 } 45 { 0x00, 0x00 }
46 }; 46 };
47 struct nv04_devinit_priv *priv = (void *)devinit; 47 struct nvkm_subdev *subdev = &init->subdev;
48 struct nvkm_bios *bios = nvkm_bios(priv); 48 struct nvkm_device *device = subdev->device;
49 struct nvkm_bios *bios = device->bios;
49 struct io_mapping *fb; 50 struct io_mapping *fb;
50 u32 patt = 0xdeadbeef; 51 u32 patt = 0xdeadbeef;
51 u16 data; 52 u16 data;
@@ -53,88 +54,90 @@ nv05_devinit_meminit(struct nvkm_devinit *devinit)
53 int i, v; 54 int i, v;
54 55
55 /* Map the framebuffer aperture */ 56 /* Map the framebuffer aperture */
56 fb = fbmem_init(nv_device(priv)); 57 fb = fbmem_init(device);
57 if (!fb) { 58 if (!fb) {
58 nv_error(priv, "failed to map fb\n"); 59 nvkm_error(subdev, "failed to map fb\n");
59 return; 60 return;
60 } 61 }
61 62
62 strap = (nv_rd32(priv, 0x101000) & 0x0000003c) >> 2; 63 strap = (nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2;
63 if ((data = bmp_mem_init_table(bios))) { 64 if ((data = bmp_mem_init_table(bios))) {
64 ramcfg[0] = nv_ro08(bios, data + 2 * strap + 0); 65 ramcfg[0] = nvbios_rd08(bios, data + 2 * strap + 0);
65 ramcfg[1] = nv_ro08(bios, data + 2 * strap + 1); 66 ramcfg[1] = nvbios_rd08(bios, data + 2 * strap + 1);
66 } else { 67 } else {
67 ramcfg[0] = default_config_tab[strap][0]; 68 ramcfg[0] = default_config_tab[strap][0];
68 ramcfg[1] = default_config_tab[strap][1]; 69 ramcfg[1] = default_config_tab[strap][1];
69 } 70 }
70 71
71 /* Sequencer off */ 72 /* Sequencer off */
72 nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20); 73 nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) | 0x20);
73 74
74 if (nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE) 75 if (nvkm_rd32(device, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
75 goto out; 76 goto out;
76 77
77 nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0); 78 nvkm_mask(device, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
78 79
79 /* If present load the hardcoded scrambling table */ 80 /* If present load the hardcoded scrambling table */
80 if (data) { 81 if (data) {
81 for (i = 0, data += 0x10; i < 8; i++, data += 4) { 82 for (i = 0, data += 0x10; i < 8; i++, data += 4) {
82 u32 scramble = nv_ro32(bios, data); 83 u32 scramble = nvbios_rd32(bios, data);
83 nv_wr32(priv, NV04_PFB_SCRAMBLE(i), scramble); 84 nvkm_wr32(device, NV04_PFB_SCRAMBLE(i), scramble);
84 } 85 }
85 } 86 }
86 87
87 /* Set memory type/width/length defaults depending on the straps */ 88 /* Set memory type/width/length defaults depending on the straps */
88 nv_mask(priv, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]); 89 nvkm_mask(device, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
89 90
90 if (ramcfg[1] & 0x80) 91 if (ramcfg[1] & 0x80)
91 nv_mask(priv, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE); 92 nvkm_mask(device, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
92 93
93 nv_mask(priv, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20); 94 nvkm_mask(device, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
94 nv_mask(priv, NV04_PFB_CFG1, 0, 1); 95 nvkm_mask(device, NV04_PFB_CFG1, 0, 1);
95 96
96 /* Probe memory bus width */ 97 /* Probe memory bus width */
97 for (i = 0; i < 4; i++) 98 for (i = 0; i < 4; i++)
98 fbmem_poke(fb, 4 * i, patt); 99 fbmem_poke(fb, 4 * i, patt);
99 100
100 if (fbmem_peek(fb, 0xc) != patt) 101 if (fbmem_peek(fb, 0xc) != patt)
101 nv_mask(priv, NV04_PFB_BOOT_0, 102 nvkm_mask(device, NV04_PFB_BOOT_0,
102 NV04_PFB_BOOT_0_RAM_WIDTH_128, 0); 103 NV04_PFB_BOOT_0_RAM_WIDTH_128, 0);
103 104
104 /* Probe memory length */ 105 /* Probe memory length */
105 v = nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT; 106 v = nvkm_rd32(device, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
106 107
107 if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB && 108 if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB &&
108 (!fbmem_readback(fb, 0x1000000, ++patt) || 109 (!fbmem_readback(fb, 0x1000000, ++patt) ||
109 !fbmem_readback(fb, 0, ++patt))) 110 !fbmem_readback(fb, 0, ++patt)))
110 nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT, 111 nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
111 NV04_PFB_BOOT_0_RAM_AMOUNT_16MB); 112 NV04_PFB_BOOT_0_RAM_AMOUNT_16MB);
112 113
113 if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB && 114 if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB &&
114 !fbmem_readback(fb, 0x800000, ++patt)) 115 !fbmem_readback(fb, 0x800000, ++patt))
115 nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT, 116 nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
116 NV04_PFB_BOOT_0_RAM_AMOUNT_8MB); 117 NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
117 118
118 if (!fbmem_readback(fb, 0x400000, ++patt)) 119 if (!fbmem_readback(fb, 0x400000, ++patt))
119 nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT, 120 nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
120 NV04_PFB_BOOT_0_RAM_AMOUNT_4MB); 121 NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
121 122
122out: 123out:
123 /* Sequencer on */ 124 /* Sequencer on */
124 nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20); 125 nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) & ~0x20);
125 fbmem_fini(fb); 126 fbmem_fini(fb);
126} 127}
127 128
128struct nvkm_oclass * 129static const struct nvkm_devinit_func
129nv05_devinit_oclass = &(struct nvkm_devinit_impl) { 130nv05_devinit = {
130 .base.handle = NV_SUBDEV(DEVINIT, 0x05), 131 .dtor = nv04_devinit_dtor,
131 .base.ofuncs = &(struct nvkm_ofuncs) { 132 .preinit = nv04_devinit_preinit,
132 .ctor = nv04_devinit_ctor, 133 .post = nv04_devinit_post,
133 .dtor = nv04_devinit_dtor,
134 .init = nv04_devinit_init,
135 .fini = nv04_devinit_fini,
136 },
137 .meminit = nv05_devinit_meminit, 134 .meminit = nv05_devinit_meminit,
138 .pll_set = nv04_devinit_pll_set, 135 .pll_set = nv04_devinit_pll_set,
139 .post = nvbios_init, 136};
140}.base; 137
138int
139nv05_devinit_new(struct nvkm_device *device, int index,
140 struct nvkm_devinit **pinit)
141{
142 return nv04_devinit_new_(&nv05_devinit, device, index, pinit);
143}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
index 7aabc1bf0640..570822f83acf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
@@ -30,33 +30,33 @@
30#include <subdev/bios/init.h> 30#include <subdev/bios/init.h>
31 31
32static void 32static void
33nv10_devinit_meminit(struct nvkm_devinit *devinit) 33nv10_devinit_meminit(struct nvkm_devinit *init)
34{ 34{
35 struct nv04_devinit_priv *priv = (void *)devinit; 35 struct nvkm_subdev *subdev = &init->subdev;
36 struct nvkm_device *device = subdev->device;
36 static const int mem_width[] = { 0x10, 0x00, 0x20 }; 37 static const int mem_width[] = { 0x10, 0x00, 0x20 };
37 int mem_width_count; 38 int mem_width_count;
38 uint32_t patt = 0xdeadbeef; 39 uint32_t patt = 0xdeadbeef;
39 struct io_mapping *fb; 40 struct io_mapping *fb;
40 int i, j, k; 41 int i, j, k;
41 42
42 if (nv_device(priv)->card_type >= NV_11 && 43 if (device->card_type >= NV_11 && device->chipset >= 0x17)
43 nv_device(priv)->chipset >= 0x17)
44 mem_width_count = 3; 44 mem_width_count = 3;
45 else 45 else
46 mem_width_count = 2; 46 mem_width_count = 2;
47 47
48 /* Map the framebuffer aperture */ 48 /* Map the framebuffer aperture */
49 fb = fbmem_init(nv_device(priv)); 49 fb = fbmem_init(device);
50 if (!fb) { 50 if (!fb) {
51 nv_error(priv, "failed to map fb\n"); 51 nvkm_error(subdev, "failed to map fb\n");
52 return; 52 return;
53 } 53 }
54 54
55 nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1); 55 nvkm_wr32(device, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
56 56
57 /* Probe memory bus width */ 57 /* Probe memory bus width */
58 for (i = 0; i < mem_width_count; i++) { 58 for (i = 0; i < mem_width_count; i++) {
59 nv_mask(priv, NV04_PFB_CFG0, 0x30, mem_width[i]); 59 nvkm_mask(device, NV04_PFB_CFG0, 0x30, mem_width[i]);
60 60
61 for (j = 0; j < 4; j++) { 61 for (j = 0; j < 4; j++) {
62 for (k = 0; k < 4; k++) 62 for (k = 0; k < 4; k++)
@@ -75,7 +75,7 @@ mem_width_found:
75 75
76 /* Probe amount of installed memory */ 76 /* Probe amount of installed memory */
77 for (i = 0; i < 4; i++) { 77 for (i = 0; i < 4; i++) {
78 int off = nv_rd32(priv, 0x10020c) - 0x100000; 78 int off = nvkm_rd32(device, 0x10020c) - 0x100000;
79 79
80 fbmem_poke(fb, off, patt); 80 fbmem_poke(fb, off, patt);
81 fbmem_poke(fb, 0, 0); 81 fbmem_poke(fb, 0, 0);
@@ -90,22 +90,24 @@ mem_width_found:
90 } 90 }
91 91
92 /* IC missing - disable the upper half memory space. */ 92 /* IC missing - disable the upper half memory space. */
93 nv_mask(priv, NV04_PFB_CFG0, 0x1000, 0); 93 nvkm_mask(device, NV04_PFB_CFG0, 0x1000, 0);
94 94
95amount_found: 95amount_found:
96 fbmem_fini(fb); 96 fbmem_fini(fb);
97} 97}
98 98
99struct nvkm_oclass * 99static const struct nvkm_devinit_func
100nv10_devinit_oclass = &(struct nvkm_devinit_impl) { 100nv10_devinit = {
101 .base.handle = NV_SUBDEV(DEVINIT, 0x10), 101 .dtor = nv04_devinit_dtor,
102 .base.ofuncs = &(struct nvkm_ofuncs) { 102 .preinit = nv04_devinit_preinit,
103 .ctor = nv04_devinit_ctor, 103 .post = nv04_devinit_post,
104 .dtor = nv04_devinit_dtor,
105 .init = nv04_devinit_init,
106 .fini = nv04_devinit_fini,
107 },
108 .meminit = nv10_devinit_meminit, 104 .meminit = nv10_devinit_meminit,
109 .pll_set = nv04_devinit_pll_set, 105 .pll_set = nv04_devinit_pll_set,
110 .post = nvbios_init, 106};
111}.base; 107
108int
109nv10_devinit_new(struct nvkm_device *device, int index,
110 struct nvkm_devinit **pinit)
111{
112 return nv04_devinit_new_(&nv10_devinit, device, index, pinit);
113}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
index 9f36fff5a1c3..fefafec7e2a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
@@ -26,15 +26,17 @@
26#include <subdev/bios.h> 26#include <subdev/bios.h>
27#include <subdev/bios/init.h> 27#include <subdev/bios/init.h>
28 28
29struct nvkm_oclass * 29static const struct nvkm_devinit_func
30nv1a_devinit_oclass = &(struct nvkm_devinit_impl) { 30nv1a_devinit = {
31 .base.handle = NV_SUBDEV(DEVINIT, 0x1a), 31 .dtor = nv04_devinit_dtor,
32 .base.ofuncs = &(struct nvkm_ofuncs) { 32 .preinit = nv04_devinit_preinit,
33 .ctor = nv04_devinit_ctor, 33 .post = nv04_devinit_post,
34 .dtor = nv04_devinit_dtor,
35 .init = nv04_devinit_init,
36 .fini = nv04_devinit_fini,
37 },
38 .pll_set = nv04_devinit_pll_set, 34 .pll_set = nv04_devinit_pll_set,
39 .post = nvbios_init, 35};
40}.base; 36
37int
38nv1a_devinit_new(struct nvkm_device *device, int index,
39 struct nvkm_devinit **pinit)
40{
41 return nv04_devinit_new_(&nv1a_devinit, device, index, pinit);
42}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
index 02fcfd921c42..4ef04e0d8826 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
@@ -30,48 +30,50 @@
30#include <subdev/bios/init.h> 30#include <subdev/bios/init.h>
31 31
32static void 32static void
33nv20_devinit_meminit(struct nvkm_devinit *devinit) 33nv20_devinit_meminit(struct nvkm_devinit *init)
34{ 34{
35 struct nv04_devinit_priv *priv = (void *)devinit; 35 struct nvkm_subdev *subdev = &init->subdev;
36 struct nvkm_device *device = nv_device(priv); 36 struct nvkm_device *device = subdev->device;
37 uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900); 37 uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900);
38 uint32_t amount, off; 38 uint32_t amount, off;
39 struct io_mapping *fb; 39 struct io_mapping *fb;
40 40
41 /* Map the framebuffer aperture */ 41 /* Map the framebuffer aperture */
42 fb = fbmem_init(nv_device(priv)); 42 fb = fbmem_init(device);
43 if (!fb) { 43 if (!fb) {
44 nv_error(priv, "failed to map fb\n"); 44 nvkm_error(subdev, "failed to map fb\n");
45 return; 45 return;
46 } 46 }
47 47
48 nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1); 48 nvkm_wr32(device, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
49 49
50 /* Allow full addressing */ 50 /* Allow full addressing */
51 nv_mask(priv, NV04_PFB_CFG0, 0, mask); 51 nvkm_mask(device, NV04_PFB_CFG0, 0, mask);
52 52
53 amount = nv_rd32(priv, 0x10020c); 53 amount = nvkm_rd32(device, 0x10020c);
54 for (off = amount; off > 0x2000000; off -= 0x2000000) 54 for (off = amount; off > 0x2000000; off -= 0x2000000)
55 fbmem_poke(fb, off - 4, off); 55 fbmem_poke(fb, off - 4, off);
56 56
57 amount = nv_rd32(priv, 0x10020c); 57 amount = nvkm_rd32(device, 0x10020c);
58 if (amount != fbmem_peek(fb, amount - 4)) 58 if (amount != fbmem_peek(fb, amount - 4))
59 /* IC missing - disable the upper half memory space. */ 59 /* IC missing - disable the upper half memory space. */
60 nv_mask(priv, NV04_PFB_CFG0, mask, 0); 60 nvkm_mask(device, NV04_PFB_CFG0, mask, 0);
61 61
62 fbmem_fini(fb); 62 fbmem_fini(fb);
63} 63}
64 64
65struct nvkm_oclass * 65static const struct nvkm_devinit_func
66nv20_devinit_oclass = &(struct nvkm_devinit_impl) { 66nv20_devinit = {
67 .base.handle = NV_SUBDEV(DEVINIT, 0x20), 67 .dtor = nv04_devinit_dtor,
68 .base.ofuncs = &(struct nvkm_ofuncs) { 68 .preinit = nv04_devinit_preinit,
69 .ctor = nv04_devinit_ctor, 69 .post = nv04_devinit_post,
70 .dtor = nv04_devinit_dtor,
71 .init = nv04_devinit_init,
72 .fini = nv04_devinit_fini,
73 },
74 .meminit = nv20_devinit_meminit, 70 .meminit = nv20_devinit_meminit,
75 .pll_set = nv04_devinit_pll_set, 71 .pll_set = nv04_devinit_pll_set,
76 .post = nvbios_init, 72};
77}.base; 73
74int
75nv20_devinit_new(struct nvkm_device *device, int index,
76 struct nvkm_devinit **pinit)
77{
78 return nv04_devinit_new_(&nv20_devinit, device, index, pinit);
79}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
index 26b7cb13e167..337c2c692dc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
@@ -29,47 +29,48 @@
29#include <subdev/bios/init.h> 29#include <subdev/bios/init.h>
30#include <subdev/bios/pll.h> 30#include <subdev/bios/pll.h>
31#include <subdev/clk/pll.h> 31#include <subdev/clk/pll.h>
32#include <subdev/ibus.h>
33#include <subdev/vga.h> 32#include <subdev/vga.h>
34 33
35int 34int
36nv50_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq) 35nv50_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
37{ 36{
38 struct nv50_devinit_priv *priv = (void *)devinit; 37 struct nvkm_subdev *subdev = &init->subdev;
39 struct nvkm_bios *bios = nvkm_bios(priv); 38 struct nvkm_device *device = subdev->device;
39 struct nvkm_bios *bios = device->bios;
40 struct nvbios_pll info; 40 struct nvbios_pll info;
41 int N1, M1, N2, M2, P; 41 int N1, M1, N2, M2, P;
42 int ret; 42 int ret;
43 43
44 ret = nvbios_pll_parse(bios, type, &info); 44 ret = nvbios_pll_parse(bios, type, &info);
45 if (ret) { 45 if (ret) {
46 nv_error(devinit, "failed to retrieve pll data, %d\n", ret); 46 nvkm_error(subdev, "failed to retrieve pll data, %d\n", ret);
47 return ret; 47 return ret;
48 } 48 }
49 49
50 ret = nv04_pll_calc(nv_subdev(devinit), &info, freq, &N1, &M1, &N2, &M2, &P); 50 ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P);
51 if (!ret) { 51 if (!ret) {
52 nv_error(devinit, "failed pll calculation\n"); 52 nvkm_error(subdev, "failed pll calculation\n");
53 return ret; 53 return ret;
54 } 54 }
55 55
56 switch (info.type) { 56 switch (info.type) {
57 case PLL_VPLL0: 57 case PLL_VPLL0:
58 case PLL_VPLL1: 58 case PLL_VPLL1:
59 nv_wr32(priv, info.reg + 0, 0x10000611); 59 nvkm_wr32(device, info.reg + 0, 0x10000611);
60 nv_mask(priv, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1); 60 nvkm_mask(device, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1);
61 nv_mask(priv, info.reg + 8, 0x7fff00ff, (P << 28) | 61 nvkm_mask(device, info.reg + 8, 0x7fff00ff, (P << 28) |
62 (M2 << 16) | N2); 62 (M2 << 16) | N2);
63 break; 63 break;
64 case PLL_MEMORY: 64 case PLL_MEMORY:
65 nv_mask(priv, info.reg + 0, 0x01ff0000, (P << 22) | 65 nvkm_mask(device, info.reg + 0, 0x01ff0000,
66 (info.bias_p << 19) | 66 (P << 22) |
67 (P << 16)); 67 (info.bias_p << 19) |
68 nv_wr32(priv, info.reg + 4, (N1 << 8) | M1); 68 (P << 16));
69 nvkm_wr32(device, info.reg + 4, (N1 << 8) | M1);
69 break; 70 break;
70 default: 71 default:
71 nv_mask(priv, info.reg + 0, 0x00070000, (P << 16)); 72 nvkm_mask(device, info.reg + 0, 0x00070000, (P << 16));
72 nv_wr32(priv, info.reg + 4, (N1 << 8) | M1); 73 nvkm_wr32(device, info.reg + 4, (N1 << 8) | M1);
73 break; 74 break;
74 } 75 }
75 76
@@ -77,57 +78,68 @@ nv50_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
77} 78}
78 79
79static u64 80static u64
80nv50_devinit_disable(struct nvkm_devinit *devinit) 81nv50_devinit_disable(struct nvkm_devinit *init)
81{ 82{
82 struct nv50_devinit_priv *priv = (void *)devinit; 83 struct nvkm_device *device = init->subdev.device;
83 u32 r001540 = nv_rd32(priv, 0x001540); 84 u32 r001540 = nvkm_rd32(device, 0x001540);
84 u64 disable = 0ULL; 85 u64 disable = 0ULL;
85 86
86 if (!(r001540 & 0x40000000)) 87 if (!(r001540 & 0x40000000))
87 disable |= (1ULL << NVDEV_ENGINE_MPEG); 88 disable |= (1ULL << NVKM_ENGINE_MPEG);
88 89
89 return disable; 90 return disable;
90} 91}
91 92
92int 93void
93nv50_devinit_init(struct nvkm_object *object) 94nv50_devinit_preinit(struct nvkm_devinit *base)
94{ 95{
95 struct nvkm_bios *bios = nvkm_bios(object); 96 struct nv50_devinit *init = nv50_devinit(base);
96 struct nvkm_ibus *ibus = nvkm_ibus(object); 97 struct nvkm_subdev *subdev = &init->base.subdev;
97 struct nv50_devinit_priv *priv = (void *)object; 98 struct nvkm_device *device = subdev->device;
98 struct nvbios_outp info;
99 struct dcb_output outp;
100 u8 ver = 0xff, hdr, cnt, len;
101 int ret, i = 0;
102 99
103 if (!priv->base.post) { 100 /* our heuristics can't detect whether the board has had its
104 if (!nv_rdvgac(priv, 0, 0x00) && 101 * devinit scripts executed or not if the display engine is
105 !nv_rdvgac(priv, 0, 0x1a)) { 102 * missing, assume it's a secondary gpu which requires post
106 nv_info(priv, "adaptor not initialised\n"); 103 */
107 priv->base.post = true; 104 if (!init->base.post) {
108 } 105 u64 disable = nvkm_devinit_disable(&init->base);
106 if (disable & (1ULL << NVKM_ENGINE_DISP))
107 init->base.post = true;
109 } 108 }
110 109
111 /* some boards appear to require certain priv register timeouts 110 /* magic to detect whether or not x86 vbios code has executed
112 * to be bumped before runing devinit scripts. not a clue why 111 * the devinit scripts to initialise the board
113 * the vbios engineers didn't make the scripts just work...
114 */ 112 */
115 if (priv->base.post && ibus) 113 if (!init->base.post) {
116 nv_ofuncs(ibus)->init(nv_object(ibus)); 114 if (!nvkm_rdvgac(device, 0, 0x00) &&
115 !nvkm_rdvgac(device, 0, 0x1a)) {
116 nvkm_debug(subdev, "adaptor not initialised\n");
117 init->base.post = true;
118 }
119 }
120}
117 121
118 ret = nvkm_devinit_init(&priv->base); 122void
119 if (ret) 123nv50_devinit_init(struct nvkm_devinit *base)
120 return ret; 124{
125 struct nv50_devinit *init = nv50_devinit(base);
126 struct nvkm_subdev *subdev = &init->base.subdev;
127 struct nvkm_device *device = subdev->device;
128 struct nvkm_bios *bios = device->bios;
129 struct nvbios_outp info;
130 struct dcb_output outp;
131 u8 ver = 0xff, hdr, cnt, len;
132 int i = 0;
121 133
122 /* if we ran the init tables, we have to execute the first script 134 /* if we ran the init tables, we have to execute the first script
123 * pointer of each dcb entry's display encoder table in order 135 * pointer of each dcb entry's display encoder table in order
124 * to properly initialise each encoder. 136 * to properly initialise each encoder.
125 */ 137 */
126 while (priv->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) { 138 while (init->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
127 if (nvbios_outp_match(bios, outp.hasht, outp.hashm, 139 if (nvbios_outp_match(bios, outp.hasht, outp.hashm,
128 &ver, &hdr, &cnt, &len, &info)) { 140 &ver, &hdr, &cnt, &len, &info)) {
129 struct nvbios_init init = { 141 struct nvbios_init exec = {
130 .subdev = nv_subdev(priv), 142 .subdev = subdev,
131 .bios = bios, 143 .bios = bios,
132 .offset = info.script[0], 144 .offset = info.script[0],
133 .outp = &outp, 145 .outp = &outp,
@@ -135,40 +147,39 @@ nv50_devinit_init(struct nvkm_object *object)
135 .execute = 1, 147 .execute = 1,
136 }; 148 };
137 149
138 nvbios_exec(&init); 150 nvbios_exec(&exec);
139 } 151 }
140 i++; 152 i++;
141 } 153 }
142
143 return 0;
144} 154}
145 155
146int 156int
147nv50_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 157nv50_devinit_new_(const struct nvkm_devinit_func *func,
148 struct nvkm_oclass *oclass, void *data, u32 size, 158 struct nvkm_device *device, int index,
149 struct nvkm_object **pobject) 159 struct nvkm_devinit **pinit)
150{ 160{
151 struct nv50_devinit_priv *priv; 161 struct nv50_devinit *init;
152 int ret;
153 162
154 ret = nvkm_devinit_create(parent, engine, oclass, &priv); 163 if (!(init = kzalloc(sizeof(*init), GFP_KERNEL)))
155 *pobject = nv_object(priv); 164 return -ENOMEM;
156 if (ret) 165 *pinit = &init->base;
157 return ret;
158 166
167 nvkm_devinit_ctor(func, device, index, &init->base);
159 return 0; 168 return 0;
160} 169}
161 170
162struct nvkm_oclass * 171static const struct nvkm_devinit_func
163nv50_devinit_oclass = &(struct nvkm_devinit_impl) { 172nv50_devinit = {
164 .base.handle = NV_SUBDEV(DEVINIT, 0x50), 173 .preinit = nv50_devinit_preinit,
165 .base.ofuncs = &(struct nvkm_ofuncs) { 174 .init = nv50_devinit_init,
166 .ctor = nv50_devinit_ctor, 175 .post = nv04_devinit_post,
167 .dtor = _nvkm_devinit_dtor,
168 .init = nv50_devinit_init,
169 .fini = _nvkm_devinit_fini,
170 },
171 .pll_set = nv50_devinit_pll_set, 176 .pll_set = nv50_devinit_pll_set,
172 .disable = nv50_devinit_disable, 177 .disable = nv50_devinit_disable,
173 .post = nvbios_init, 178};
174}.base; 179
180int
181nv50_devinit_new(struct nvkm_device *device, int index,
182 struct nvkm_devinit **pinit)
183{
184 return nv50_devinit_new_(&nv50_devinit, device, index, pinit);
185}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index 9243521c80ac..5de70a8486b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -1,16 +1,17 @@
1#ifndef __NVKM_DEVINIT_NV50_H__ 1#ifndef __NV50_DEVINIT_H__
2#define __NVKM_DEVINIT_NV50_H__ 2#define __NV50_DEVINIT_H__
3#define nv50_devinit(p) container_of((p), struct nv50_devinit, base)
3#include "priv.h" 4#include "priv.h"
4 5
5struct nv50_devinit_priv { 6struct nv50_devinit {
6 struct nvkm_devinit base; 7 struct nvkm_devinit base;
7 u32 r001540; 8 u32 r001540;
8}; 9};
9 10
10int nv50_devinit_ctor(struct nvkm_object *, struct nvkm_object *, 11int nv50_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *,
11 struct nvkm_oclass *, void *, u32, 12 int, struct nvkm_devinit **);
12 struct nvkm_object **); 13void nv50_devinit_preinit(struct nvkm_devinit *);
13int nv50_devinit_init(struct nvkm_object *); 14void nv50_devinit_init(struct nvkm_devinit *);
14int nv50_devinit_pll_set(struct nvkm_devinit *, u32, u32); 15int nv50_devinit_pll_set(struct nvkm_devinit *, u32, u32);
15 16
16int gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32); 17int gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
index bb51a95d8012..e1f6ae58f1d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -1,34 +1,21 @@
1#ifndef __NVKM_DEVINIT_PRIV_H__ 1#ifndef __NVKM_DEVINIT_PRIV_H__
2#define __NVKM_DEVINIT_PRIV_H__ 2#define __NVKM_DEVINIT_PRIV_H__
3#define nvkm_devinit(p) container_of((p), struct nvkm_devinit, subdev)
3#include <subdev/devinit.h> 4#include <subdev/devinit.h>
4 5
5struct nvkm_devinit_impl { 6struct nvkm_devinit_func {
6 struct nvkm_oclass base; 7 void *(*dtor)(struct nvkm_devinit *);
8 void (*preinit)(struct nvkm_devinit *);
9 void (*init)(struct nvkm_devinit *);
10 int (*post)(struct nvkm_devinit *, bool post);
11 u32 (*mmio)(struct nvkm_devinit *, u32);
7 void (*meminit)(struct nvkm_devinit *); 12 void (*meminit)(struct nvkm_devinit *);
8 int (*pll_set)(struct nvkm_devinit *, u32 type, u32 freq); 13 int (*pll_set)(struct nvkm_devinit *, u32 type, u32 freq);
9 u64 (*disable)(struct nvkm_devinit *); 14 u64 (*disable)(struct nvkm_devinit *);
10 u32 (*mmio)(struct nvkm_devinit *, u32);
11 int (*post)(struct nvkm_subdev *, bool);
12}; 15};
13 16
14#define nvkm_devinit_create(p,e,o,d) \ 17void nvkm_devinit_ctor(const struct nvkm_devinit_func *, struct nvkm_device *,
15 nvkm_devinit_create_((p), (e), (o), sizeof(**d), (void **)d) 18 int index, struct nvkm_devinit *);
16#define nvkm_devinit_destroy(p) ({ \
17 struct nvkm_devinit *d = (p); \
18 _nvkm_devinit_dtor(nv_object(d)); \
19})
20#define nvkm_devinit_init(p) ({ \
21 struct nvkm_devinit *d = (p); \
22 _nvkm_devinit_init(nv_object(d)); \
23})
24#define nvkm_devinit_fini(p,s) ({ \
25 struct nvkm_devinit *d = (p); \
26 _nvkm_devinit_fini(nv_object(d), (s)); \
27})
28 19
29int nvkm_devinit_create_(struct nvkm_object *, struct nvkm_object *, 20int nv04_devinit_post(struct nvkm_devinit *, bool);
30 struct nvkm_oclass *, int, void **);
31void _nvkm_devinit_dtor(struct nvkm_object *);
32int _nvkm_devinit_init(struct nvkm_object *);
33int _nvkm_devinit_fini(struct nvkm_object *, bool suspend);
34#endif 21#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index d6be4c6c5408..08105701af7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -23,6 +23,8 @@ nvkm-y += nvkm/subdev/fb/gf100.o
23nvkm-y += nvkm/subdev/fb/gk104.o 23nvkm-y += nvkm/subdev/fb/gk104.o
24nvkm-y += nvkm/subdev/fb/gk20a.o 24nvkm-y += nvkm/subdev/fb/gk20a.o
25nvkm-y += nvkm/subdev/fb/gm107.o 25nvkm-y += nvkm/subdev/fb/gm107.o
26
27nvkm-y += nvkm/subdev/fb/ram.o
26nvkm-y += nvkm/subdev/fb/ramnv04.o 28nvkm-y += nvkm/subdev/fb/ramnv04.o
27nvkm-y += nvkm/subdev/fb/ramnv10.o 29nvkm-y += nvkm/subdev/fb/ramnv10.o
28nvkm-y += nvkm/subdev/fb/ramnv1a.o 30nvkm-y += nvkm/subdev/fb/ramnv1a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 61fde43dab71..a719b9becb73 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -22,144 +22,151 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25#include "ram.h"
25 26
26#include <subdev/bios.h> 27#include <subdev/bios.h>
27#include <subdev/bios/M0203.h> 28#include <subdev/bios/M0203.h>
29#include <engine/gr.h>
30#include <engine/mpeg.h>
31
32bool
33nvkm_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
34{
35 return fb->func->memtype_valid(fb, memtype);
36}
37
38void
39nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
40{
41 fb->func->tile.fini(fb, region, tile);
42}
43
44void
45nvkm_fb_tile_init(struct nvkm_fb *fb, int region, u32 addr, u32 size,
46 u32 pitch, u32 flags, struct nvkm_fb_tile *tile)
47{
48 fb->func->tile.init(fb, region, addr, size, pitch, flags, tile);
49}
50
51void
52nvkm_fb_tile_prog(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
53{
54 struct nvkm_device *device = fb->subdev.device;
55 if (fb->func->tile.prog) {
56 fb->func->tile.prog(fb, region, tile);
57 if (device->gr)
58 nvkm_engine_tile(&device->gr->engine, region);
59 if (device->mpeg)
60 nvkm_engine_tile(device->mpeg, region);
61 }
62}
28 63
29int 64int
30nvkm_fb_bios_memtype(struct nvkm_bios *bios) 65nvkm_fb_bios_memtype(struct nvkm_bios *bios)
31{ 66{
32 const u8 ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2; 67 struct nvkm_subdev *subdev = &bios->subdev;
68 struct nvkm_device *device = subdev->device;
69 const u8 ramcfg = (nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2;
33 struct nvbios_M0203E M0203E; 70 struct nvbios_M0203E M0203E;
34 u8 ver, hdr; 71 u8 ver, hdr;
35 72
36 if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) { 73 if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
37 switch (M0203E.type) { 74 switch (M0203E.type) {
38 case M0203E_TYPE_DDR2 : return NV_MEM_TYPE_DDR2; 75 case M0203E_TYPE_DDR2 : return NVKM_RAM_TYPE_DDR2;
39 case M0203E_TYPE_DDR3 : return NV_MEM_TYPE_DDR3; 76 case M0203E_TYPE_DDR3 : return NVKM_RAM_TYPE_DDR3;
40 case M0203E_TYPE_GDDR3: return NV_MEM_TYPE_GDDR3; 77 case M0203E_TYPE_GDDR3: return NVKM_RAM_TYPE_GDDR3;
41 case M0203E_TYPE_GDDR5: return NV_MEM_TYPE_GDDR5; 78 case M0203E_TYPE_GDDR5: return NVKM_RAM_TYPE_GDDR5;
42 default: 79 default:
43 nv_warn(bios, "M0203E type %02x\n", M0203E.type); 80 nvkm_warn(subdev, "M0203E type %02x\n", M0203E.type);
44 return NV_MEM_TYPE_UNKNOWN; 81 return NVKM_RAM_TYPE_UNKNOWN;
45 } 82 }
46 } 83 }
47 84
48 nv_warn(bios, "M0203E not matched!\n"); 85 nvkm_warn(subdev, "M0203E not matched!\n");
49 return NV_MEM_TYPE_UNKNOWN; 86 return NVKM_RAM_TYPE_UNKNOWN;
50} 87}
51 88
52int 89static void
53_nvkm_fb_fini(struct nvkm_object *object, bool suspend) 90nvkm_fb_intr(struct nvkm_subdev *subdev)
54{ 91{
55 struct nvkm_fb *pfb = (void *)object; 92 struct nvkm_fb *fb = nvkm_fb(subdev);
56 int ret; 93 if (fb->func->intr)
94 fb->func->intr(fb);
95}
57 96
58 if (pfb->ram) { 97static int
59 ret = nv_ofuncs(pfb->ram)->fini(nv_object(pfb->ram), suspend); 98nvkm_fb_oneinit(struct nvkm_subdev *subdev)
60 if (ret && suspend) 99{
100 struct nvkm_fb *fb = nvkm_fb(subdev);
101 if (fb->func->ram_new) {
102 int ret = fb->func->ram_new(fb, &fb->ram);
103 if (ret) {
104 nvkm_error(subdev, "vram setup failed, %d\n", ret);
61 return ret; 105 return ret;
106 }
62 } 107 }
63 108 return 0;
64 return nvkm_subdev_fini(&pfb->base, suspend);
65} 109}
66 110
67int 111static int
68_nvkm_fb_init(struct nvkm_object *object) 112nvkm_fb_init(struct nvkm_subdev *subdev)
69{ 113{
70 struct nvkm_fb *pfb = (void *)object; 114 struct nvkm_fb *fb = nvkm_fb(subdev);
71 int ret, i; 115 int ret, i;
72 116
73 ret = nvkm_subdev_init(&pfb->base); 117 if (fb->ram) {
74 if (ret) 118 ret = nvkm_ram_init(fb->ram);
75 return ret;
76
77 if (pfb->ram) {
78 ret = nv_ofuncs(pfb->ram)->init(nv_object(pfb->ram));
79 if (ret) 119 if (ret)
80 return ret; 120 return ret;
81 } 121 }
82 122
83 for (i = 0; i < pfb->tile.regions; i++) 123 for (i = 0; i < fb->tile.regions; i++)
84 pfb->tile.prog(pfb, i, &pfb->tile.region[i]); 124 fb->func->tile.prog(fb, i, &fb->tile.region[i]);
85 125
126 if (fb->func->init)
127 fb->func->init(fb);
86 return 0; 128 return 0;
87} 129}
88 130
89void 131static void *
90_nvkm_fb_dtor(struct nvkm_object *object) 132nvkm_fb_dtor(struct nvkm_subdev *subdev)
91{ 133{
92 struct nvkm_fb *pfb = (void *)object; 134 struct nvkm_fb *fb = nvkm_fb(subdev);
93 int i; 135 int i;
94 136
95 for (i = 0; i < pfb->tile.regions; i++) 137 for (i = 0; i < fb->tile.regions; i++)
96 pfb->tile.fini(pfb, i, &pfb->tile.region[i]); 138 fb->func->tile.fini(fb, i, &fb->tile.region[i]);
97 nvkm_mm_fini(&pfb->tags);
98 139
99 if (pfb->ram) { 140 nvkm_ram_del(&fb->ram);
100 nvkm_mm_fini(&pfb->vram);
101 nvkm_object_ref(NULL, (struct nvkm_object **)&pfb->ram);
102 }
103 141
104 nvkm_subdev_destroy(&pfb->base); 142 if (fb->func->dtor)
143 return fb->func->dtor(fb);
144 return fb;
105} 145}
106 146
107int 147static const struct nvkm_subdev_func
108nvkm_fb_create_(struct nvkm_object *parent, struct nvkm_object *engine, 148nvkm_fb = {
109 struct nvkm_oclass *oclass, int length, void **pobject) 149 .dtor = nvkm_fb_dtor,
110{ 150 .oneinit = nvkm_fb_oneinit,
111 struct nvkm_fb_impl *impl = (void *)oclass; 151 .init = nvkm_fb_init,
112 static const char *name[] = { 152 .intr = nvkm_fb_intr,
113 [NV_MEM_TYPE_UNKNOWN] = "unknown", 153};
114 [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
115 [NV_MEM_TYPE_SGRAM ] = "SGRAM",
116 [NV_MEM_TYPE_SDRAM ] = "SDRAM",
117 [NV_MEM_TYPE_DDR1 ] = "DDR1",
118 [NV_MEM_TYPE_DDR2 ] = "DDR2",
119 [NV_MEM_TYPE_DDR3 ] = "DDR3",
120 [NV_MEM_TYPE_GDDR2 ] = "GDDR2",
121 [NV_MEM_TYPE_GDDR3 ] = "GDDR3",
122 [NV_MEM_TYPE_GDDR4 ] = "GDDR4",
123 [NV_MEM_TYPE_GDDR5 ] = "GDDR5",
124 };
125 struct nvkm_object *ram;
126 struct nvkm_fb *pfb;
127 int ret;
128
129 ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PFB", "fb",
130 length, pobject);
131 pfb = *pobject;
132 if (ret)
133 return ret;
134
135 pfb->memtype_valid = impl->memtype;
136
137 if (!impl->ram)
138 return 0;
139
140 ret = nvkm_object_ctor(nv_object(pfb), NULL, impl->ram, NULL, 0, &ram);
141 if (ret) {
142 nv_fatal(pfb, "error detecting memory configuration!!\n");
143 return ret;
144 }
145
146 pfb->ram = (void *)ram;
147 154
148 if (!nvkm_mm_initialised(&pfb->vram)) { 155void
149 ret = nvkm_mm_init(&pfb->vram, 0, pfb->ram->size >> 12, 1); 156nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
150 if (ret) 157 int index, struct nvkm_fb *fb)
151 return ret; 158{
152 } 159 nvkm_subdev_ctor(&nvkm_fb, device, index, 0, &fb->subdev);
153 160 fb->func = func;
154 if (!nvkm_mm_initialised(&pfb->tags)) { 161 fb->tile.regions = fb->func->tile.regions;
155 ret = nvkm_mm_init(&pfb->tags, 0, pfb->ram->tags ? 162}
156 ++pfb->ram->tags : 0, 1);
157 if (ret)
158 return ret;
159 }
160 163
161 nv_info(pfb, "RAM type: %s\n", name[pfb->ram->type]); 164int
162 nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram->size >> 20)); 165nvkm_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
163 nv_info(pfb, " ZCOMP: %d tags\n", pfb->ram->tags); 166 int index, struct nvkm_fb **pfb)
167{
168 if (!(*pfb = kzalloc(sizeof(**pfb), GFP_KERNEL)))
169 return -ENOMEM;
170 nvkm_fb_ctor(func, device, index, *pfb);
164 return 0; 171 return 0;
165} 172}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
index 6c968d1e98b3..9c28392d07e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
@@ -22,17 +22,16 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25#include "ram.h"
25 26
26struct nvkm_oclass * 27static const struct nv50_fb_func
27g84_fb_oclass = &(struct nv50_fb_impl) { 28g84_fb = {
28 .base.base.handle = NV_SUBDEV(FB, 0x84), 29 .ram_new = nv50_ram_new,
29 .base.base.ofuncs = &(struct nvkm_ofuncs) {
30 .ctor = nv50_fb_ctor,
31 .dtor = nv50_fb_dtor,
32 .init = nv50_fb_init,
33 .fini = _nvkm_fb_fini,
34 },
35 .base.memtype = nv50_fb_memtype_valid,
36 .base.ram = &nv50_ram_oclass,
37 .trap = 0x001d07ff, 30 .trap = 0x001d07ff,
38}.base.base; 31};
32
33int
34g84_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
35{
36 return nv50_fb_new_(&g84_fb, device, index, pfb);
37}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
index 15b462ae33cb..79b523aa52aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
@@ -22,7 +22,7 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 * Roy Spliet <rspliet@eclipso.eu> 23 * Roy Spliet <rspliet@eclipso.eu>
24 */ 24 */
25#include "priv.h" 25#include "ram.h"
26 26
27struct ramxlat { 27struct ramxlat {
28 int id; 28 int id;
@@ -42,9 +42,9 @@ ramxlat(const struct ramxlat *xlat, int id)
42 42
43static const struct ramxlat 43static const struct ramxlat
44ramgddr3_cl_lo[] = { 44ramgddr3_cl_lo[] = {
45 { 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 }, 45 { 5, 5 }, { 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 }, { 12, 8 },
46 /* the below are mentioned in some, but not all, gddr3 docs */ 46 /* the below are mentioned in some, but not all, gddr3 docs */
47 { 12, 4 }, { 13, 5 }, { 14, 6 }, 47 { 13, 9 }, { 14, 6 },
48 /* XXX: Per Samsung docs, are these used? They overlap with Qimonda */ 48 /* XXX: Per Samsung docs, are these used? They overlap with Qimonda */
49 /* { 4, 4 }, { 5, 5 }, { 6, 6 }, { 12, 8 }, { 13, 9 }, { 14, 10 }, 49 /* { 4, 4 }, { 5, 5 }, { 6, 6 }, { 12, 8 }, { 13, 9 }, { 14, 10 },
50 * { 15, 11 }, */ 50 * { 15, 11 }, */
@@ -61,24 +61,25 @@ ramgddr3_cl_hi[] = {
61static const struct ramxlat 61static const struct ramxlat
62ramgddr3_wr_lo[] = { 62ramgddr3_wr_lo[] = {
63 { 5, 2 }, { 7, 4 }, { 8, 5 }, { 9, 6 }, { 10, 7 }, 63 { 5, 2 }, { 7, 4 }, { 8, 5 }, { 9, 6 }, { 10, 7 },
64 { 11, 0 }, 64 { 11, 0 }, { 13 , 1 },
65 /* the below are mentioned in some, but not all, gddr3 docs */ 65 /* the below are mentioned in some, but not all, gddr3 docs */
66 { 4, 1 }, { 6, 3 }, { 12, 1 }, { 13 , 2 }, 66 { 4, 1 }, { 6, 3 }, { 12, 1 },
67 { -1 } 67 { -1 }
68}; 68};
69 69
70int 70int
71nvkm_gddr3_calc(struct nvkm_ram *ram) 71nvkm_gddr3_calc(struct nvkm_ram *ram)
72{ 72{
73 int CL, WR, CWL, DLL = 0, ODT = 0, hi; 73 int CL, WR, CWL, DLL = 0, ODT = 0, RON, hi;
74 74
75 switch (ram->next->bios.timing_ver) { 75 switch (ram->next->bios.timing_ver) {
76 case 0x10: 76 case 0x10:
77 CWL = ram->next->bios.timing_10_CWL; 77 CWL = ram->next->bios.timing_10_CWL;
78 CL = ram->next->bios.timing_10_CL; 78 CL = ram->next->bios.timing_10_CL;
79 WR = ram->next->bios.timing_10_WR; 79 WR = ram->next->bios.timing_10_WR;
80 DLL = !ram->next->bios.ramcfg_10_DLLoff; 80 DLL = !ram->next->bios.ramcfg_DLLoff;
81 ODT = ram->next->bios.timing_10_ODT; 81 ODT = ram->next->bios.timing_10_ODT;
82 RON = ram->next->bios.ramcfg_RON;
82 break; 83 break;
83 case 0x20: 84 case 0x20:
84 CWL = (ram->next->bios.timing[1] & 0x00000f80) >> 7; 85 CWL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
@@ -89,6 +90,7 @@ nvkm_gddr3_calc(struct nvkm_ram *ram)
89 ODT = (ram->mr[1] & 0x004) >> 2 | 90 ODT = (ram->mr[1] & 0x004) >> 2 |
90 (ram->mr[1] & 0x040) >> 5 | 91 (ram->mr[1] & 0x040) >> 5 |
91 (ram->mr[1] & 0x200) >> 7; 92 (ram->mr[1] & 0x200) >> 7;
93 RON = !(ram->mr[1] & 0x300) >> 8;
92 break; 94 break;
93 default: 95 default:
94 return -ENOSYS; 96 return -ENOSYS;
@@ -107,7 +109,7 @@ nvkm_gddr3_calc(struct nvkm_ram *ram)
107 109
108 ram->mr[1] &= ~0x3fc; 110 ram->mr[1] &= ~0x3fc;
109 ram->mr[1] |= (ODT & 0x03) << 2; 111 ram->mr[1] |= (ODT & 0x03) << 2;
110 ram->mr[1] |= (ODT & 0x03) << 8; 112 ram->mr[1] |= (RON & 0x03) << 8;
111 ram->mr[1] |= (WR & 0x03) << 4; 113 ram->mr[1] |= (WR & 0x03) << 4;
112 ram->mr[1] |= (WR & 0x04) << 5; 114 ram->mr[1] |= (WR & 0x04) << 5;
113 ram->mr[1] |= !DLL << 6; 115 ram->mr[1] |= !DLL << 6;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
index f6f9eee1dcd0..24f83b09e6a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
@@ -21,7 +21,7 @@
21 * 21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24#include "priv.h" 24#include "ram.h"
25 25
26/* binary driver only executes this path if the condition (a) is true 26/* binary driver only executes this path if the condition (a) is true
27 * for any configuration (combination of rammap+ramcfg+timing) that 27 * for any configuration (combination of rammap+ramcfg+timing) that
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index d51aa0237baf..008bb9849f3b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -22,101 +22,90 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "gf100.h" 24#include "gf100.h"
25 25#include "ram.h"
26#include <core/device.h>
27 26
28extern const u8 gf100_pte_storage_type_map[256]; 27extern const u8 gf100_pte_storage_type_map[256];
29 28
30bool 29bool
31gf100_fb_memtype_valid(struct nvkm_fb *pfb, u32 tile_flags) 30gf100_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
32{ 31{
33 u8 memtype = (tile_flags & 0x0000ff00) >> 8; 32 u8 memtype = (tile_flags & 0x0000ff00) >> 8;
34 return likely((gf100_pte_storage_type_map[memtype] != 0xff)); 33 return likely((gf100_pte_storage_type_map[memtype] != 0xff));
35} 34}
36 35
37static void 36void
38gf100_fb_intr(struct nvkm_subdev *subdev) 37gf100_fb_intr(struct nvkm_fb *base)
39{ 38{
40 struct gf100_fb_priv *priv = (void *)subdev; 39 struct gf100_fb *fb = gf100_fb(base);
41 u32 intr = nv_rd32(priv, 0x000100); 40 struct nvkm_subdev *subdev = &fb->base.subdev;
42 if (intr & 0x08000000) { 41 struct nvkm_device *device = subdev->device;
43 nv_debug(priv, "PFFB intr\n"); 42 u32 intr = nvkm_rd32(device, 0x000100);
44 intr &= ~0x08000000; 43 if (intr & 0x08000000)
45 } 44 nvkm_debug(subdev, "PFFB intr\n");
46 if (intr & 0x00002000) { 45 if (intr & 0x00002000)
47 nv_debug(priv, "PBFB intr\n"); 46 nvkm_debug(subdev, "PBFB intr\n");
48 intr &= ~0x00002000;
49 }
50} 47}
51 48
52int 49void
53gf100_fb_init(struct nvkm_object *object) 50gf100_fb_init(struct nvkm_fb *base)
54{ 51{
55 struct gf100_fb_priv *priv = (void *)object; 52 struct gf100_fb *fb = gf100_fb(base);
56 int ret; 53 struct nvkm_device *device = fb->base.subdev.device;
57 54
58 ret = nvkm_fb_init(&priv->base); 55 if (fb->r100c10_page)
59 if (ret) 56 nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
60 return ret;
61 57
62 if (priv->r100c10_page) 58 nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
63 nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
64
65 nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
66 return 0;
67} 59}
68 60
69void 61void *
70gf100_fb_dtor(struct nvkm_object *object) 62gf100_fb_dtor(struct nvkm_fb *base)
71{ 63{
72 struct nvkm_device *device = nv_device(object); 64 struct gf100_fb *fb = gf100_fb(base);
73 struct gf100_fb_priv *priv = (void *)object; 65 struct nvkm_device *device = fb->base.subdev.device;
74 66
75 if (priv->r100c10_page) { 67 if (fb->r100c10_page) {
76 dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE, 68 dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE,
77 DMA_BIDIRECTIONAL); 69 DMA_BIDIRECTIONAL);
78 __free_page(priv->r100c10_page); 70 __free_page(fb->r100c10_page);
79 } 71 }
80 72
81 nvkm_fb_destroy(&priv->base); 73 return fb;
82} 74}
83 75
84int 76int
85gf100_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 77gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
86 struct nvkm_oclass *oclass, void *data, u32 size, 78 int index, struct nvkm_fb **pfb)
87 struct nvkm_object **pobject)
88{ 79{
89 struct nvkm_device *device = nv_device(parent); 80 struct gf100_fb *fb;
90 struct gf100_fb_priv *priv; 81
91 int ret; 82 if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
92 83 return -ENOMEM;
93 ret = nvkm_fb_create(parent, engine, oclass, &priv); 84 nvkm_fb_ctor(func, device, index, &fb->base);
94 *pobject = nv_object(priv); 85 *pfb = &fb->base;
95 if (ret) 86
96 return ret; 87 fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
97 88 if (fb->r100c10_page) {
98 priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); 89 fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
99 if (priv->r100c10_page) { 90 PAGE_SIZE, DMA_BIDIRECTIONAL);
100 priv->r100c10 = dma_map_page(nv_device_base(device), 91 if (dma_mapping_error(device->dev, fb->r100c10))
101 priv->r100c10_page, 0, PAGE_SIZE,
102 DMA_BIDIRECTIONAL);
103 if (dma_mapping_error(nv_device_base(device), priv->r100c10))
104 return -EFAULT; 92 return -EFAULT;
105 } 93 }
106 94
107 nv_subdev(priv)->intr = gf100_fb_intr;
108 return 0; 95 return 0;
109} 96}
110 97
111struct nvkm_oclass * 98static const struct nvkm_fb_func
112gf100_fb_oclass = &(struct nvkm_fb_impl) { 99gf100_fb = {
113 .base.handle = NV_SUBDEV(FB, 0xc0), 100 .dtor = gf100_fb_dtor,
114 .base.ofuncs = &(struct nvkm_ofuncs) { 101 .init = gf100_fb_init,
115 .ctor = gf100_fb_ctor, 102 .intr = gf100_fb_intr,
116 .dtor = gf100_fb_dtor, 103 .ram_new = gf100_ram_new,
117 .init = gf100_fb_init, 104 .memtype_valid = gf100_fb_memtype_valid,
118 .fini = _nvkm_fb_fini, 105};
119 }, 106
120 .memtype = gf100_fb_memtype_valid, 107int
121 .ram = &gf100_ram_oclass, 108gf100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
122}.base; 109{
110 return gf100_fb_new_(&gf100_fb, device, index, pfb);
111}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 0af4da259471..2160e5a39c9a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -1,28 +1,17 @@
1#ifndef __NVKM_RAM_NVC0_H__ 1#ifndef __NVKM_RAM_NVC0_H__
2#define __NVKM_RAM_NVC0_H__ 2#define __NVKM_RAM_NVC0_H__
3#define gf100_fb(p) container_of((p), struct gf100_fb, base)
3#include "priv.h" 4#include "priv.h"
4#include "nv50.h"
5 5
6struct gf100_fb_priv { 6struct gf100_fb {
7 struct nvkm_fb base; 7 struct nvkm_fb base;
8 struct page *r100c10_page; 8 struct page *r100c10_page;
9 dma_addr_t r100c10; 9 dma_addr_t r100c10;
10}; 10};
11 11
12int gf100_fb_ctor(struct nvkm_object *, struct nvkm_object *, 12int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *,
13 struct nvkm_oclass *, void *, u32, 13 int index, struct nvkm_fb **);
14 struct nvkm_object **); 14void *gf100_fb_dtor(struct nvkm_fb *);
15void gf100_fb_dtor(struct nvkm_object *); 15void gf100_fb_init(struct nvkm_fb *);
16int gf100_fb_init(struct nvkm_object *); 16void gf100_fb_intr(struct nvkm_fb *);
17bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
18
19#define gf100_ram_create(p,e,o,m,d) \
20 gf100_ram_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
21int gf100_ram_create_(struct nvkm_object *, struct nvkm_object *,
22 struct nvkm_oclass *, u32, int, void **);
23int gf100_ram_get(struct nvkm_fb *, u64, u32, u32, u32,
24 struct nvkm_mem **);
25void gf100_ram_put(struct nvkm_fb *, struct nvkm_mem **);
26
27int gk104_ram_init(struct nvkm_object*);
28#endif 17#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 1c08317665bb..0edb3c316f5c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -22,16 +22,19 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "gf100.h" 24#include "gf100.h"
25#include "ram.h"
25 26
26struct nvkm_oclass * 27static const struct nvkm_fb_func
27gk104_fb_oclass = &(struct nvkm_fb_impl) { 28gk104_fb = {
28 .base.handle = NV_SUBDEV(FB, 0xe0), 29 .dtor = gf100_fb_dtor,
29 .base.ofuncs = &(struct nvkm_ofuncs) { 30 .init = gf100_fb_init,
30 .ctor = gf100_fb_ctor, 31 .intr = gf100_fb_intr,
31 .dtor = gf100_fb_dtor, 32 .ram_new = gk104_ram_new,
32 .init = gf100_fb_init, 33 .memtype_valid = gf100_fb_memtype_valid,
33 .fini = _nvkm_fb_fini, 34};
34 }, 35
35 .memtype = gf100_fb_memtype_valid, 36int
36 .ram = &gk104_ram_oclass, 37gk104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
37}.base; 38{
39 return gf100_fb_new_(&gk104_fb, device, index, pfb);
40}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index a5d7857d3898..81447eb4c948 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -19,50 +19,23 @@
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE. 20 * DEALINGS IN THE SOFTWARE.
21 */ 21 */
22#include "gf100.h" 22#include "priv.h"
23 23
24struct gk20a_fb_priv { 24static void
25 struct nvkm_fb base; 25gk20a_fb_init(struct nvkm_fb *fb)
26};
27
28static int
29gk20a_fb_init(struct nvkm_object *object)
30{ 26{
31 struct gk20a_fb_priv *priv = (void *)object; 27 struct nvkm_device *device = fb->subdev.device;
32 int ret; 28 nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
33
34 ret = nvkm_fb_init(&priv->base);
35 if (ret)
36 return ret;
37
38 nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
39 return 0;
40} 29}
41 30
42static int 31static const struct nvkm_fb_func
43gk20a_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 32gk20a_fb = {
44 struct nvkm_oclass *oclass, void *data, u32 size, 33 .init = gk20a_fb_init,
45 struct nvkm_object **pobject) 34 .memtype_valid = gf100_fb_memtype_valid,
46{ 35};
47 struct gk20a_fb_priv *priv;
48 int ret;
49
50 ret = nvkm_fb_create(parent, engine, oclass, &priv);
51 *pobject = nv_object(priv);
52 if (ret)
53 return ret;
54 36
55 return 0; 37int
38gk20a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
39{
40 return nvkm_fb_new_(&gk20a_fb, device, index, pfb);
56} 41}
57
58struct nvkm_oclass *
59gk20a_fb_oclass = &(struct nvkm_fb_impl) {
60 .base.handle = NV_SUBDEV(FB, 0xea),
61 .base.ofuncs = &(struct nvkm_ofuncs) {
62 .ctor = gk20a_fb_ctor,
63 .dtor = _nvkm_fb_dtor,
64 .init = gk20a_fb_init,
65 .fini = _nvkm_fb_fini,
66 },
67 .memtype = gf100_fb_memtype_valid,
68}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index 843f9356b360..2a91df8655dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -22,16 +22,19 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "gf100.h" 24#include "gf100.h"
25#include "ram.h"
25 26
26struct nvkm_oclass * 27static const struct nvkm_fb_func
27gm107_fb_oclass = &(struct nvkm_fb_impl) { 28gm107_fb = {
28 .base.handle = NV_SUBDEV(FB, 0x07), 29 .dtor = gf100_fb_dtor,
29 .base.ofuncs = &(struct nvkm_ofuncs) { 30 .init = gf100_fb_init,
30 .ctor = gf100_fb_ctor, 31 .intr = gf100_fb_intr,
31 .dtor = gf100_fb_dtor, 32 .ram_new = gm107_ram_new,
32 .init = gf100_fb_init, 33 .memtype_valid = gf100_fb_memtype_valid,
33 .fini = _nvkm_fb_fini, 34};
34 }, 35
35 .memtype = gf100_fb_memtype_valid, 36int
36 .ram = &gm107_ram_oclass, 37gm107_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
37}.base; 38{
39 return gf100_fb_new_(&gm107_fb, device, index, pfb);
40}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
index dd9b8a0a3c8e..ebb30608d5ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
@@ -22,17 +22,16 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25#include "ram.h"
25 26
26struct nvkm_oclass * 27static const struct nv50_fb_func
27gt215_fb_oclass = &(struct nv50_fb_impl) { 28gt215_fb = {
28 .base.base.handle = NV_SUBDEV(FB, 0xa3), 29 .ram_new = gt215_ram_new,
29 .base.base.ofuncs = &(struct nvkm_ofuncs) {
30 .ctor = nv50_fb_ctor,
31 .dtor = nv50_fb_dtor,
32 .init = nv50_fb_init,
33 .fini = _nvkm_fb_fini,
34 },
35 .base.memtype = nv50_fb_memtype_valid,
36 .base.ram = &gt215_ram_oclass,
37 .trap = 0x000d0fff, 30 .trap = 0x000d0fff,
38}.base.base; 31};
32
33int
34gt215_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
35{
36 return nv50_fb_new_(&gt215_fb, device, index, pfb);
37}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
index 7be4a47ef4ad..73b3b86a2826 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
@@ -22,17 +22,16 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25#include "ram.h"
25 26
26struct nvkm_oclass * 27static const struct nv50_fb_func
27mcp77_fb_oclass = &(struct nv50_fb_impl) { 28mcp77_fb = {
28 .base.base.handle = NV_SUBDEV(FB, 0xaa), 29 .ram_new = mcp77_ram_new,
29 .base.base.ofuncs = &(struct nvkm_ofuncs) {
30 .ctor = nv50_fb_ctor,
31 .dtor = nv50_fb_dtor,
32 .init = nv50_fb_init,
33 .fini = _nvkm_fb_fini,
34 },
35 .base.memtype = nv50_fb_memtype_valid,
36 .base.ram = &mcp77_ram_oclass,
37 .trap = 0x001d07ff, 30 .trap = 0x001d07ff,
38}.base.base; 31};
32
33int
34mcp77_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
35{
36 return nv50_fb_new_(&mcp77_fb, device, index, pfb);
37}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
index 2d00656faef5..6d11e32ec7ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
@@ -22,17 +22,16 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25#include "ram.h"
25 26
26struct nvkm_oclass * 27static const struct nv50_fb_func
27mcp89_fb_oclass = &(struct nv50_fb_impl) { 28mcp89_fb = {
28 .base.base.handle = NV_SUBDEV(FB, 0xaf), 29 .ram_new = mcp77_ram_new,
29 .base.base.ofuncs = &(struct nvkm_ofuncs) {
30 .ctor = nv50_fb_ctor,
31 .dtor = nv50_fb_dtor,
32 .init = nv50_fb_init,
33 .fini = _nvkm_fb_fini,
34 },
35 .base.memtype = nv50_fb_memtype_valid,
36 .base.ram = &mcp77_ram_oclass,
37 .trap = 0x089d1fff, 30 .trap = 0x089d1fff,
38}.base.base; 31};
32
33int
34mcp89_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
35{
36 return nv50_fb_new_(&mcp89_fb, device, index, pfb);
37}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
index c063dec7d03a..8ff2e5db4571 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
@@ -21,67 +21,39 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "priv.h"
25#include "ram.h"
25#include "regsnv04.h" 26#include "regsnv04.h"
26 27
27bool 28bool
28nv04_fb_memtype_valid(struct nvkm_fb *pfb, u32 tile_flags) 29nv04_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
29{ 30{
30 if (!(tile_flags & 0xff00)) 31 if (!(tile_flags & 0xff00))
31 return true; 32 return true;
32
33 return false; 33 return false;
34} 34}
35 35
36static int 36static void
37nv04_fb_init(struct nvkm_object *object) 37nv04_fb_init(struct nvkm_fb *fb)
38{ 38{
39 struct nv04_fb_priv *priv = (void *)object; 39 struct nvkm_device *device = fb->subdev.device;
40 int ret;
41
42 ret = nvkm_fb_init(&priv->base);
43 if (ret)
44 return ret;
45 40
46 /* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows 41 /* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
47 * nvidia reading PFB_CFG_0, then writing back its original value. 42 * nvidia reading PFB_CFG_0, then writing back its original value.
48 * (which was 0x701114 in this case) 43 * (which was 0x701114 in this case)
49 */ 44 */
50 nv_wr32(priv, NV04_PFB_CFG0, 0x1114); 45 nvkm_wr32(device, NV04_PFB_CFG0, 0x1114);
51 return 0;
52} 46}
53 47
48static const struct nvkm_fb_func
49nv04_fb = {
50 .init = nv04_fb_init,
51 .ram_new = nv04_ram_new,
52 .memtype_valid = nv04_fb_memtype_valid,
53};
54
54int 55int
55nv04_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 56nv04_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
56 struct nvkm_oclass *oclass, void *data, u32 size,
57 struct nvkm_object **pobject)
58{ 57{
59 struct nv04_fb_impl *impl = (void *)oclass; 58 return nvkm_fb_new_(&nv04_fb, device, index, pfb);
60 struct nv04_fb_priv *priv;
61 int ret;
62
63 ret = nvkm_fb_create(parent, engine, oclass, &priv);
64 *pobject = nv_object(priv);
65 if (ret)
66 return ret;
67
68 priv->base.tile.regions = impl->tile.regions;
69 priv->base.tile.init = impl->tile.init;
70 priv->base.tile.comp = impl->tile.comp;
71 priv->base.tile.fini = impl->tile.fini;
72 priv->base.tile.prog = impl->tile.prog;
73 return 0;
74} 59}
75
76struct nvkm_oclass *
77nv04_fb_oclass = &(struct nv04_fb_impl) {
78 .base.base.handle = NV_SUBDEV(FB, 0x04),
79 .base.base.ofuncs = &(struct nvkm_ofuncs) {
80 .ctor = nv04_fb_ctor,
81 .dtor = _nvkm_fb_dtor,
82 .init = nv04_fb_init,
83 .fini = _nvkm_fb_fini,
84 },
85 .base.memtype = nv04_fb_memtype_valid,
86 .base.ram = &nv04_ram_oclass,
87}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h
deleted file mode 100644
index caa0d03aaacc..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h
+++ /dev/null
@@ -1,53 +0,0 @@
1#ifndef __NVKM_FB_NV04_H__
2#define __NVKM_FB_NV04_H__
3#include "priv.h"
4
5struct nv04_fb_priv {
6 struct nvkm_fb base;
7};
8
9int nv04_fb_ctor(struct nvkm_object *, struct nvkm_object *,
10 struct nvkm_oclass *, void *, u32,
11 struct nvkm_object **);
12
13struct nv04_fb_impl {
14 struct nvkm_fb_impl base;
15 struct {
16 int regions;
17 void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
18 u32 pitch, u32 flags, struct nvkm_fb_tile *);
19 void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
20 struct nvkm_fb_tile *);
21 void (*fini)(struct nvkm_fb *, int i,
22 struct nvkm_fb_tile *);
23 void (*prog)(struct nvkm_fb *, int i,
24 struct nvkm_fb_tile *);
25 } tile;
26};
27
28void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
29 u32 pitch, u32 flags, struct nvkm_fb_tile *);
30void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
31void nv10_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
32
33void nv20_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
34 u32 pitch, u32 flags, struct nvkm_fb_tile *);
35void nv20_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
36void nv20_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
37
38int nv30_fb_init(struct nvkm_object *);
39void nv30_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
40 u32 pitch, u32 flags, struct nvkm_fb_tile *);
41
42void nv40_fb_tile_comp(struct nvkm_fb *, int i, u32 size, u32 flags,
43 struct nvkm_fb_tile *);
44
45int nv41_fb_init(struct nvkm_object *);
46void nv41_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
47
48int nv44_fb_init(struct nvkm_object *);
49void nv44_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
50
51void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
52 u32 pitch, u32 flags, struct nvkm_fb_tile *);
53#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
index f3530e4a6760..e8c44f5a3d84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
@@ -23,10 +23,11 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26#include "nv04.h" 26#include "priv.h"
27#include "ram.h"
27 28
28void 29void
29nv10_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch, 30nv10_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
30 u32 flags, struct nvkm_fb_tile *tile) 31 u32 flags, struct nvkm_fb_tile *tile)
31{ 32{
32 tile->addr = 0x80000000 | addr; 33 tile->addr = 0x80000000 | addr;
@@ -35,7 +36,7 @@ nv10_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
35} 36}
36 37
37void 38void
38nv10_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile) 39nv10_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
39{ 40{
40 tile->addr = 0; 41 tile->addr = 0;
41 tile->limit = 0; 42 tile->limit = 0;
@@ -44,27 +45,27 @@ nv10_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
44} 45}
45 46
46void 47void
47nv10_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile) 48nv10_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
48{ 49{
49 nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit); 50 struct nvkm_device *device = fb->subdev.device;
50 nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch); 51 nvkm_wr32(device, 0x100244 + (i * 0x10), tile->limit);
51 nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr); 52 nvkm_wr32(device, 0x100248 + (i * 0x10), tile->pitch);
52 nv_rd32(pfb, 0x100240 + (i * 0x10)); 53 nvkm_wr32(device, 0x100240 + (i * 0x10), tile->addr);
54 nvkm_rd32(device, 0x100240 + (i * 0x10));
53} 55}
54 56
55struct nvkm_oclass * 57static const struct nvkm_fb_func
56nv10_fb_oclass = &(struct nv04_fb_impl) { 58nv10_fb = {
57 .base.base.handle = NV_SUBDEV(FB, 0x10),
58 .base.base.ofuncs = &(struct nvkm_ofuncs) {
59 .ctor = nv04_fb_ctor,
60 .dtor = _nvkm_fb_dtor,
61 .init = _nvkm_fb_init,
62 .fini = _nvkm_fb_fini,
63 },
64 .base.memtype = nv04_fb_memtype_valid,
65 .base.ram = &nv10_ram_oclass,
66 .tile.regions = 8, 59 .tile.regions = 8,
67 .tile.init = nv10_fb_tile_init, 60 .tile.init = nv10_fb_tile_init,
68 .tile.fini = nv10_fb_tile_fini, 61 .tile.fini = nv10_fb_tile_fini,
69 .tile.prog = nv10_fb_tile_prog, 62 .tile.prog = nv10_fb_tile_prog,
70}.base.base; 63 .ram_new = nv10_ram_new,
64 .memtype_valid = nv04_fb_memtype_valid,
65};
66
67int
68nv10_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
69{
70 return nvkm_fb_new_(&nv10_fb, device, index, pfb);
71}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
index 83bcb73caf0a..2ae0beb87567 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
@@ -23,21 +23,21 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26#include "nv04.h" 26#include "priv.h"
27#include "ram.h"
27 28
28struct nvkm_oclass * 29static const struct nvkm_fb_func
29nv1a_fb_oclass = &(struct nv04_fb_impl) { 30nv1a_fb = {
30 .base.base.handle = NV_SUBDEV(FB, 0x1a),
31 .base.base.ofuncs = &(struct nvkm_ofuncs) {
32 .ctor = nv04_fb_ctor,
33 .dtor = _nvkm_fb_dtor,
34 .init = _nvkm_fb_init,
35 .fini = _nvkm_fb_fini,
36 },
37 .base.memtype = nv04_fb_memtype_valid,
38 .base.ram = &nv1a_ram_oclass,
39 .tile.regions = 8, 31 .tile.regions = 8,
40 .tile.init = nv10_fb_tile_init, 32 .tile.init = nv10_fb_tile_init,
41 .tile.fini = nv10_fb_tile_fini, 33 .tile.fini = nv10_fb_tile_fini,
42 .tile.prog = nv10_fb_tile_prog, 34 .tile.prog = nv10_fb_tile_prog,
43}.base.base; 35 .ram_new = nv1a_ram_new,
36 .memtype_valid = nv04_fb_memtype_valid,
37};
38
39int
40nv1a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
41{
42 return nvkm_fb_new_(&nv1a_fb, device, index, pfb);
43}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
index e37084b8d05e..126865dfe777 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
@@ -23,28 +23,29 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26#include "nv04.h" 26#include "priv.h"
27#include "ram.h"
27 28
28void 29void
29nv20_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch, 30nv20_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
30 u32 flags, struct nvkm_fb_tile *tile) 31 u32 flags, struct nvkm_fb_tile *tile)
31{ 32{
32 tile->addr = 0x00000001 | addr; 33 tile->addr = 0x00000001 | addr;
33 tile->limit = max(1u, addr + size) - 1; 34 tile->limit = max(1u, addr + size) - 1;
34 tile->pitch = pitch; 35 tile->pitch = pitch;
35 if (flags & 4) { 36 if (flags & 4) {
36 pfb->tile.comp(pfb, i, size, flags, tile); 37 fb->func->tile.comp(fb, i, size, flags, tile);
37 tile->addr |= 2; 38 tile->addr |= 2;
38 } 39 }
39} 40}
40 41
41static void 42static void
42nv20_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags, 43nv20_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
43 struct nvkm_fb_tile *tile) 44 struct nvkm_fb_tile *tile)
44{ 45{
45 u32 tiles = DIV_ROUND_UP(size, 0x40); 46 u32 tiles = DIV_ROUND_UP(size, 0x40);
46 u32 tags = round_up(tiles / pfb->ram->parts, 0x40); 47 u32 tags = round_up(tiles / fb->ram->parts, 0x40);
47 if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) { 48 if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
48 if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */ 49 if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
49 else tile->zcomp = 0x04000000; /* Z24S8 */ 50 else tile->zcomp = 0x04000000; /* Z24S8 */
50 tile->zcomp |= tile->tag->offset; 51 tile->zcomp |= tile->tag->offset;
@@ -56,39 +57,39 @@ nv20_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
56} 57}
57 58
58void 59void
59nv20_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile) 60nv20_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
60{ 61{
61 tile->addr = 0; 62 tile->addr = 0;
62 tile->limit = 0; 63 tile->limit = 0;
63 tile->pitch = 0; 64 tile->pitch = 0;
64 tile->zcomp = 0; 65 tile->zcomp = 0;
65 nvkm_mm_free(&pfb->tags, &tile->tag); 66 nvkm_mm_free(&fb->ram->tags, &tile->tag);
66} 67}
67 68
68void 69void
69nv20_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile) 70nv20_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
70{ 71{
71 nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit); 72 struct nvkm_device *device = fb->subdev.device;
72 nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch); 73 nvkm_wr32(device, 0x100244 + (i * 0x10), tile->limit);
73 nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr); 74 nvkm_wr32(device, 0x100248 + (i * 0x10), tile->pitch);
74 nv_rd32(pfb, 0x100240 + (i * 0x10)); 75 nvkm_wr32(device, 0x100240 + (i * 0x10), tile->addr);
75 nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp); 76 nvkm_rd32(device, 0x100240 + (i * 0x10));
77 nvkm_wr32(device, 0x100300 + (i * 0x04), tile->zcomp);
76} 78}
77 79
78struct nvkm_oclass * 80static const struct nvkm_fb_func
79nv20_fb_oclass = &(struct nv04_fb_impl) { 81nv20_fb = {
80 .base.base.handle = NV_SUBDEV(FB, 0x20),
81 .base.base.ofuncs = &(struct nvkm_ofuncs) {
82 .ctor = nv04_fb_ctor,
83 .dtor = _nvkm_fb_dtor,
84 .init = _nvkm_fb_init,
85 .fini = _nvkm_fb_fini,
86 },
87 .base.memtype = nv04_fb_memtype_valid,
88 .base.ram = &nv20_ram_oclass,
89 .tile.regions = 8, 82 .tile.regions = 8,
90 .tile.init = nv20_fb_tile_init, 83 .tile.init = nv20_fb_tile_init,
91 .tile.comp = nv20_fb_tile_comp, 84 .tile.comp = nv20_fb_tile_comp,
92 .tile.fini = nv20_fb_tile_fini, 85 .tile.fini = nv20_fb_tile_fini,
93 .tile.prog = nv20_fb_tile_prog, 86 .tile.prog = nv20_fb_tile_prog,
94}.base.base; 87 .ram_new = nv20_ram_new,
88 .memtype_valid = nv04_fb_memtype_valid,
89};
90
91int
92nv20_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
93{
94 return nvkm_fb_new_(&nv20_fb, device, index, pfb);
95}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
index bc9f54f38fba..c56746d2a502 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
@@ -23,15 +23,16 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26#include "nv04.h" 26#include "priv.h"
27#include "ram.h"
27 28
28static void 29static void
29nv25_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags, 30nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
30 struct nvkm_fb_tile *tile) 31 struct nvkm_fb_tile *tile)
31{ 32{
32 u32 tiles = DIV_ROUND_UP(size, 0x40); 33 u32 tiles = DIV_ROUND_UP(size, 0x40);
33 u32 tags = round_up(tiles / pfb->ram->parts, 0x40); 34 u32 tags = round_up(tiles / fb->ram->parts, 0x40);
34 if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) { 35 if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
35 if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */ 36 if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
36 else tile->zcomp = 0x00200000; /* Z24S8 */ 37 else tile->zcomp = 0x00200000; /* Z24S8 */
37 tile->zcomp |= tile->tag->offset; 38 tile->zcomp |= tile->tag->offset;
@@ -41,20 +42,19 @@ nv25_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
41 } 42 }
42} 43}
43 44
44struct nvkm_oclass * 45static const struct nvkm_fb_func
45nv25_fb_oclass = &(struct nv04_fb_impl) { 46nv25_fb = {
46 .base.base.handle = NV_SUBDEV(FB, 0x25),
47 .base.base.ofuncs = &(struct nvkm_ofuncs) {
48 .ctor = nv04_fb_ctor,
49 .dtor = _nvkm_fb_dtor,
50 .init = _nvkm_fb_init,
51 .fini = _nvkm_fb_fini,
52 },
53 .base.memtype = nv04_fb_memtype_valid,
54 .base.ram = &nv20_ram_oclass,
55 .tile.regions = 8, 47 .tile.regions = 8,
56 .tile.init = nv20_fb_tile_init, 48 .tile.init = nv20_fb_tile_init,
57 .tile.comp = nv25_fb_tile_comp, 49 .tile.comp = nv25_fb_tile_comp,
58 .tile.fini = nv20_fb_tile_fini, 50 .tile.fini = nv20_fb_tile_fini,
59 .tile.prog = nv20_fb_tile_prog, 51 .tile.prog = nv20_fb_tile_prog,
60}.base.base; 52 .ram_new = nv20_ram_new,
53 .memtype_valid = nv04_fb_memtype_valid,
54};
55
56int
57nv25_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
58{
59 return nvkm_fb_new_(&nv25_fb, device, index, pfb);
60}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
index 09ebb9477e00..2a7c4831b821 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
@@ -23,20 +23,19 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26#include "nv04.h" 26#include "priv.h"
27 27#include "ram.h"
28#include <core/device.h>
29 28
30void 29void
31nv30_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch, 30nv30_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
32 u32 flags, struct nvkm_fb_tile *tile) 31 u32 flags, struct nvkm_fb_tile *tile)
33{ 32{
34 /* for performance, select alternate bank offset for zeta */ 33 /* for performance, select alternate bank offset for zeta */
35 if (!(flags & 4)) { 34 if (!(flags & 4)) {
36 tile->addr = (0 << 4); 35 tile->addr = (0 << 4);
37 } else { 36 } else {
38 if (pfb->tile.comp) /* z compression */ 37 if (fb->func->tile.comp) /* z compression */
39 pfb->tile.comp(pfb, i, size, flags, tile); 38 fb->func->tile.comp(fb, i, size, flags, tile);
40 tile->addr = (1 << 4); 39 tile->addr = (1 << 4);
41 } 40 }
42 41
@@ -47,12 +46,12 @@ nv30_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
47} 46}
48 47
49static void 48static void
50nv30_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags, 49nv30_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
51 struct nvkm_fb_tile *tile) 50 struct nvkm_fb_tile *tile)
52{ 51{
53 u32 tiles = DIV_ROUND_UP(size, 0x40); 52 u32 tiles = DIV_ROUND_UP(size, 0x40);
54 u32 tags = round_up(tiles / pfb->ram->parts, 0x40); 53 u32 tags = round_up(tiles / fb->ram->parts, 0x40);
55 if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) { 54 if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
56 if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */ 55 if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
57 else tile->zcomp |= 0x02000000; /* Z24S8 */ 56 else tile->zcomp |= 0x02000000; /* Z24S8 */
58 tile->zcomp |= ((tile->tag->offset ) >> 6); 57 tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -64,23 +63,24 @@ nv30_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
64} 63}
65 64
66static int 65static int
67calc_bias(struct nv04_fb_priv *priv, int k, int i, int j) 66calc_bias(struct nvkm_fb *fb, int k, int i, int j)
68{ 67{
69 struct nvkm_device *device = nv_device(priv); 68 struct nvkm_device *device = fb->subdev.device;
70 int b = (device->chipset > 0x30 ? 69 int b = (device->chipset > 0x30 ?
71 nv_rd32(priv, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) : 70 nvkm_rd32(device, 0x122c + 0x10 * k + 0x4 * j) >>
71 (4 * (i ^ 1)) :
72 0) & 0xf; 72 0) & 0xf;
73 73
74 return 2 * (b & 0x8 ? b - 0x10 : b); 74 return 2 * (b & 0x8 ? b - 0x10 : b);
75} 75}
76 76
77static int 77static int
78calc_ref(struct nv04_fb_priv *priv, int l, int k, int i) 78calc_ref(struct nvkm_fb *fb, int l, int k, int i)
79{ 79{
80 int j, x = 0; 80 int j, x = 0;
81 81
82 for (j = 0; j < 4; j++) { 82 for (j = 0; j < 4; j++) {
83 int m = (l >> (8 * i) & 0xff) + calc_bias(priv, k, i, j); 83 int m = (l >> (8 * i) & 0xff) + calc_bias(fb, k, i, j);
84 84
85 x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j); 85 x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
86 } 86 }
@@ -88,16 +88,11 @@ calc_ref(struct nv04_fb_priv *priv, int l, int k, int i)
88 return x; 88 return x;
89} 89}
90 90
91int 91void
92nv30_fb_init(struct nvkm_object *object) 92nv30_fb_init(struct nvkm_fb *fb)
93{ 93{
94 struct nvkm_device *device = nv_device(object); 94 struct nvkm_device *device = fb->subdev.device;
95 struct nv04_fb_priv *priv = (void *)object; 95 int i, j;
96 int ret, i, j;
97
98 ret = nvkm_fb_init(&priv->base);
99 if (ret)
100 return ret;
101 96
102 /* Init the memory timing regs at 0x10037c/0x1003ac */ 97 /* Init the memory timing regs at 0x10037c/0x1003ac */
103 if (device->chipset == 0x30 || 98 if (device->chipset == 0x30 ||
@@ -105,36 +100,34 @@ nv30_fb_init(struct nvkm_object *object)
105 device->chipset == 0x35) { 100 device->chipset == 0x35) {
106 /* Related to ROP count */ 101 /* Related to ROP count */
107 int n = (device->chipset == 0x31 ? 2 : 4); 102 int n = (device->chipset == 0x31 ? 2 : 4);
108 int l = nv_rd32(priv, 0x1003d0); 103 int l = nvkm_rd32(device, 0x1003d0);
109 104
110 for (i = 0; i < n; i++) { 105 for (i = 0; i < n; i++) {
111 for (j = 0; j < 3; j++) 106 for (j = 0; j < 3; j++)
112 nv_wr32(priv, 0x10037c + 0xc * i + 0x4 * j, 107 nvkm_wr32(device, 0x10037c + 0xc * i + 0x4 * j,
113 calc_ref(priv, l, 0, j)); 108 calc_ref(fb, l, 0, j));
114 109
115 for (j = 0; j < 2; j++) 110 for (j = 0; j < 2; j++)
116 nv_wr32(priv, 0x1003ac + 0x8 * i + 0x4 * j, 111 nvkm_wr32(device, 0x1003ac + 0x8 * i + 0x4 * j,
117 calc_ref(priv, l, 1, j)); 112 calc_ref(fb, l, 1, j));
118 } 113 }
119 } 114 }
120
121 return 0;
122} 115}
123 116
124struct nvkm_oclass * 117static const struct nvkm_fb_func
125nv30_fb_oclass = &(struct nv04_fb_impl) { 118nv30_fb = {
126 .base.base.handle = NV_SUBDEV(FB, 0x30), 119 .init = nv30_fb_init,
127 .base.base.ofuncs = &(struct nvkm_ofuncs) {
128 .ctor = nv04_fb_ctor,
129 .dtor = _nvkm_fb_dtor,
130 .init = nv30_fb_init,
131 .fini = _nvkm_fb_fini,
132 },
133 .base.memtype = nv04_fb_memtype_valid,
134 .base.ram = &nv20_ram_oclass,
135 .tile.regions = 8, 120 .tile.regions = 8,
136 .tile.init = nv30_fb_tile_init, 121 .tile.init = nv30_fb_tile_init,
137 .tile.comp = nv30_fb_tile_comp, 122 .tile.comp = nv30_fb_tile_comp,
138 .tile.fini = nv20_fb_tile_fini, 123 .tile.fini = nv20_fb_tile_fini,
139 .tile.prog = nv20_fb_tile_prog, 124 .tile.prog = nv20_fb_tile_prog,
140}.base.base; 125 .ram_new = nv20_ram_new,
126 .memtype_valid = nv04_fb_memtype_valid,
127};
128
129int
130nv30_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
131{
132 return nvkm_fb_new_(&nv30_fb, device, index, pfb);
133}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
index c01dc1839ea4..1604b3789ad1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
@@ -23,15 +23,16 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26#include "nv04.h" 26#include "priv.h"
27#include "ram.h"
27 28
28static void 29static void
29nv35_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags, 30nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
30 struct nvkm_fb_tile *tile) 31 struct nvkm_fb_tile *tile)
31{ 32{
32 u32 tiles = DIV_ROUND_UP(size, 0x40); 33 u32 tiles = DIV_ROUND_UP(size, 0x40);
33 u32 tags = round_up(tiles / pfb->ram->parts, 0x40); 34 u32 tags = round_up(tiles / fb->ram->parts, 0x40);
34 if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) { 35 if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
35 if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */ 36 if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
36 else tile->zcomp |= 0x08000000; /* Z24S8 */ 37 else tile->zcomp |= 0x08000000; /* Z24S8 */
37 tile->zcomp |= ((tile->tag->offset ) >> 6); 38 tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -42,20 +43,20 @@ nv35_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
42 } 43 }
43} 44}
44 45
45struct nvkm_oclass * 46static const struct nvkm_fb_func
46nv35_fb_oclass = &(struct nv04_fb_impl) { 47nv35_fb = {
47 .base.base.handle = NV_SUBDEV(FB, 0x35), 48 .init = nv30_fb_init,
48 .base.base.ofuncs = &(struct nvkm_ofuncs) {
49 .ctor = nv04_fb_ctor,
50 .dtor = _nvkm_fb_dtor,
51 .init = nv30_fb_init,
52 .fini = _nvkm_fb_fini,
53 },
54 .base.memtype = nv04_fb_memtype_valid,
55 .base.ram = &nv20_ram_oclass,
56 .tile.regions = 8, 49 .tile.regions = 8,
57 .tile.init = nv30_fb_tile_init, 50 .tile.init = nv30_fb_tile_init,
58 .tile.comp = nv35_fb_tile_comp, 51 .tile.comp = nv35_fb_tile_comp,
59 .tile.fini = nv20_fb_tile_fini, 52 .tile.fini = nv20_fb_tile_fini,
60 .tile.prog = nv20_fb_tile_prog, 53 .tile.prog = nv20_fb_tile_prog,
61}.base.base; 54 .ram_new = nv20_ram_new,
55 .memtype_valid = nv04_fb_memtype_valid,
56};
57
58int
59nv35_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
60{
61 return nvkm_fb_new_(&nv35_fb, device, index, pfb);
62}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
index cad75a1cef22..80cc0a6e3416 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
@@ -23,15 +23,16 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26#include "nv04.h" 26#include "priv.h"
27#include "ram.h"
27 28
28static void 29static void
29nv36_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags, 30nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
30 struct nvkm_fb_tile *tile) 31 struct nvkm_fb_tile *tile)
31{ 32{
32 u32 tiles = DIV_ROUND_UP(size, 0x40); 33 u32 tiles = DIV_ROUND_UP(size, 0x40);
33 u32 tags = round_up(tiles / pfb->ram->parts, 0x40); 34 u32 tags = round_up(tiles / fb->ram->parts, 0x40);
34 if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) { 35 if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
35 if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */ 36 if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
36 else tile->zcomp |= 0x20000000; /* Z24S8 */ 37 else tile->zcomp |= 0x20000000; /* Z24S8 */
37 tile->zcomp |= ((tile->tag->offset ) >> 6); 38 tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -42,20 +43,20 @@ nv36_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
42 } 43 }
43} 44}
44 45
45struct nvkm_oclass * 46static const struct nvkm_fb_func
46nv36_fb_oclass = &(struct nv04_fb_impl) { 47nv36_fb = {
47 .base.base.handle = NV_SUBDEV(FB, 0x36), 48 .init = nv30_fb_init,
48 .base.base.ofuncs = &(struct nvkm_ofuncs) {
49 .ctor = nv04_fb_ctor,
50 .dtor = _nvkm_fb_dtor,
51 .init = nv30_fb_init,
52 .fini = _nvkm_fb_fini,
53 },
54 .base.memtype = nv04_fb_memtype_valid,
55 .base.ram = &nv20_ram_oclass,
56 .tile.regions = 8, 49 .tile.regions = 8,
57 .tile.init = nv30_fb_tile_init, 50 .tile.init = nv30_fb_tile_init,
58 .tile.comp = nv36_fb_tile_comp, 51 .tile.comp = nv36_fb_tile_comp,
59 .tile.fini = nv20_fb_tile_fini, 52 .tile.fini = nv20_fb_tile_fini,
60 .tile.prog = nv20_fb_tile_prog, 53 .tile.prog = nv20_fb_tile_prog,
61}.base.base; 54 .ram_new = nv20_ram_new,
55 .memtype_valid = nv04_fb_memtype_valid,
56};
57
58int
59nv36_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
60{
61 return nvkm_fb_new_(&nv36_fb, device, index, pfb);
62}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
index dbe5c1910c2c..deec46a310f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
@@ -23,16 +23,17 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26#include "nv04.h" 26#include "priv.h"
27#include "ram.h"
27 28
28void 29void
29nv40_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags, 30nv40_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
30 struct nvkm_fb_tile *tile) 31 struct nvkm_fb_tile *tile)
31{ 32{
32 u32 tiles = DIV_ROUND_UP(size, 0x80); 33 u32 tiles = DIV_ROUND_UP(size, 0x80);
33 u32 tags = round_up(tiles / pfb->ram->parts, 0x100); 34 u32 tags = round_up(tiles / fb->ram->parts, 0x100);
34 if ( (flags & 2) && 35 if ( (flags & 2) &&
35 !nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) { 36 !nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
36 tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */ 37 tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
37 tile->zcomp |= ((tile->tag->offset ) >> 8); 38 tile->zcomp |= ((tile->tag->offset ) >> 8);
38 tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13; 39 tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
@@ -42,34 +43,26 @@ nv40_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
42 } 43 }
43} 44}
44 45
45static int 46static void
46nv40_fb_init(struct nvkm_object *object) 47nv40_fb_init(struct nvkm_fb *fb)
47{ 48{
48 struct nv04_fb_priv *priv = (void *)object; 49 nvkm_mask(fb->subdev.device, 0x10033c, 0x00008000, 0x00000000);
49 int ret;
50
51 ret = nvkm_fb_init(&priv->base);
52 if (ret)
53 return ret;
54
55 nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
56 return 0;
57} 50}
58 51
59struct nvkm_oclass * 52static const struct nvkm_fb_func
60nv40_fb_oclass = &(struct nv04_fb_impl) { 53nv40_fb = {
61 .base.base.handle = NV_SUBDEV(FB, 0x40), 54 .init = nv40_fb_init,
62 .base.base.ofuncs = &(struct nvkm_ofuncs) {
63 .ctor = nv04_fb_ctor,
64 .dtor = _nvkm_fb_dtor,
65 .init = nv40_fb_init,
66 .fini = _nvkm_fb_fini,
67 },
68 .base.memtype = nv04_fb_memtype_valid,
69 .base.ram = &nv40_ram_oclass,
70 .tile.regions = 8, 55 .tile.regions = 8,
71 .tile.init = nv30_fb_tile_init, 56 .tile.init = nv30_fb_tile_init,
72 .tile.comp = nv40_fb_tile_comp, 57 .tile.comp = nv40_fb_tile_comp,
73 .tile.fini = nv20_fb_tile_fini, 58 .tile.fini = nv20_fb_tile_fini,
74 .tile.prog = nv20_fb_tile_prog, 59 .tile.prog = nv20_fb_tile_prog,
75}.base.base; 60 .ram_new = nv40_ram_new,
61 .memtype_valid = nv04_fb_memtype_valid,
62};
63
64int
65nv40_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
66{
67 return nvkm_fb_new_(&nv40_fb, device, index, pfb);
68}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h
deleted file mode 100644
index 602182661820..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef __NVKM_FB_NV40_H__
2#define __NVKM_FB_NV40_H__
3#include "priv.h"
4
5struct nv40_ram {
6 struct nvkm_ram base;
7 u32 ctrl;
8 u32 coef;
9};
10
11int nv40_ram_calc(struct nvkm_fb *, u32);
12int nv40_ram_prog(struct nvkm_fb *);
13void nv40_ram_tidy(struct nvkm_fb *);
14#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
index d9e1a40a2955..79e57dd5a00f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
@@ -23,46 +23,40 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26#include "nv04.h" 26#include "priv.h"
27#include "ram.h"
27 28
28void 29void
29nv41_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile) 30nv41_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
30{ 31{
31 nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit); 32 struct nvkm_device *device = fb->subdev.device;
32 nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch); 33 nvkm_wr32(device, 0x100604 + (i * 0x10), tile->limit);
33 nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr); 34 nvkm_wr32(device, 0x100608 + (i * 0x10), tile->pitch);
34 nv_rd32(pfb, 0x100600 + (i * 0x10)); 35 nvkm_wr32(device, 0x100600 + (i * 0x10), tile->addr);
35 nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp); 36 nvkm_rd32(device, 0x100600 + (i * 0x10));
37 nvkm_wr32(device, 0x100700 + (i * 0x04), tile->zcomp);
36} 38}
37 39
38int 40void
39nv41_fb_init(struct nvkm_object *object) 41nv41_fb_init(struct nvkm_fb *fb)
40{ 42{
41 struct nv04_fb_priv *priv = (void *)object; 43 nvkm_wr32(fb->subdev.device, 0x100800, 0x00000001);
42 int ret;
43
44 ret = nvkm_fb_init(&priv->base);
45 if (ret)
46 return ret;
47
48 nv_wr32(priv, 0x100800, 0x00000001);
49 return 0;
50} 44}
51 45
52struct nvkm_oclass * 46static const struct nvkm_fb_func
53nv41_fb_oclass = &(struct nv04_fb_impl) { 47nv41_fb = {
54 .base.base.handle = NV_SUBDEV(FB, 0x41), 48 .init = nv41_fb_init,
55 .base.base.ofuncs = &(struct nvkm_ofuncs) {
56 .ctor = nv04_fb_ctor,
57 .dtor = _nvkm_fb_dtor,
58 .init = nv41_fb_init,
59 .fini = _nvkm_fb_fini,
60 },
61 .base.memtype = nv04_fb_memtype_valid,
62 .base.ram = &nv41_ram_oclass,
63 .tile.regions = 12, 49 .tile.regions = 12,
64 .tile.init = nv30_fb_tile_init, 50 .tile.init = nv30_fb_tile_init,
65 .tile.comp = nv40_fb_tile_comp, 51 .tile.comp = nv40_fb_tile_comp,
66 .tile.fini = nv20_fb_tile_fini, 52 .tile.fini = nv20_fb_tile_fini,
67 .tile.prog = nv41_fb_tile_prog, 53 .tile.prog = nv41_fb_tile_prog,
68}.base.base; 54 .ram_new = nv41_ram_new,
55 .memtype_valid = nv04_fb_memtype_valid,
56};
57
58int
59nv41_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
60{
61 return nvkm_fb_new_(&nv41_fb, device, index, pfb);
62}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
index 20b97c83c4af..06246cce5ec4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
@@ -23,10 +23,11 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26#include "nv04.h" 26#include "priv.h"
27#include "ram.h"
27 28
28static void 29static void
29nv44_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch, 30nv44_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
30 u32 flags, struct nvkm_fb_tile *tile) 31 u32 flags, struct nvkm_fb_tile *tile)
31{ 32{
32 tile->addr = 0x00000001; /* mode = vram */ 33 tile->addr = 0x00000001; /* mode = vram */
@@ -36,42 +37,36 @@ nv44_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
36} 37}
37 38
38void 39void
39nv44_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile) 40nv44_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
40{ 41{
41 nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit); 42 struct nvkm_device *device = fb->subdev.device;
42 nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch); 43 nvkm_wr32(device, 0x100604 + (i * 0x10), tile->limit);
43 nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr); 44 nvkm_wr32(device, 0x100608 + (i * 0x10), tile->pitch);
44 nv_rd32(pfb, 0x100600 + (i * 0x10)); 45 nvkm_wr32(device, 0x100600 + (i * 0x10), tile->addr);
46 nvkm_rd32(device, 0x100600 + (i * 0x10));
45} 47}
46 48
47int 49void
48nv44_fb_init(struct nvkm_object *object) 50nv44_fb_init(struct nvkm_fb *fb)
49{ 51{
50 struct nv04_fb_priv *priv = (void *)object; 52 struct nvkm_device *device = fb->subdev.device;
51 int ret; 53 nvkm_wr32(device, 0x100850, 0x80000000);
52 54 nvkm_wr32(device, 0x100800, 0x00000001);
53 ret = nvkm_fb_init(&priv->base);
54 if (ret)
55 return ret;
56
57 nv_wr32(priv, 0x100850, 0x80000000);
58 nv_wr32(priv, 0x100800, 0x00000001);
59 return 0;
60} 55}
61 56
62struct nvkm_oclass * 57static const struct nvkm_fb_func
63nv44_fb_oclass = &(struct nv04_fb_impl) { 58nv44_fb = {
64 .base.base.handle = NV_SUBDEV(FB, 0x44), 59 .init = nv44_fb_init,
65 .base.base.ofuncs = &(struct nvkm_ofuncs) {
66 .ctor = nv04_fb_ctor,
67 .dtor = _nvkm_fb_dtor,
68 .init = nv44_fb_init,
69 .fini = _nvkm_fb_fini,
70 },
71 .base.memtype = nv04_fb_memtype_valid,
72 .base.ram = &nv44_ram_oclass,
73 .tile.regions = 12, 60 .tile.regions = 12,
74 .tile.init = nv44_fb_tile_init, 61 .tile.init = nv44_fb_tile_init,
75 .tile.fini = nv20_fb_tile_fini, 62 .tile.fini = nv20_fb_tile_fini,
76 .tile.prog = nv44_fb_tile_prog, 63 .tile.prog = nv44_fb_tile_prog,
77}.base.base; 64 .ram_new = nv44_ram_new,
65 .memtype_valid = nv04_fb_memtype_valid,
66};
67
68int
69nv44_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
70{
71 return nvkm_fb_new_(&nv44_fb, device, index, pfb);
72}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
index 5bfac38cdf24..3598a1aa65be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
@@ -23,10 +23,11 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26#include "nv04.h" 26#include "priv.h"
27#include "ram.h"
27 28
28void 29void
29nv46_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch, 30nv46_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
30 u32 flags, struct nvkm_fb_tile *tile) 31 u32 flags, struct nvkm_fb_tile *tile)
31{ 32{
32 /* for performance, select alternate bank offset for zeta */ 33 /* for performance, select alternate bank offset for zeta */
@@ -39,19 +40,19 @@ nv46_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
39 tile->pitch = pitch; 40 tile->pitch = pitch;
40} 41}
41 42
42struct nvkm_oclass * 43static const struct nvkm_fb_func
43nv46_fb_oclass = &(struct nv04_fb_impl) { 44nv46_fb = {
44 .base.base.handle = NV_SUBDEV(FB, 0x46), 45 .init = nv44_fb_init,
45 .base.base.ofuncs = &(struct nvkm_ofuncs) {
46 .ctor = nv04_fb_ctor,
47 .dtor = _nvkm_fb_dtor,
48 .init = nv44_fb_init,
49 .fini = _nvkm_fb_fini,
50 },
51 .base.memtype = nv04_fb_memtype_valid,
52 .base.ram = &nv44_ram_oclass,
53 .tile.regions = 15, 46 .tile.regions = 15,
54 .tile.init = nv46_fb_tile_init, 47 .tile.init = nv46_fb_tile_init,
55 .tile.fini = nv20_fb_tile_fini, 48 .tile.fini = nv20_fb_tile_fini,
56 .tile.prog = nv44_fb_tile_prog, 49 .tile.prog = nv44_fb_tile_prog,
57}.base.base; 50 .ram_new = nv44_ram_new,
51 .memtype_valid = nv04_fb_memtype_valid,
52};
53
54int
55nv46_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
56{
57 return nvkm_fb_new_(&nv46_fb, device, index, pfb);
58}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
index d3b3988d1d49..c505e4429314 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
@@ -23,22 +23,23 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26#include "nv04.h" 26#include "priv.h"
27#include "ram.h"
27 28
28struct nvkm_oclass * 29static const struct nvkm_fb_func
29nv47_fb_oclass = &(struct nv04_fb_impl) { 30nv47_fb = {
30 .base.base.handle = NV_SUBDEV(FB, 0x47), 31 .init = nv41_fb_init,
31 .base.base.ofuncs = &(struct nvkm_ofuncs) {
32 .ctor = nv04_fb_ctor,
33 .dtor = _nvkm_fb_dtor,
34 .init = nv41_fb_init,
35 .fini = _nvkm_fb_fini,
36 },
37 .base.memtype = nv04_fb_memtype_valid,
38 .base.ram = &nv41_ram_oclass,
39 .tile.regions = 15, 32 .tile.regions = 15,
40 .tile.init = nv30_fb_tile_init, 33 .tile.init = nv30_fb_tile_init,
41 .tile.comp = nv40_fb_tile_comp, 34 .tile.comp = nv40_fb_tile_comp,
42 .tile.fini = nv20_fb_tile_fini, 35 .tile.fini = nv20_fb_tile_fini,
43 .tile.prog = nv41_fb_tile_prog, 36 .tile.prog = nv41_fb_tile_prog,
44}.base.base; 37 .ram_new = nv41_ram_new,
38 .memtype_valid = nv04_fb_memtype_valid,
39};
40
41int
42nv47_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
43{
44 return nvkm_fb_new_(&nv47_fb, device, index, pfb);
45}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
index 236e36c5054e..7b91b9f170e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
@@ -23,22 +23,23 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26#include "nv04.h" 26#include "priv.h"
27#include "ram.h"
27 28
28struct nvkm_oclass * 29static const struct nvkm_fb_func
29nv49_fb_oclass = &(struct nv04_fb_impl) { 30nv49_fb = {
30 .base.base.handle = NV_SUBDEV(FB, 0x49), 31 .init = nv41_fb_init,
31 .base.base.ofuncs = &(struct nvkm_ofuncs) {
32 .ctor = nv04_fb_ctor,
33 .dtor = _nvkm_fb_dtor,
34 .init = nv41_fb_init,
35 .fini = _nvkm_fb_fini,
36 },
37 .base.memtype = nv04_fb_memtype_valid,
38 .base.ram = &nv49_ram_oclass,
39 .tile.regions = 15, 32 .tile.regions = 15,
40 .tile.init = nv30_fb_tile_init, 33 .tile.init = nv30_fb_tile_init,
41 .tile.comp = nv40_fb_tile_comp, 34 .tile.comp = nv40_fb_tile_comp,
42 .tile.fini = nv20_fb_tile_fini, 35 .tile.fini = nv20_fb_tile_fini,
43 .tile.prog = nv41_fb_tile_prog, 36 .tile.prog = nv41_fb_tile_prog,
44}.base.base; 37 .ram_new = nv49_ram_new,
38 .memtype_valid = nv04_fb_memtype_valid,
39};
40
41int
42nv49_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
43{
44 return nvkm_fb_new_(&nv49_fb, device, index, pfb);
45}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
index 1352b6a73fb0..4e98210c1b1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
@@ -23,21 +23,22 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26#include "nv04.h" 26#include "priv.h"
27#include "ram.h"
27 28
28struct nvkm_oclass * 29static const struct nvkm_fb_func
29nv4e_fb_oclass = &(struct nv04_fb_impl) { 30nv4e_fb = {
30 .base.base.handle = NV_SUBDEV(FB, 0x4e), 31 .init = nv44_fb_init,
31 .base.base.ofuncs = &(struct nvkm_ofuncs) {
32 .ctor = nv04_fb_ctor,
33 .dtor = _nvkm_fb_dtor,
34 .init = nv44_fb_init,
35 .fini = _nvkm_fb_fini,
36 },
37 .base.memtype = nv04_fb_memtype_valid,
38 .base.ram = &nv4e_ram_oclass,
39 .tile.regions = 12, 32 .tile.regions = 12,
40 .tile.init = nv46_fb_tile_init, 33 .tile.init = nv46_fb_tile_init,
41 .tile.fini = nv20_fb_tile_fini, 34 .tile.fini = nv20_fb_tile_fini,
42 .tile.prog = nv44_fb_tile_prog, 35 .tile.prog = nv44_fb_tile_prog,
43}.base.base; 36 .ram_new = nv44_ram_new,
37 .memtype_valid = nv04_fb_memtype_valid,
38};
39
40int
41nv4e_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
42{
43 return nvkm_fb_new_(&nv4e_fb, device, index, pfb);
44}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 0480ce52aa06..f5edfadb5b46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -22,11 +22,11 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "nv50.h"
25#include "ram.h"
25 26
26#include <core/client.h> 27#include <core/client.h>
27#include <core/device.h>
28#include <core/engctx.h>
29#include <core/enum.h> 28#include <core/enum.h>
29#include <engine/fifo.h>
30 30
31int 31int
32nv50_fb_memtype[0x80] = { 32nv50_fb_memtype[0x80] = {
@@ -40,130 +40,139 @@ nv50_fb_memtype[0x80] = {
40 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0 40 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
41}; 41};
42 42
43bool 43static int
44nv50_fb_memtype_valid(struct nvkm_fb *pfb, u32 memtype) 44nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
45{
46 struct nv50_fb *fb = nv50_fb(base);
47 return fb->func->ram_new(&fb->base, pram);
48}
49
50static bool
51nv50_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
45{ 52{
46 return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0; 53 return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
47} 54}
48 55
49static const struct nvkm_enum vm_dispatch_subclients[] = { 56static const struct nvkm_enum vm_dispatch_subclients[] = {
50 { 0x00000000, "GRCTX", NULL }, 57 { 0x00000000, "GRCTX" },
51 { 0x00000001, "NOTIFY", NULL }, 58 { 0x00000001, "NOTIFY" },
52 { 0x00000002, "QUERY", NULL }, 59 { 0x00000002, "QUERY" },
53 { 0x00000003, "COND", NULL }, 60 { 0x00000003, "COND" },
54 { 0x00000004, "M2M_IN", NULL }, 61 { 0x00000004, "M2M_IN" },
55 { 0x00000005, "M2M_OUT", NULL }, 62 { 0x00000005, "M2M_OUT" },
56 { 0x00000006, "M2M_NOTIFY", NULL }, 63 { 0x00000006, "M2M_NOTIFY" },
57 {} 64 {}
58}; 65};
59 66
60static const struct nvkm_enum vm_ccache_subclients[] = { 67static const struct nvkm_enum vm_ccache_subclients[] = {
61 { 0x00000000, "CB", NULL }, 68 { 0x00000000, "CB" },
62 { 0x00000001, "TIC", NULL }, 69 { 0x00000001, "TIC" },
63 { 0x00000002, "TSC", NULL }, 70 { 0x00000002, "TSC" },
64 {} 71 {}
65}; 72};
66 73
67static const struct nvkm_enum vm_prop_subclients[] = { 74static const struct nvkm_enum vm_prop_subclients[] = {
68 { 0x00000000, "RT0", NULL }, 75 { 0x00000000, "RT0" },
69 { 0x00000001, "RT1", NULL }, 76 { 0x00000001, "RT1" },
70 { 0x00000002, "RT2", NULL }, 77 { 0x00000002, "RT2" },
71 { 0x00000003, "RT3", NULL }, 78 { 0x00000003, "RT3" },
72 { 0x00000004, "RT4", NULL }, 79 { 0x00000004, "RT4" },
73 { 0x00000005, "RT5", NULL }, 80 { 0x00000005, "RT5" },
74 { 0x00000006, "RT6", NULL }, 81 { 0x00000006, "RT6" },
75 { 0x00000007, "RT7", NULL }, 82 { 0x00000007, "RT7" },
76 { 0x00000008, "ZETA", NULL }, 83 { 0x00000008, "ZETA" },
77 { 0x00000009, "LOCAL", NULL }, 84 { 0x00000009, "LOCAL" },
78 { 0x0000000a, "GLOBAL", NULL }, 85 { 0x0000000a, "GLOBAL" },
79 { 0x0000000b, "STACK", NULL }, 86 { 0x0000000b, "STACK" },
80 { 0x0000000c, "DST2D", NULL }, 87 { 0x0000000c, "DST2D" },
81 {} 88 {}
82}; 89};
83 90
84static const struct nvkm_enum vm_pfifo_subclients[] = { 91static const struct nvkm_enum vm_pfifo_subclients[] = {
85 { 0x00000000, "PUSHBUF", NULL }, 92 { 0x00000000, "PUSHBUF" },
86 { 0x00000001, "SEMAPHORE", NULL }, 93 { 0x00000001, "SEMAPHORE" },
87 {} 94 {}
88}; 95};
89 96
90static const struct nvkm_enum vm_bar_subclients[] = { 97static const struct nvkm_enum vm_bar_subclients[] = {
91 { 0x00000000, "FB", NULL }, 98 { 0x00000000, "FB" },
92 { 0x00000001, "IN", NULL }, 99 { 0x00000001, "IN" },
93 {} 100 {}
94}; 101};
95 102
96static const struct nvkm_enum vm_client[] = { 103static const struct nvkm_enum vm_client[] = {
97 { 0x00000000, "STRMOUT", NULL }, 104 { 0x00000000, "STRMOUT" },
98 { 0x00000003, "DISPATCH", vm_dispatch_subclients }, 105 { 0x00000003, "DISPATCH", vm_dispatch_subclients },
99 { 0x00000004, "PFIFO_WRITE", NULL }, 106 { 0x00000004, "PFIFO_WRITE" },
100 { 0x00000005, "CCACHE", vm_ccache_subclients }, 107 { 0x00000005, "CCACHE", vm_ccache_subclients },
101 { 0x00000006, "PMSPPP", NULL }, 108 { 0x00000006, "PMSPPP" },
102 { 0x00000007, "CLIPID", NULL }, 109 { 0x00000007, "CLIPID" },
103 { 0x00000008, "PFIFO_READ", NULL }, 110 { 0x00000008, "PFIFO_READ" },
104 { 0x00000009, "VFETCH", NULL }, 111 { 0x00000009, "VFETCH" },
105 { 0x0000000a, "TEXTURE", NULL }, 112 { 0x0000000a, "TEXTURE" },
106 { 0x0000000b, "PROP", vm_prop_subclients }, 113 { 0x0000000b, "PROP", vm_prop_subclients },
107 { 0x0000000c, "PVP", NULL }, 114 { 0x0000000c, "PVP" },
108 { 0x0000000d, "PBSP", NULL }, 115 { 0x0000000d, "PBSP" },
109 { 0x0000000e, "PCRYPT", NULL }, 116 { 0x0000000e, "PCRYPT" },
110 { 0x0000000f, "PCOUNTER", NULL }, 117 { 0x0000000f, "PCOUNTER" },
111 { 0x00000011, "PDAEMON", NULL }, 118 { 0x00000011, "PDAEMON" },
112 {} 119 {}
113}; 120};
114 121
115static const struct nvkm_enum vm_engine[] = { 122static const struct nvkm_enum vm_engine[] = {
116 { 0x00000000, "PGRAPH", NULL, NVDEV_ENGINE_GR }, 123 { 0x00000000, "PGRAPH" },
117 { 0x00000001, "PVP", NULL, NVDEV_ENGINE_VP }, 124 { 0x00000001, "PVP" },
118 { 0x00000004, "PEEPHOLE", NULL }, 125 { 0x00000004, "PEEPHOLE" },
119 { 0x00000005, "PFIFO", vm_pfifo_subclients, NVDEV_ENGINE_FIFO }, 126 { 0x00000005, "PFIFO", vm_pfifo_subclients },
120 { 0x00000006, "BAR", vm_bar_subclients }, 127 { 0x00000006, "BAR", vm_bar_subclients },
121 { 0x00000008, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP }, 128 { 0x00000008, "PMSPPP" },
122 { 0x00000008, "PMPEG", NULL, NVDEV_ENGINE_MPEG }, 129 { 0x00000008, "PMPEG" },
123 { 0x00000009, "PBSP", NULL, NVDEV_ENGINE_BSP }, 130 { 0x00000009, "PBSP" },
124 { 0x0000000a, "PCRYPT", NULL, NVDEV_ENGINE_CIPHER }, 131 { 0x0000000a, "PCRYPT" },
125 { 0x0000000b, "PCOUNTER", NULL }, 132 { 0x0000000b, "PCOUNTER" },
126 { 0x0000000c, "SEMAPHORE_BG", NULL }, 133 { 0x0000000c, "SEMAPHORE_BG" },
127 { 0x0000000d, "PCE0", NULL, NVDEV_ENGINE_CE0 }, 134 { 0x0000000d, "PCE0" },
128 { 0x0000000e, "PDAEMON", NULL }, 135 { 0x0000000e, "PDAEMON" },
129 {} 136 {}
130}; 137};
131 138
132static const struct nvkm_enum vm_fault[] = { 139static const struct nvkm_enum vm_fault[] = {
133 { 0x00000000, "PT_NOT_PRESENT", NULL }, 140 { 0x00000000, "PT_NOT_PRESENT" },
134 { 0x00000001, "PT_TOO_SHORT", NULL }, 141 { 0x00000001, "PT_TOO_SHORT" },
135 { 0x00000002, "PAGE_NOT_PRESENT", NULL }, 142 { 0x00000002, "PAGE_NOT_PRESENT" },
136 { 0x00000003, "PAGE_SYSTEM_ONLY", NULL }, 143 { 0x00000003, "PAGE_SYSTEM_ONLY" },
137 { 0x00000004, "PAGE_READ_ONLY", NULL }, 144 { 0x00000004, "PAGE_READ_ONLY" },
138 { 0x00000006, "NULL_DMAOBJ", NULL }, 145 { 0x00000006, "NULL_DMAOBJ" },
139 { 0x00000007, "WRONG_MEMTYPE", NULL }, 146 { 0x00000007, "WRONG_MEMTYPE" },
140 { 0x0000000b, "VRAM_LIMIT", NULL }, 147 { 0x0000000b, "VRAM_LIMIT" },
141 { 0x0000000f, "DMAOBJ_LIMIT", NULL }, 148 { 0x0000000f, "DMAOBJ_LIMIT" },
142 {} 149 {}
143}; 150};
144 151
145static void 152static void
146nv50_fb_intr(struct nvkm_subdev *subdev) 153nv50_fb_intr(struct nvkm_fb *base)
147{ 154{
148 struct nvkm_device *device = nv_device(subdev); 155 struct nv50_fb *fb = nv50_fb(base);
149 struct nvkm_engine *engine; 156 struct nvkm_subdev *subdev = &fb->base.subdev;
150 struct nv50_fb_priv *priv = (void *)subdev; 157 struct nvkm_device *device = subdev->device;
151 const struct nvkm_enum *en, *cl; 158 struct nvkm_fifo *fifo = device->fifo;
152 struct nvkm_object *engctx = NULL; 159 struct nvkm_fifo_chan *chan;
153 u32 trap[6], idx, chan; 160 const struct nvkm_enum *en, *re, *cl, *sc;
161 u32 trap[6], idx, inst;
154 u8 st0, st1, st2, st3; 162 u8 st0, st1, st2, st3;
163 unsigned long flags;
155 int i; 164 int i;
156 165
157 idx = nv_rd32(priv, 0x100c90); 166 idx = nvkm_rd32(device, 0x100c90);
158 if (!(idx & 0x80000000)) 167 if (!(idx & 0x80000000))
159 return; 168 return;
160 idx &= 0x00ffffff; 169 idx &= 0x00ffffff;
161 170
162 for (i = 0; i < 6; i++) { 171 for (i = 0; i < 6; i++) {
163 nv_wr32(priv, 0x100c90, idx | i << 24); 172 nvkm_wr32(device, 0x100c90, idx | i << 24);
164 trap[i] = nv_rd32(priv, 0x100c94); 173 trap[i] = nvkm_rd32(device, 0x100c94);
165 } 174 }
166 nv_wr32(priv, 0x100c90, idx | 0x80000000); 175 nvkm_wr32(device, 0x100c90, idx | 0x80000000);
167 176
168 /* decode status bits into something more useful */ 177 /* decode status bits into something more useful */
169 if (device->chipset < 0xa3 || 178 if (device->chipset < 0xa3 ||
@@ -178,143 +187,103 @@ nv50_fb_intr(struct nvkm_subdev *subdev)
178 st2 = (trap[0] & 0x00ff0000) >> 16; 187 st2 = (trap[0] & 0x00ff0000) >> 16;
179 st3 = (trap[0] & 0xff000000) >> 24; 188 st3 = (trap[0] & 0xff000000) >> 24;
180 } 189 }
181 chan = (trap[2] << 16) | trap[1]; 190 inst = ((trap[2] << 16) | trap[1]) << 12;
182 191
183 en = nvkm_enum_find(vm_engine, st0); 192 en = nvkm_enum_find(vm_engine, st0);
184 193 re = nvkm_enum_find(vm_fault , st1);
185 if (en && en->data2) {
186 const struct nvkm_enum *orig_en = en;
187 while (en->name && en->value == st0 && en->data2) {
188 engine = nvkm_engine(subdev, en->data2);
189 /*XXX: clean this up */
190 if (!engine && en->data2 == NVDEV_ENGINE_BSP)
191 engine = nvkm_engine(subdev, NVDEV_ENGINE_MSVLD);
192 if (!engine && en->data2 == NVDEV_ENGINE_CIPHER)
193 engine = nvkm_engine(subdev, NVDEV_ENGINE_SEC);
194 if (!engine && en->data2 == NVDEV_ENGINE_VP)
195 engine = nvkm_engine(subdev, NVDEV_ENGINE_MSPDEC);
196 if (engine) {
197 engctx = nvkm_engctx_get(engine, chan);
198 if (engctx)
199 break;
200 }
201 en++;
202 }
203 if (!engctx)
204 en = orig_en;
205 }
206
207 nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x [%s] ",
208 (trap[5] & 0x00000100) ? "read" : "write",
209 trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan,
210 nvkm_client_name(engctx));
211
212 nvkm_engctx_put(engctx);
213
214 if (en)
215 pr_cont("%s/", en->name);
216 else
217 pr_cont("%02x/", st0);
218
219 cl = nvkm_enum_find(vm_client, st2); 194 cl = nvkm_enum_find(vm_client, st2);
220 if (cl) 195 if (cl && cl->data) sc = nvkm_enum_find(cl->data, st3);
221 pr_cont("%s/", cl->name); 196 else if (en && en->data) sc = nvkm_enum_find(en->data, st3);
222 else 197 else sc = NULL;
223 pr_cont("%02x/", st2); 198
224 199 chan = nvkm_fifo_chan_inst(fifo, inst, &flags);
225 if (cl && cl->data) cl = nvkm_enum_find(cl->data, st3); 200 nvkm_error(subdev, "trapped %s at %02x%04x%04x on channel %d [%08x %s] "
226 else if (en && en->data) cl = nvkm_enum_find(en->data, st3); 201 "engine %02x [%s] client %02x [%s] "
227 else cl = NULL; 202 "subclient %02x [%s] reason %08x [%s]\n",
228 if (cl) 203 (trap[5] & 0x00000100) ? "read" : "write",
229 pr_cont("%s", cl->name); 204 trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff,
230 else 205 chan ? chan->chid : -1, inst,
231 pr_cont("%02x", st3); 206 chan ? chan->object.client->name : "unknown",
232 207 st0, en ? en->name : "",
233 pr_cont(" reason: "); 208 st2, cl ? cl->name : "", st3, sc ? sc->name : "",
234 en = nvkm_enum_find(vm_fault, st1); 209 st1, re ? re->name : "");
235 if (en) 210 nvkm_fifo_chan_put(fifo, flags, &chan);
236 pr_cont("%s\n", en->name);
237 else
238 pr_cont("0x%08x\n", st1);
239} 211}
240 212
241int 213static void
242nv50_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 214nv50_fb_init(struct nvkm_fb *base)
243 struct nvkm_oclass *oclass, void *data, u32 size,
244 struct nvkm_object **pobject)
245{ 215{
246 struct nvkm_device *device = nv_device(parent); 216 struct nv50_fb *fb = nv50_fb(base);
247 struct nv50_fb_priv *priv; 217 struct nvkm_device *device = fb->base.subdev.device;
248 int ret;
249
250 ret = nvkm_fb_create(parent, engine, oclass, &priv);
251 *pobject = nv_object(priv);
252 if (ret)
253 return ret;
254 218
255 priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); 219 /* Not a clue what this is exactly. Without pointing it at a
256 if (priv->r100c08_page) { 220 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
257 priv->r100c08 = dma_map_page(nv_device_base(device), 221 * cause IOMMU "read from address 0" errors (rh#561267)
258 priv->r100c08_page, 0, PAGE_SIZE, 222 */
259 DMA_BIDIRECTIONAL); 223 nvkm_wr32(device, 0x100c08, fb->r100c08 >> 8);
260 if (dma_mapping_error(nv_device_base(device), priv->r100c08))
261 return -EFAULT;
262 } else {
263 nv_warn(priv, "failed 0x100c08 page alloc\n");
264 }
265 224
266 nv_subdev(priv)->intr = nv50_fb_intr; 225 /* This is needed to get meaningful information from 100c90
267 return 0; 226 * on traps. No idea what these values mean exactly. */
227 nvkm_wr32(device, 0x100c90, fb->func->trap);
268} 228}
269 229
270void 230static void *
271nv50_fb_dtor(struct nvkm_object *object) 231nv50_fb_dtor(struct nvkm_fb *base)
272{ 232{
273 struct nvkm_device *device = nv_device(object); 233 struct nv50_fb *fb = nv50_fb(base);
274 struct nv50_fb_priv *priv = (void *)object; 234 struct nvkm_device *device = fb->base.subdev.device;
275 235
276 if (priv->r100c08_page) { 236 if (fb->r100c08_page) {
277 dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE, 237 dma_unmap_page(device->dev, fb->r100c08, PAGE_SIZE,
278 DMA_BIDIRECTIONAL); 238 DMA_BIDIRECTIONAL);
279 __free_page(priv->r100c08_page); 239 __free_page(fb->r100c08_page);
280 } 240 }
281 241
282 nvkm_fb_destroy(&priv->base); 242 return fb;
283} 243}
284 244
245static const struct nvkm_fb_func
246nv50_fb_ = {
247 .dtor = nv50_fb_dtor,
248 .init = nv50_fb_init,
249 .intr = nv50_fb_intr,
250 .ram_new = nv50_fb_ram_new,
251 .memtype_valid = nv50_fb_memtype_valid,
252};
253
285int 254int
286nv50_fb_init(struct nvkm_object *object) 255nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
256 int index, struct nvkm_fb **pfb)
287{ 257{
288 struct nv50_fb_impl *impl = (void *)object->oclass; 258 struct nv50_fb *fb;
289 struct nv50_fb_priv *priv = (void *)object; 259
290 int ret; 260 if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
291 261 return -ENOMEM;
292 ret = nvkm_fb_init(&priv->base); 262 nvkm_fb_ctor(&nv50_fb_, device, index, &fb->base);
293 if (ret) 263 fb->func = func;
294 return ret; 264 *pfb = &fb->base;
295 265
296 /* Not a clue what this is exactly. Without pointing it at a 266 fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
297 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) 267 if (fb->r100c08_page) {
298 * cause IOMMU "read from address 0" errors (rh#561267) 268 fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
299 */ 269 PAGE_SIZE, DMA_BIDIRECTIONAL);
300 nv_wr32(priv, 0x100c08, priv->r100c08 >> 8); 270 if (dma_mapping_error(device->dev, fb->r100c08))
271 return -EFAULT;
272 } else {
273 nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n");
274 }
301 275
302 /* This is needed to get meaningful information from 100c90
303 * on traps. No idea what these values mean exactly. */
304 nv_wr32(priv, 0x100c90, impl->trap);
305 return 0; 276 return 0;
306} 277}
307 278
308struct nvkm_oclass * 279static const struct nv50_fb_func
309nv50_fb_oclass = &(struct nv50_fb_impl) { 280nv50_fb = {
310 .base.base.handle = NV_SUBDEV(FB, 0x50), 281 .ram_new = nv50_ram_new,
311 .base.base.ofuncs = &(struct nvkm_ofuncs) {
312 .ctor = nv50_fb_ctor,
313 .dtor = nv50_fb_dtor,
314 .init = nv50_fb_init,
315 .fini = _nvkm_fb_fini,
316 },
317 .base.memtype = nv50_fb_memtype_valid,
318 .base.ram = &nv50_ram_oclass,
319 .trap = 0x000707ff, 282 .trap = 0x000707ff,
320}.base.base; 283};
284
285int
286nv50_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
287{
288 return nv50_fb_new_(&nv50_fb, device, index, pfb);
289}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
index f3cde3f1f511..faa88c8c66fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
@@ -1,31 +1,21 @@
1#ifndef __NVKM_FB_NV50_H__ 1#ifndef __NVKM_FB_NV50_H__
2#define __NVKM_FB_NV50_H__ 2#define __NVKM_FB_NV50_H__
3#define nv50_fb(p) container_of((p), struct nv50_fb, base)
3#include "priv.h" 4#include "priv.h"
4 5
5struct nv50_fb_priv { 6struct nv50_fb {
7 const struct nv50_fb_func *func;
6 struct nvkm_fb base; 8 struct nvkm_fb base;
7 struct page *r100c08_page; 9 struct page *r100c08_page;
8 dma_addr_t r100c08; 10 dma_addr_t r100c08;
9}; 11};
10 12
11int nv50_fb_ctor(struct nvkm_object *, struct nvkm_object *, 13struct nv50_fb_func {
12 struct nvkm_oclass *, void *, u32, 14 int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
13 struct nvkm_object **);
14void nv50_fb_dtor(struct nvkm_object *);
15int nv50_fb_init(struct nvkm_object *);
16
17struct nv50_fb_impl {
18 struct nvkm_fb_impl base;
19 u32 trap; 15 u32 trap;
20}; 16};
21 17
22#define nv50_ram_create(p,e,o,d) \ 18int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, int index,
23 nv50_ram_create_((p), (e), (o), sizeof(**d), (void **)d) 19 struct nvkm_fb **pfb);
24int nv50_ram_create_(struct nvkm_object *, struct nvkm_object *,
25 struct nvkm_oclass *, int, void **);
26int nv50_ram_get(struct nvkm_fb *, u64 size, u32 align, u32 ncmin,
27 u32 memtype, struct nvkm_mem **);
28void nv50_ram_put(struct nvkm_fb *, struct nvkm_mem **);
29void __nv50_ram_put(struct nvkm_fb *, struct nvkm_mem *);
30extern int nv50_fb_memtype[0x80]; 20extern int nv50_fb_memtype[0x80];
31#endif 21#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 485c4b64819a..62b9feb531dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -1,73 +1,62 @@
1#ifndef __NVKM_FB_PRIV_H__ 1#ifndef __NVKM_FB_PRIV_H__
2#define __NVKM_FB_PRIV_H__ 2#define __NVKM_FB_PRIV_H__
3#define nvkm_fb(p) container_of((p), struct nvkm_fb, subdev)
3#include <subdev/fb.h> 4#include <subdev/fb.h>
4struct nvkm_bios; 5struct nvkm_bios;
5 6
6#define nvkm_ram_create(p,e,o,d) \ 7struct nvkm_fb_func {
7 nvkm_object_create_((p), (e), (o), 0, sizeof(**d), (void **)d) 8 void *(*dtor)(struct nvkm_fb *);
8#define nvkm_ram_destroy(p) \ 9 void (*init)(struct nvkm_fb *);
9 nvkm_object_destroy(&(p)->base) 10 void (*intr)(struct nvkm_fb *);
10#define nvkm_ram_init(p) \
11 nvkm_object_init(&(p)->base)
12#define nvkm_ram_fini(p,s) \
13 nvkm_object_fini(&(p)->base, (s))
14 11
15#define nvkm_ram_create_(p,e,o,s,d) \ 12 struct {
16 nvkm_object_create_((p), (e), (o), 0, (s), (void **)d) 13 int regions;
17#define _nvkm_ram_dtor nvkm_object_destroy 14 void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
18#define _nvkm_ram_init nvkm_object_init 15 u32 pitch, u32 flags, struct nvkm_fb_tile *);
19#define _nvkm_ram_fini nvkm_object_fini 16 void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
17 struct nvkm_fb_tile *);
18 void (*fini)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
19 void (*prog)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
20 } tile;
20 21
21extern struct nvkm_oclass nv04_ram_oclass; 22 int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
22extern struct nvkm_oclass nv10_ram_oclass;
23extern struct nvkm_oclass nv1a_ram_oclass;
24extern struct nvkm_oclass nv20_ram_oclass;
25extern struct nvkm_oclass nv40_ram_oclass;
26extern struct nvkm_oclass nv41_ram_oclass;
27extern struct nvkm_oclass nv44_ram_oclass;
28extern struct nvkm_oclass nv49_ram_oclass;
29extern struct nvkm_oclass nv4e_ram_oclass;
30extern struct nvkm_oclass nv50_ram_oclass;
31extern struct nvkm_oclass gt215_ram_oclass;
32extern struct nvkm_oclass mcp77_ram_oclass;
33extern struct nvkm_oclass gf100_ram_oclass;
34extern struct nvkm_oclass gk104_ram_oclass;
35extern struct nvkm_oclass gm107_ram_oclass;
36 23
37int nvkm_sddr2_calc(struct nvkm_ram *ram); 24 bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
38int nvkm_sddr3_calc(struct nvkm_ram *ram); 25};
39int nvkm_gddr3_calc(struct nvkm_ram *ram);
40int nvkm_gddr5_calc(struct nvkm_ram *ram, bool nuts);
41 26
42#define nvkm_fb_create(p,e,c,d) \ 27void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
43 nvkm_fb_create_((p), (e), (c), sizeof(**d), (void **)d) 28 int index, struct nvkm_fb *);
44#define nvkm_fb_destroy(p) ({ \ 29int nvkm_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *device,
45 struct nvkm_fb *pfb = (p); \ 30 int index, struct nvkm_fb **);
46 _nvkm_fb_dtor(nv_object(pfb)); \ 31int nvkm_fb_bios_memtype(struct nvkm_bios *);
47})
48#define nvkm_fb_init(p) ({ \
49 struct nvkm_fb *pfb = (p); \
50 _nvkm_fb_init(nv_object(pfb)); \
51})
52#define nvkm_fb_fini(p,s) ({ \
53 struct nvkm_fb *pfb = (p); \
54 _nvkm_fb_fini(nv_object(pfb), (s)); \
55})
56 32
57int nvkm_fb_create_(struct nvkm_object *, struct nvkm_object *, 33bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
58 struct nvkm_oclass *, int, void **);
59void _nvkm_fb_dtor(struct nvkm_object *);
60int _nvkm_fb_init(struct nvkm_object *);
61int _nvkm_fb_fini(struct nvkm_object *, bool);
62 34
63struct nvkm_fb_impl { 35void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
64 struct nvkm_oclass base; 36 u32 pitch, u32 flags, struct nvkm_fb_tile *);
65 struct nvkm_oclass *ram; 37void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
66 bool (*memtype)(struct nvkm_fb *, u32); 38void nv10_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
67};
68 39
69bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype); 40void nv20_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
70bool nv50_fb_memtype_valid(struct nvkm_fb *, u32 memtype); 41 u32 pitch, u32 flags, struct nvkm_fb_tile *);
42void nv20_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
43void nv20_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
44
45void nv30_fb_init(struct nvkm_fb *);
46void nv30_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
47 u32 pitch, u32 flags, struct nvkm_fb_tile *);
48
49void nv40_fb_tile_comp(struct nvkm_fb *, int i, u32 size, u32 flags,
50 struct nvkm_fb_tile *);
51
52void nv41_fb_init(struct nvkm_fb *);
53void nv41_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
54
55void nv44_fb_init(struct nvkm_fb *);
56void nv44_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
57
58void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
59 u32 pitch, u32 flags, struct nvkm_fb_tile *);
71 60
72int nvkm_fb_bios_memtype(struct nvkm_bios *); 61bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
73#endif 62#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
new file mode 100644
index 000000000000..c17d559dbfbe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
@@ -0,0 +1,100 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "ram.h"
25
26int
27nvkm_ram_init(struct nvkm_ram *ram)
28{
29 if (ram->func->init)
30 return ram->func->init(ram);
31 return 0;
32}
33
34void
35nvkm_ram_del(struct nvkm_ram **pram)
36{
37 struct nvkm_ram *ram = *pram;
38 if (ram && !WARN_ON(!ram->func)) {
39 if (ram->func->dtor)
40 *pram = ram->func->dtor(ram);
41 nvkm_mm_fini(&ram->tags);
42 nvkm_mm_fini(&ram->vram);
43 kfree(*pram);
44 *pram = NULL;
45 }
46}
47
48int
49nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
50 enum nvkm_ram_type type, u64 size, u32 tags,
51 struct nvkm_ram *ram)
52{
53 static const char *name[] = {
54 [NVKM_RAM_TYPE_UNKNOWN] = "of unknown memory type",
55 [NVKM_RAM_TYPE_STOLEN ] = "stolen system memory",
56 [NVKM_RAM_TYPE_SGRAM ] = "SGRAM",
57 [NVKM_RAM_TYPE_SDRAM ] = "SDRAM",
58 [NVKM_RAM_TYPE_DDR1 ] = "DDR1",
59 [NVKM_RAM_TYPE_DDR2 ] = "DDR2",
60 [NVKM_RAM_TYPE_DDR3 ] = "DDR3",
61 [NVKM_RAM_TYPE_GDDR2 ] = "GDDR2",
62 [NVKM_RAM_TYPE_GDDR3 ] = "GDDR3",
63 [NVKM_RAM_TYPE_GDDR4 ] = "GDDR4",
64 [NVKM_RAM_TYPE_GDDR5 ] = "GDDR5",
65 };
66 struct nvkm_subdev *subdev = &fb->subdev;
67 int ret;
68
69 nvkm_info(subdev, "%d MiB %s\n", (int)(size >> 20), name[type]);
70 ram->func = func;
71 ram->fb = fb;
72 ram->type = type;
73 ram->size = size;
74
75 if (!nvkm_mm_initialised(&ram->vram)) {
76 ret = nvkm_mm_init(&ram->vram, 0, size >> NVKM_RAM_MM_SHIFT, 1);
77 if (ret)
78 return ret;
79 }
80
81 if (!nvkm_mm_initialised(&ram->tags)) {
82 ret = nvkm_mm_init(&ram->tags, 0, tags ? ++tags : 0, 1);
83 if (ret)
84 return ret;
85
86 nvkm_debug(subdev, "%d compression tags\n", tags);
87 }
88
89 return 0;
90}
91
92int
93nvkm_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
94 enum nvkm_ram_type type, u64 size, u32 tags,
95 struct nvkm_ram **pram)
96{
97 if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
98 return -ENOMEM;
99 return nvkm_ram_ctor(func, fb, type, size, tags, *pram);
100}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
new file mode 100644
index 000000000000..f816cbf2ced3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -0,0 +1,50 @@
1#ifndef __NVKM_FB_RAM_PRIV_H__
2#define __NVKM_FB_RAM_PRIV_H__
3#include "priv.h"
4
/* Construction/teardown and init of the common nvkm_ram base object
 * (implemented in ram.c). */
5int nvkm_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
6 enum nvkm_ram_type, u64 size, u32 tags,
7 struct nvkm_ram *);
8int nvkm_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
9 enum nvkm_ram_type, u64 size, u32 tags,
10 struct nvkm_ram **);
11void nvkm_ram_del(struct nvkm_ram **);
12int nvkm_ram_init(struct nvkm_ram *);
13
/* Shared NV04-class function table — NOTE(review): presumably defined
 * in ramnv04.c and reused by several early boards; confirm. */
14extern const struct nvkm_ram_func nv04_ram_func;
15
/* NV50-family helpers reused by later chipsets (__nv50_ram_put is the
 * lock-free inner portion of nv50_ram_put). */
16int nv50_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
17 struct nvkm_ram *);
18int nv50_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
19void nv50_ram_put(struct nvkm_ram *, struct nvkm_mem **);
20void __nv50_ram_put(struct nvkm_ram *, struct nvkm_mem *);
21
/* GF100-family helpers; the extra u32 to gf100_ram_ctor is a register
 * address (partition mask) read during construction. */
22int gf100_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
23 u32, struct nvkm_ram *);
24int gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
25void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
26
27int gk104_ram_init(struct nvkm_ram *ram);
28
29/* RAM type-specific MR calculation routines */
30int nvkm_sddr2_calc(struct nvkm_ram *);
31int nvkm_sddr3_calc(struct nvkm_ram *);
32int nvkm_gddr3_calc(struct nvkm_ram *);
33int nvkm_gddr5_calc(struct nvkm_ram *, bool nuts);
34
/* Per-chipset RAM object constructors, selected by the fb code. */
35int nv04_ram_new(struct nvkm_fb *, struct nvkm_ram **);
36int nv10_ram_new(struct nvkm_fb *, struct nvkm_ram **);
37int nv1a_ram_new(struct nvkm_fb *, struct nvkm_ram **);
38int nv20_ram_new(struct nvkm_fb *, struct nvkm_ram **);
39int nv40_ram_new(struct nvkm_fb *, struct nvkm_ram **);
40int nv41_ram_new(struct nvkm_fb *, struct nvkm_ram **);
41int nv44_ram_new(struct nvkm_fb *, struct nvkm_ram **);
42int nv49_ram_new(struct nvkm_fb *, struct nvkm_ram **);
43int nv4e_ram_new(struct nvkm_fb *, struct nvkm_ram **);
44int nv50_ram_new(struct nvkm_fb *, struct nvkm_ram **);
45int gt215_ram_new(struct nvkm_fb *, struct nvkm_ram **);
46int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **);
47int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
48int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
49int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
50#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
index f343682b1387..9ef9d6aa3721 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
@@ -1,10 +1,11 @@
1#ifndef __NVKM_FBRAM_FUC_H__ 1#ifndef __NVKM_FBRAM_FUC_H__
2#define __NVKM_FBRAM_FUC_H__ 2#define __NVKM_FBRAM_FUC_H__
3#include <subdev/fb.h>
3#include <subdev/pmu.h> 4#include <subdev/pmu.h>
4 5
5struct ramfuc { 6struct ramfuc {
6 struct nvkm_memx *memx; 7 struct nvkm_memx *memx;
7 struct nvkm_fb *pfb; 8 struct nvkm_fb *fb;
8 int sequence; 9 int sequence;
9}; 10};
10 11
@@ -54,17 +55,14 @@ ramfuc_reg(u32 addr)
54} 55}
55 56
56static inline int 57static inline int
57ramfuc_init(struct ramfuc *ram, struct nvkm_fb *pfb) 58ramfuc_init(struct ramfuc *ram, struct nvkm_fb *fb)
58{ 59{
59 struct nvkm_pmu *pmu = nvkm_pmu(pfb); 60 int ret = nvkm_memx_init(fb->subdev.device->pmu, &ram->memx);
60 int ret;
61
62 ret = nvkm_memx_init(pmu, &ram->memx);
63 if (ret) 61 if (ret)
64 return ret; 62 return ret;
65 63
66 ram->sequence++; 64 ram->sequence++;
67 ram->pfb = pfb; 65 ram->fb = fb;
68 return 0; 66 return 0;
69} 67}
70 68
@@ -72,9 +70,9 @@ static inline int
72ramfuc_exec(struct ramfuc *ram, bool exec) 70ramfuc_exec(struct ramfuc *ram, bool exec)
73{ 71{
74 int ret = 0; 72 int ret = 0;
75 if (ram->pfb) { 73 if (ram->fb) {
76 ret = nvkm_memx_fini(&ram->memx, exec); 74 ret = nvkm_memx_fini(&ram->memx, exec);
77 ram->pfb = NULL; 75 ram->fb = NULL;
78 } 76 }
79 return ret; 77 return ret;
80} 78}
@@ -82,8 +80,9 @@ ramfuc_exec(struct ramfuc *ram, bool exec)
82static inline u32 80static inline u32
83ramfuc_rd32(struct ramfuc *ram, struct ramfuc_reg *reg) 81ramfuc_rd32(struct ramfuc *ram, struct ramfuc_reg *reg)
84{ 82{
83 struct nvkm_device *device = ram->fb->subdev.device;
85 if (reg->sequence != ram->sequence) 84 if (reg->sequence != ram->sequence)
86 reg->data = nv_rd32(ram->pfb, reg->addr); 85 reg->data = nvkm_rd32(device, reg->addr);
87 return reg->data; 86 return reg->data;
88} 87}
89 88
@@ -144,11 +143,9 @@ ramfuc_train(struct ramfuc *ram)
144} 143}
145 144
146static inline int 145static inline int
147ramfuc_train_result(struct nvkm_fb *pfb, u32 *result, u32 rsize) 146ramfuc_train_result(struct nvkm_fb *fb, u32 *result, u32 rsize)
148{ 147{
149 struct nvkm_pmu *pmu = nvkm_pmu(pfb); 148 return nvkm_memx_train_result(fb->subdev.device->pmu, result, rsize);
150
151 return nvkm_memx_train_result(pmu, result, rsize);
152} 149}
153 150
154static inline void 151static inline void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index de9f39569943..772425ca5a9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -21,10 +21,10 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "gf100.h" 24#define gf100_ram(p) container_of((p), struct gf100_ram, base)
25#include "ram.h"
25#include "ramfuc.h" 26#include "ramfuc.h"
26 27
27#include <core/device.h>
28#include <core/option.h> 28#include <core/option.h>
29#include <subdev/bios.h> 29#include <subdev/bios.h>
30#include <subdev/bios/pll.h> 30#include <subdev/bios/pll.h>
@@ -108,9 +108,10 @@ static void
108gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic) 108gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
109{ 109{
110 struct gf100_ram *ram = container_of(fuc, typeof(*ram), fuc); 110 struct gf100_ram *ram = container_of(fuc, typeof(*ram), fuc);
111 struct nvkm_fb *pfb = nvkm_fb(ram); 111 struct nvkm_fb *fb = ram->base.fb;
112 u32 part = nv_rd32(pfb, 0x022438), i; 112 struct nvkm_device *device = fb->subdev.device;
113 u32 mask = nv_rd32(pfb, 0x022554); 113 u32 part = nvkm_rd32(device, 0x022438), i;
114 u32 mask = nvkm_rd32(device, 0x022554);
114 u32 addr = 0x110974; 115 u32 addr = 0x110974;
115 116
116 ram_wr32(fuc, 0x10f910, magic); 117 ram_wr32(fuc, 0x10f910, magic);
@@ -124,12 +125,14 @@ gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
124} 125}
125 126
126static int 127static int
127gf100_ram_calc(struct nvkm_fb *pfb, u32 freq) 128gf100_ram_calc(struct nvkm_ram *base, u32 freq)
128{ 129{
129 struct nvkm_clk *clk = nvkm_clk(pfb); 130 struct gf100_ram *ram = gf100_ram(base);
130 struct nvkm_bios *bios = nvkm_bios(pfb);
131 struct gf100_ram *ram = (void *)pfb->ram;
132 struct gf100_ramfuc *fuc = &ram->fuc; 131 struct gf100_ramfuc *fuc = &ram->fuc;
132 struct nvkm_subdev *subdev = &ram->base.fb->subdev;
133 struct nvkm_device *device = subdev->device;
134 struct nvkm_clk *clk = device->clk;
135 struct nvkm_bios *bios = device->bios;
133 struct nvbios_ramcfg cfg; 136 struct nvbios_ramcfg cfg;
134 u8 ver, cnt, len, strap; 137 u8 ver, cnt, len, strap;
135 struct { 138 struct {
@@ -145,37 +148,37 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
145 rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size, 148 rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
146 &cnt, &ramcfg.size, &cfg); 149 &cnt, &ramcfg.size, &cfg);
147 if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) { 150 if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
148 nv_error(pfb, "invalid/missing rammap entry\n"); 151 nvkm_error(subdev, "invalid/missing rammap entry\n");
149 return -EINVAL; 152 return -EINVAL;
150 } 153 }
151 154
152 /* locate specific data set for the attached memory */ 155 /* locate specific data set for the attached memory */
153 strap = nvbios_ramcfg_index(nv_subdev(pfb)); 156 strap = nvbios_ramcfg_index(subdev);
154 if (strap >= cnt) { 157 if (strap >= cnt) {
155 nv_error(pfb, "invalid ramcfg strap\n"); 158 nvkm_error(subdev, "invalid ramcfg strap\n");
156 return -EINVAL; 159 return -EINVAL;
157 } 160 }
158 161
159 ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size); 162 ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
160 if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) { 163 if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
161 nv_error(pfb, "invalid/missing ramcfg entry\n"); 164 nvkm_error(subdev, "invalid/missing ramcfg entry\n");
162 return -EINVAL; 165 return -EINVAL;
163 } 166 }
164 167
165 /* lookup memory timings, if bios says they're present */ 168 /* lookup memory timings, if bios says they're present */
166 strap = nv_ro08(bios, ramcfg.data + 0x01); 169 strap = nvbios_rd08(bios, ramcfg.data + 0x01);
167 if (strap != 0xff) { 170 if (strap != 0xff) {
168 timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size, 171 timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
169 &cnt, &len); 172 &cnt, &len);
170 if (!timing.data || ver != 0x10 || timing.size < 0x19) { 173 if (!timing.data || ver != 0x10 || timing.size < 0x19) {
171 nv_error(pfb, "invalid/missing timing entry\n"); 174 nvkm_error(subdev, "invalid/missing timing entry\n");
172 return -EINVAL; 175 return -EINVAL;
173 } 176 }
174 } else { 177 } else {
175 timing.data = 0; 178 timing.data = 0;
176 } 179 }
177 180
178 ret = ram_init(fuc, pfb); 181 ret = ram_init(fuc, ram->base.fb);
179 if (ret) 182 if (ret)
180 return ret; 183 return ret;
181 184
@@ -184,9 +187,9 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
184 187
185 /* determine target mclk configuration */ 188 /* determine target mclk configuration */
186 if (!(ram_rd32(fuc, 0x137300) & 0x00000100)) 189 if (!(ram_rd32(fuc, 0x137300) & 0x00000100))
187 ref = clk->read(clk, nv_clk_src_sppll0); 190 ref = nvkm_clk_read(clk, nv_clk_src_sppll0);
188 else 191 else
189 ref = clk->read(clk, nv_clk_src_sppll1); 192 ref = nvkm_clk_read(clk, nv_clk_src_sppll1);
190 div = max(min((ref * 2) / freq, (u32)65), (u32)2) - 2; 193 div = max(min((ref * 2) / freq, (u32)65), (u32)2) - 2;
191 out = (ref * 2) / (div + 2); 194 out = (ref * 2) / (div + 2);
192 mode = freq != out; 195 mode = freq != out;
@@ -210,10 +213,10 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
210 213
211 if (mode == 1 && from == 0) { 214 if (mode == 1 && from == 0) {
212 /* calculate refpll */ 215 /* calculate refpll */
213 ret = gt215_pll_calc(nv_subdev(pfb), &ram->refpll, 216 ret = gt215_pll_calc(subdev, &ram->refpll, ram->mempll.refclk,
214 ram->mempll.refclk, &N1, NULL, &M1, &P); 217 &N1, NULL, &M1, &P);
215 if (ret <= 0) { 218 if (ret <= 0) {
216 nv_error(pfb, "unable to calc refpll\n"); 219 nvkm_error(subdev, "unable to calc refpll\n");
217 return ret ? ret : -ERANGE; 220 return ret ? ret : -ERANGE;
218 } 221 }
219 222
@@ -225,10 +228,10 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
225 ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000); 228 ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
226 229
227 /* calculate mempll */ 230 /* calculate mempll */
228 ret = gt215_pll_calc(nv_subdev(pfb), &ram->mempll, freq, 231 ret = gt215_pll_calc(subdev, &ram->mempll, freq,
229 &N1, NULL, &M1, &P); 232 &N1, NULL, &M1, &P);
230 if (ret <= 0) { 233 if (ret <= 0) {
231 nv_error(pfb, "unable to calc refpll\n"); 234 nvkm_error(subdev, "unable to calc refpll\n");
232 return ret ? ret : -ERANGE; 235 return ret ? ret : -ERANGE;
233 } 236 }
234 237
@@ -402,49 +405,48 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
402} 405}
403 406
404static int 407static int
405gf100_ram_prog(struct nvkm_fb *pfb) 408gf100_ram_prog(struct nvkm_ram *base)
406{ 409{
407 struct nvkm_device *device = nv_device(pfb); 410 struct gf100_ram *ram = gf100_ram(base);
408 struct gf100_ram *ram = (void *)pfb->ram; 411 struct nvkm_device *device = ram->base.fb->subdev.device;
409 struct gf100_ramfuc *fuc = &ram->fuc; 412 ram_exec(&ram->fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
410 ram_exec(fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
411 return 0; 413 return 0;
412} 414}
413 415
414static void 416static void
415gf100_ram_tidy(struct nvkm_fb *pfb) 417gf100_ram_tidy(struct nvkm_ram *base)
416{ 418{
417 struct gf100_ram *ram = (void *)pfb->ram; 419 struct gf100_ram *ram = gf100_ram(base);
418 struct gf100_ramfuc *fuc = &ram->fuc; 420 ram_exec(&ram->fuc, false);
419 ram_exec(fuc, false);
420} 421}
421 422
422extern const u8 gf100_pte_storage_type_map[256]; 423extern const u8 gf100_pte_storage_type_map[256];
423 424
424void 425void
425gf100_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem) 426gf100_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
426{ 427{
427 struct nvkm_ltc *ltc = nvkm_ltc(pfb); 428 struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
428 struct nvkm_mem *mem = *pmem; 429 struct nvkm_mem *mem = *pmem;
429 430
430 *pmem = NULL; 431 *pmem = NULL;
431 if (unlikely(mem == NULL)) 432 if (unlikely(mem == NULL))
432 return; 433 return;
433 434
434 mutex_lock(&pfb->base.mutex); 435 mutex_lock(&ram->fb->subdev.mutex);
435 if (mem->tag) 436 if (mem->tag)
436 ltc->tags_free(ltc, &mem->tag); 437 nvkm_ltc_tags_free(ltc, &mem->tag);
437 __nv50_ram_put(pfb, mem); 438 __nv50_ram_put(ram, mem);
438 mutex_unlock(&pfb->base.mutex); 439 mutex_unlock(&ram->fb->subdev.mutex);
439 440
440 kfree(mem); 441 kfree(mem);
441} 442}
442 443
443int 444int
444gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin, 445gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
445 u32 memtype, struct nvkm_mem **pmem) 446 u32 memtype, struct nvkm_mem **pmem)
446{ 447{
447 struct nvkm_mm *mm = &pfb->vram; 448 struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
449 struct nvkm_mm *mm = &ram->vram;
448 struct nvkm_mm_node *r; 450 struct nvkm_mm_node *r;
449 struct nvkm_mem *mem; 451 struct nvkm_mem *mem;
450 int type = (memtype & 0x0ff); 452 int type = (memtype & 0x0ff);
@@ -452,9 +454,9 @@ gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
452 const bool comp = gf100_pte_storage_type_map[type] != type; 454 const bool comp = gf100_pte_storage_type_map[type] != type;
453 int ret; 455 int ret;
454 456
455 size >>= 12; 457 size >>= NVKM_RAM_MM_SHIFT;
456 align >>= 12; 458 align >>= NVKM_RAM_MM_SHIFT;
457 ncmin >>= 12; 459 ncmin >>= NVKM_RAM_MM_SHIFT;
458 if (!ncmin) 460 if (!ncmin)
459 ncmin = size; 461 ncmin = size;
460 462
@@ -465,14 +467,12 @@ gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
465 INIT_LIST_HEAD(&mem->regions); 467 INIT_LIST_HEAD(&mem->regions);
466 mem->size = size; 468 mem->size = size;
467 469
468 mutex_lock(&pfb->base.mutex); 470 mutex_lock(&ram->fb->subdev.mutex);
469 if (comp) { 471 if (comp) {
470 struct nvkm_ltc *ltc = nvkm_ltc(pfb);
471
472 /* compression only works with lpages */ 472 /* compression only works with lpages */
473 if (align == (1 << (17 - 12))) { 473 if (align == (1 << (17 - NVKM_RAM_MM_SHIFT))) {
474 int n = size >> 5; 474 int n = size >> 5;
475 ltc->tags_alloc(ltc, n, &mem->tag); 475 nvkm_ltc_tags_alloc(ltc, n, &mem->tag);
476 } 476 }
477 477
478 if (unlikely(!mem->tag)) 478 if (unlikely(!mem->tag))
@@ -486,178 +486,173 @@ gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
486 else 486 else
487 ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r); 487 ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r);
488 if (ret) { 488 if (ret) {
489 mutex_unlock(&pfb->base.mutex); 489 mutex_unlock(&ram->fb->subdev.mutex);
490 pfb->ram->put(pfb, &mem); 490 ram->func->put(ram, &mem);
491 return ret; 491 return ret;
492 } 492 }
493 493
494 list_add_tail(&r->rl_entry, &mem->regions); 494 list_add_tail(&r->rl_entry, &mem->regions);
495 size -= r->length; 495 size -= r->length;
496 } while (size); 496 } while (size);
497 mutex_unlock(&pfb->base.mutex); 497 mutex_unlock(&ram->fb->subdev.mutex);
498 498
499 r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry); 499 r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
500 mem->offset = (u64)r->offset << 12; 500 mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
501 *pmem = mem; 501 *pmem = mem;
502 return 0; 502 return 0;
503} 503}
504 504
505int 505static int
506gf100_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine, 506gf100_ram_init(struct nvkm_ram *base)
507 struct nvkm_oclass *oclass, u32 maskaddr, int size,
508 void **pobject)
509{ 507{
510 struct nvkm_fb *pfb = nvkm_fb(parent); 508 static const u8 train0[] = {
511 struct nvkm_bios *bios = nvkm_bios(pfb); 509 0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
512 struct nvkm_ram *ram; 510 0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
513 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ 511 };
514 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ 512 static const u32 train1[] = {
515 u32 parts = nv_rd32(pfb, 0x022438); 513 0x00000000, 0xffffffff,
516 u32 pmask = nv_rd32(pfb, maskaddr); 514 0x55555555, 0xaaaaaaaa,
517 u32 bsize = nv_rd32(pfb, 0x10f20c); 515 0x33333333, 0xcccccccc,
518 u32 offset, length; 516 0xf0f0f0f0, 0x0f0f0f0f,
519 bool uniform = true; 517 0x00ff00ff, 0xff00ff00,
520 int ret, part; 518 0x0000ffff, 0xffff0000,
519 };
520 struct gf100_ram *ram = gf100_ram(base);
521 struct nvkm_device *device = ram->base.fb->subdev.device;
522 int i;
521 523
522 ret = nvkm_ram_create_(parent, engine, oclass, size, pobject); 524 switch (ram->base.type) {
523 ram = *pobject; 525 case NVKM_RAM_TYPE_GDDR5:
524 if (ret) 526 break;
525 return ret; 527 default:
528 return 0;
529 }
526 530
527 nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800)); 531 /* prepare for ddr link training, and load training patterns */
528 nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask); 532 for (i = 0; i < 0x30; i++) {
533 nvkm_wr32(device, 0x10f968, 0x00000000 | (i << 8));
534 nvkm_wr32(device, 0x10f96c, 0x00000000 | (i << 8));
535 nvkm_wr32(device, 0x10f920, 0x00000100 | train0[i % 12]);
536 nvkm_wr32(device, 0x10f924, 0x00000100 | train0[i % 12]);
537 nvkm_wr32(device, 0x10f918, train1[i % 12]);
538 nvkm_wr32(device, 0x10f91c, train1[i % 12]);
539 nvkm_wr32(device, 0x10f920, 0x00000000 | train0[i % 12]);
540 nvkm_wr32(device, 0x10f924, 0x00000000 | train0[i % 12]);
541 nvkm_wr32(device, 0x10f918, train1[i % 12]);
542 nvkm_wr32(device, 0x10f91c, train1[i % 12]);
543 }
529 544
530 ram->type = nvkm_fb_bios_memtype(bios); 545 return 0;
531 ram->ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1; 546}
547
548static const struct nvkm_ram_func
549gf100_ram_func = {
550 .init = gf100_ram_init,
551 .get = gf100_ram_get,
552 .put = gf100_ram_put,
553 .calc = gf100_ram_calc,
554 .prog = gf100_ram_prog,
555 .tidy = gf100_ram_tidy,
556};
557
558int
559gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
560 u32 maskaddr, struct nvkm_ram *ram)
561{
562 struct nvkm_subdev *subdev = &fb->subdev;
563 struct nvkm_device *device = subdev->device;
564 struct nvkm_bios *bios = device->bios;
565 const u32 rsvd_head = ( 256 * 1024); /* vga memory */
566 const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
567 u32 parts = nvkm_rd32(device, 0x022438);
568 u32 pmask = nvkm_rd32(device, maskaddr);
569 u64 bsize = (u64)nvkm_rd32(device, 0x10f20c) << 20;
570 u64 psize, size = 0;
571 enum nvkm_ram_type type = nvkm_fb_bios_memtype(bios);
572 bool uniform = true;
573 int ret, i;
574
575 nvkm_debug(subdev, "100800: %08x\n", nvkm_rd32(device, 0x100800));
576 nvkm_debug(subdev, "parts %08x mask %08x\n", parts, pmask);
532 577
533 /* read amount of vram attached to each memory controller */ 578 /* read amount of vram attached to each memory controller */
534 for (part = 0; part < parts; part++) { 579 for (i = 0; i < parts; i++) {
535 if (!(pmask & (1 << part))) { 580 if (pmask & (1 << i))
536 u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000)); 581 continue;
537 if (psize != bsize) { 582
538 if (psize < bsize) 583 psize = (u64)nvkm_rd32(device, 0x11020c + (i * 0x1000)) << 20;
539 bsize = psize; 584 if (psize != bsize) {
540 uniform = false; 585 if (psize < bsize)
541 } 586 bsize = psize;
542 587 uniform = false;
543 nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
544 ram->size += (u64)psize << 20;
545 } 588 }
589
590 nvkm_debug(subdev, "%d: %d MiB\n", i, (u32)(psize >> 20));
591 size += psize;
546 } 592 }
547 593
594 ret = nvkm_ram_ctor(func, fb, type, size, 0, ram);
595 if (ret)
596 return ret;
597
598 nvkm_mm_fini(&ram->vram);
599
548 /* if all controllers have the same amount attached, there's no holes */ 600 /* if all controllers have the same amount attached, there's no holes */
549 if (uniform) { 601 if (uniform) {
550 offset = rsvd_head; 602 ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
551 length = (ram->size >> 12) - rsvd_head - rsvd_tail; 603 (size - rsvd_head - rsvd_tail) >>
552 ret = nvkm_mm_init(&pfb->vram, offset, length, 1); 604 NVKM_RAM_MM_SHIFT, 1);
605 if (ret)
606 return ret;
553 } else { 607 } else {
554 /* otherwise, address lowest common amount from 0GiB */ 608 /* otherwise, address lowest common amount from 0GiB */
555 ret = nvkm_mm_init(&pfb->vram, rsvd_head, 609 ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
556 (bsize << 8) * parts - rsvd_head, 1); 610 ((bsize * parts) - rsvd_head) >>
611 NVKM_RAM_MM_SHIFT, 1);
557 if (ret) 612 if (ret)
558 return ret; 613 return ret;
559 614
560 /* and the rest starting from (8GiB + common_size) */ 615 /* and the rest starting from (8GiB + common_size) */
561 offset = (0x0200000000ULL >> 12) + (bsize << 8); 616 ret = nvkm_mm_init(&ram->vram, (0x0200000000ULL + bsize) >>
562 length = (ram->size >> 12) - ((bsize * parts) << 8) - rsvd_tail; 617 NVKM_RAM_MM_SHIFT,
563 618 (size - (bsize * parts) - rsvd_tail) >>
564 ret = nvkm_mm_init(&pfb->vram, offset, length, 1); 619 NVKM_RAM_MM_SHIFT, 1);
565 if (ret) 620 if (ret)
566 nvkm_mm_fini(&pfb->vram); 621 return ret;
567 }
568
569 if (ret)
570 return ret;
571
572 ram->get = gf100_ram_get;
573 ram->put = gf100_ram_put;
574 return 0;
575}
576
577static int
578gf100_ram_init(struct nvkm_object *object)
579{
580 struct nvkm_fb *pfb = (void *)object->parent;
581 struct gf100_ram *ram = (void *)object;
582 int ret, i;
583
584 ret = nvkm_ram_init(&ram->base);
585 if (ret)
586 return ret;
587
588 /* prepare for ddr link training, and load training patterns */
589 switch (ram->base.type) {
590 case NV_MEM_TYPE_GDDR5: {
591 static const u8 train0[] = {
592 0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
593 0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
594 };
595 static const u32 train1[] = {
596 0x00000000, 0xffffffff,
597 0x55555555, 0xaaaaaaaa,
598 0x33333333, 0xcccccccc,
599 0xf0f0f0f0, 0x0f0f0f0f,
600 0x00ff00ff, 0xff00ff00,
601 0x0000ffff, 0xffff0000,
602 };
603
604 for (i = 0; i < 0x30; i++) {
605 nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
606 nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
607 nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
608 nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
609 nv_wr32(pfb, 0x10f918, train1[i % 12]);
610 nv_wr32(pfb, 0x10f91c, train1[i % 12]);
611 nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
612 nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
613 nv_wr32(pfb, 0x10f918, train1[i % 12]);
614 nv_wr32(pfb, 0x10f91c, train1[i % 12]);
615 }
616 } break;
617 default:
618 break;
619 } 622 }
620 623
624 ram->ranks = (nvkm_rd32(device, 0x10f200) & 0x00000004) ? 2 : 1;
621 return 0; 625 return 0;
622} 626}
623 627
624static int 628int
625gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 629gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
626 struct nvkm_oclass *oclass, void *data, u32 size,
627 struct nvkm_object **pobject)
628{ 630{
629 struct nvkm_bios *bios = nvkm_bios(parent); 631 struct nvkm_subdev *subdev = &fb->subdev;
632 struct nvkm_bios *bios = subdev->device->bios;
630 struct gf100_ram *ram; 633 struct gf100_ram *ram;
631 int ret; 634 int ret;
632 635
633 ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram); 636 if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
634 *pobject = nv_object(ram); 637 return -ENOMEM;
638 *pram = &ram->base;
639
640 ret = gf100_ram_ctor(&gf100_ram_func, fb, 0x022554, &ram->base);
635 if (ret) 641 if (ret)
636 return ret; 642 return ret;
637 643
638 ret = nvbios_pll_parse(bios, 0x0c, &ram->refpll); 644 ret = nvbios_pll_parse(bios, 0x0c, &ram->refpll);
639 if (ret) { 645 if (ret) {
640 nv_error(ram, "mclk refpll data not found\n"); 646 nvkm_error(subdev, "mclk refpll data not found\n");
641 return ret; 647 return ret;
642 } 648 }
643 649
644 ret = nvbios_pll_parse(bios, 0x04, &ram->mempll); 650 ret = nvbios_pll_parse(bios, 0x04, &ram->mempll);
645 if (ret) { 651 if (ret) {
646 nv_error(ram, "mclk pll data not found\n"); 652 nvkm_error(subdev, "mclk pll data not found\n");
647 return ret; 653 return ret;
648 } 654 }
649 655
650 switch (ram->base.type) {
651 case NV_MEM_TYPE_GDDR5:
652 ram->base.calc = gf100_ram_calc;
653 ram->base.prog = gf100_ram_prog;
654 ram->base.tidy = gf100_ram_tidy;
655 break;
656 default:
657 nv_warn(ram, "reclocking of this ram type unsupported\n");
658 return 0;
659 }
660
661 ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20); 656 ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
662 ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24); 657 ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
663 ram->fuc.r_0x137320 = ramfuc_reg(0x137320); 658 ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
@@ -718,14 +713,3 @@ gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
718 ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4); 713 ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
719 return 0; 714 return 0;
720} 715}
721
722struct nvkm_oclass
723gf100_ram_oclass = {
724 .handle = 0,
725 .ofuncs = &(struct nvkm_ofuncs) {
726 .ctor = gf100_ram_ctor,
727 .dtor = _nvkm_ram_dtor,
728 .init = gf100_ram_init,
729 .fini = _nvkm_ram_fini,
730 }
731};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 1ef15c3e6a81..989355622aac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -21,10 +21,10 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#define gk104_ram(p) container_of((p), struct gk104_ram, base)
25#include "ram.h"
24#include "ramfuc.h" 26#include "ramfuc.h"
25#include "gf100.h"
26 27
27#include <core/device.h>
28#include <core/option.h> 28#include <core/option.h>
29#include <subdev/bios.h> 29#include <subdev/bios.h>
30#include <subdev/bios/init.h> 30#include <subdev/bios/init.h>
@@ -229,8 +229,9 @@ static void
229gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg, 229gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
230 u32 _mask, u32 _data, u32 _copy) 230 u32 _mask, u32 _data, u32 _copy)
231{ 231{
232 struct gk104_fb_priv *priv = (void *)nvkm_fb(ram); 232 struct nvkm_fb *fb = ram->base.fb;
233 struct ramfuc *fuc = &ram->fuc.base; 233 struct ramfuc *fuc = &ram->fuc.base;
234 struct nvkm_device *device = fb->subdev.device;
234 u32 addr = 0x110000 + (reg->addr & 0xfff); 235 u32 addr = 0x110000 + (reg->addr & 0xfff);
235 u32 mask = _mask | _copy; 236 u32 mask = _mask | _copy;
236 u32 data = (_data & _mask) | (reg->data & _copy); 237 u32 data = (_data & _mask) | (reg->data & _copy);
@@ -238,7 +239,7 @@ gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
238 239
239 for (i = 0; i < 16; i++, addr += 0x1000) { 240 for (i = 0; i < 16; i++, addr += 0x1000) {
240 if (ram->pnuts & (1 << i)) { 241 if (ram->pnuts & (1 << i)) {
241 u32 prev = nv_rd32(priv, addr); 242 u32 prev = nvkm_rd32(device, addr);
242 u32 next = (prev & ~mask) | data; 243 u32 next = (prev & ~mask) | data;
243 nvkm_memx_wr32(fuc->memx, addr, next); 244 nvkm_memx_wr32(fuc->memx, addr, next);
244 } 245 }
@@ -248,9 +249,8 @@ gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
248 gk104_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c)) 249 gk104_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c))
249 250
250static int 251static int
251gk104_ram_calc_gddr5(struct nvkm_fb *pfb, u32 freq) 252gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
252{ 253{
253 struct gk104_ram *ram = (void *)pfb->ram;
254 struct gk104_ramfuc *fuc = &ram->fuc; 254 struct gk104_ramfuc *fuc = &ram->fuc;
255 struct nvkm_ram_data *next = ram->base.next; 255 struct nvkm_ram_data *next = ram->base.next;
256 int vc = !next->bios.ramcfg_11_02_08; 256 int vc = !next->bios.ramcfg_11_02_08;
@@ -674,9 +674,8 @@ gk104_ram_calc_gddr5(struct nvkm_fb *pfb, u32 freq)
674 ******************************************************************************/ 674 ******************************************************************************/
675 675
676static int 676static int
677gk104_ram_calc_sddr3(struct nvkm_fb *pfb, u32 freq) 677gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
678{ 678{
679 struct gk104_ram *ram = (void *)pfb->ram;
680 struct gk104_ramfuc *fuc = &ram->fuc; 679 struct gk104_ramfuc *fuc = &ram->fuc;
681 const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1); 680 const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
682 const u32 runk0 = ram->fN1 << 16; 681 const u32 runk0 = ram->fN1 << 16;
@@ -926,9 +925,9 @@ gk104_ram_calc_sddr3(struct nvkm_fb *pfb, u32 freq)
926 ******************************************************************************/ 925 ******************************************************************************/
927 926
928static int 927static int
929gk104_ram_calc_data(struct nvkm_fb *pfb, u32 khz, struct nvkm_ram_data *data) 928gk104_ram_calc_data(struct gk104_ram *ram, u32 khz, struct nvkm_ram_data *data)
930{ 929{
931 struct gk104_ram *ram = (void *)pfb->ram; 930 struct nvkm_subdev *subdev = &ram->base.fb->subdev;
932 struct nvkm_ram_data *cfg; 931 struct nvkm_ram_data *cfg;
933 u32 mhz = khz / 1000; 932 u32 mhz = khz / 1000;
934 933
@@ -941,19 +940,19 @@ gk104_ram_calc_data(struct nvkm_fb *pfb, u32 khz, struct nvkm_ram_data *data)
941 } 940 }
942 } 941 }
943 942
944 nv_error(ram, "ramcfg data for %dMHz not found\n", mhz); 943 nvkm_error(subdev, "ramcfg data for %dMHz not found\n", mhz);
945 return -EINVAL; 944 return -EINVAL;
946} 945}
947 946
948static int 947static int
949gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next) 948gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next)
950{ 949{
951 struct gk104_ram *ram = (void *)pfb->ram;
952 struct gk104_ramfuc *fuc = &ram->fuc; 950 struct gk104_ramfuc *fuc = &ram->fuc;
951 struct nvkm_subdev *subdev = &ram->base.fb->subdev;
953 int refclk, i; 952 int refclk, i;
954 int ret; 953 int ret;
955 954
956 ret = ram_init(fuc, pfb); 955 ret = ram_init(fuc, ram->base.fb);
957 if (ret) 956 if (ret)
958 return ret; 957 return ret;
959 958
@@ -973,11 +972,11 @@ gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
973 refclk = fuc->mempll.refclk; 972 refclk = fuc->mempll.refclk;
974 973
975 /* calculate refpll coefficients */ 974 /* calculate refpll coefficients */
976 ret = gt215_pll_calc(nv_subdev(pfb), &fuc->refpll, refclk, &ram->N1, 975 ret = gt215_pll_calc(subdev, &fuc->refpll, refclk, &ram->N1,
977 &ram->fN1, &ram->M1, &ram->P1); 976 &ram->fN1, &ram->M1, &ram->P1);
978 fuc->mempll.refclk = ret; 977 fuc->mempll.refclk = ret;
979 if (ret <= 0) { 978 if (ret <= 0) {
980 nv_error(pfb, "unable to calc refpll\n"); 979 nvkm_error(subdev, "unable to calc refpll\n");
981 return -EINVAL; 980 return -EINVAL;
982 } 981 }
983 982
@@ -990,10 +989,10 @@ gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
990 fuc->mempll.min_p = 1; 989 fuc->mempll.min_p = 1;
991 fuc->mempll.max_p = 2; 990 fuc->mempll.max_p = 2;
992 991
993 ret = gt215_pll_calc(nv_subdev(pfb), &fuc->mempll, next->freq, 992 ret = gt215_pll_calc(subdev, &fuc->mempll, next->freq,
994 &ram->N2, NULL, &ram->M2, &ram->P2); 993 &ram->N2, NULL, &ram->M2, &ram->P2);
995 if (ret <= 0) { 994 if (ret <= 0) {
996 nv_error(pfb, "unable to calc mempll\n"); 995 nvkm_error(subdev, "unable to calc mempll\n");
997 return -EINVAL; 996 return -EINVAL;
998 } 997 }
999 } 998 }
@@ -1005,15 +1004,15 @@ gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
1005 ram->base.freq = next->freq; 1004 ram->base.freq = next->freq;
1006 1005
1007 switch (ram->base.type) { 1006 switch (ram->base.type) {
1008 case NV_MEM_TYPE_DDR3: 1007 case NVKM_RAM_TYPE_DDR3:
1009 ret = nvkm_sddr3_calc(&ram->base); 1008 ret = nvkm_sddr3_calc(&ram->base);
1010 if (ret == 0) 1009 if (ret == 0)
1011 ret = gk104_ram_calc_sddr3(pfb, next->freq); 1010 ret = gk104_ram_calc_sddr3(ram, next->freq);
1012 break; 1011 break;
1013 case NV_MEM_TYPE_GDDR5: 1012 case NVKM_RAM_TYPE_GDDR5:
1014 ret = nvkm_gddr5_calc(&ram->base, ram->pnuts != 0); 1013 ret = nvkm_gddr5_calc(&ram->base, ram->pnuts != 0);
1015 if (ret == 0) 1014 if (ret == 0)
1016 ret = gk104_ram_calc_gddr5(pfb, next->freq); 1015 ret = gk104_ram_calc_gddr5(ram, next->freq);
1017 break; 1016 break;
1018 default: 1017 default:
1019 ret = -ENOSYS; 1018 ret = -ENOSYS;
@@ -1024,21 +1023,22 @@ gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
1024} 1023}
1025 1024
1026static int 1025static int
1027gk104_ram_calc(struct nvkm_fb *pfb, u32 freq) 1026gk104_ram_calc(struct nvkm_ram *base, u32 freq)
1028{ 1027{
1029 struct nvkm_clk *clk = nvkm_clk(pfb); 1028 struct gk104_ram *ram = gk104_ram(base);
1030 struct gk104_ram *ram = (void *)pfb->ram; 1029 struct nvkm_clk *clk = ram->base.fb->subdev.device->clk;
1031 struct nvkm_ram_data *xits = &ram->base.xition; 1030 struct nvkm_ram_data *xits = &ram->base.xition;
1032 struct nvkm_ram_data *copy; 1031 struct nvkm_ram_data *copy;
1033 int ret; 1032 int ret;
1034 1033
1035 if (ram->base.next == NULL) { 1034 if (ram->base.next == NULL) {
1036 ret = gk104_ram_calc_data(pfb, clk->read(clk, nv_clk_src_mem), 1035 ret = gk104_ram_calc_data(ram,
1036 nvkm_clk_read(clk, nv_clk_src_mem),
1037 &ram->base.former); 1037 &ram->base.former);
1038 if (ret) 1038 if (ret)
1039 return ret; 1039 return ret;
1040 1040
1041 ret = gk104_ram_calc_data(pfb, freq, &ram->base.target); 1041 ret = gk104_ram_calc_data(ram, freq, &ram->base.target);
1042 if (ret) 1042 if (ret)
1043 return ret; 1043 return ret;
1044 1044
@@ -1062,13 +1062,13 @@ gk104_ram_calc(struct nvkm_fb *pfb, u32 freq)
1062 ram->base.next = &ram->base.target; 1062 ram->base.next = &ram->base.target;
1063 } 1063 }
1064 1064
1065 return gk104_ram_calc_xits(pfb, ram->base.next); 1065 return gk104_ram_calc_xits(ram, ram->base.next);
1066} 1066}
1067 1067
1068static void 1068static void
1069gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq) 1069gk104_ram_prog_0(struct gk104_ram *ram, u32 freq)
1070{ 1070{
1071 struct gk104_ram *ram = (void *)pfb->ram; 1071 struct nvkm_device *device = ram->base.fb->subdev.device;
1072 struct nvkm_ram_data *cfg; 1072 struct nvkm_ram_data *cfg;
1073 u32 mhz = freq / 1000; 1073 u32 mhz = freq / 1000;
1074 u32 mask, data; 1074 u32 mask, data;
@@ -1090,31 +1090,31 @@ gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
1090 data |= cfg->bios.rammap_11_09_01ff; 1090 data |= cfg->bios.rammap_11_09_01ff;
1091 mask |= 0x000001ff; 1091 mask |= 0x000001ff;
1092 } 1092 }
1093 nv_mask(pfb, 0x10f468, mask, data); 1093 nvkm_mask(device, 0x10f468, mask, data);
1094 1094
1095 if (mask = 0, data = 0, ram->diff.rammap_11_0a_0400) { 1095 if (mask = 0, data = 0, ram->diff.rammap_11_0a_0400) {
1096 data |= cfg->bios.rammap_11_0a_0400; 1096 data |= cfg->bios.rammap_11_0a_0400;
1097 mask |= 0x00000001; 1097 mask |= 0x00000001;
1098 } 1098 }
1099 nv_mask(pfb, 0x10f420, mask, data); 1099 nvkm_mask(device, 0x10f420, mask, data);
1100 1100
1101 if (mask = 0, data = 0, ram->diff.rammap_11_0a_0800) { 1101 if (mask = 0, data = 0, ram->diff.rammap_11_0a_0800) {
1102 data |= cfg->bios.rammap_11_0a_0800; 1102 data |= cfg->bios.rammap_11_0a_0800;
1103 mask |= 0x00000001; 1103 mask |= 0x00000001;
1104 } 1104 }
1105 nv_mask(pfb, 0x10f430, mask, data); 1105 nvkm_mask(device, 0x10f430, mask, data);
1106 1106
1107 if (mask = 0, data = 0, ram->diff.rammap_11_0b_01f0) { 1107 if (mask = 0, data = 0, ram->diff.rammap_11_0b_01f0) {
1108 data |= cfg->bios.rammap_11_0b_01f0; 1108 data |= cfg->bios.rammap_11_0b_01f0;
1109 mask |= 0x0000001f; 1109 mask |= 0x0000001f;
1110 } 1110 }
1111 nv_mask(pfb, 0x10f400, mask, data); 1111 nvkm_mask(device, 0x10f400, mask, data);
1112 1112
1113 if (mask = 0, data = 0, ram->diff.rammap_11_0b_0200) { 1113 if (mask = 0, data = 0, ram->diff.rammap_11_0b_0200) {
1114 data |= cfg->bios.rammap_11_0b_0200 << 9; 1114 data |= cfg->bios.rammap_11_0b_0200 << 9;
1115 mask |= 0x00000200; 1115 mask |= 0x00000200;
1116 } 1116 }
1117 nv_mask(pfb, 0x10f410, mask, data); 1117 nvkm_mask(device, 0x10f410, mask, data);
1118 1118
1119 if (mask = 0, data = 0, ram->diff.rammap_11_0d) { 1119 if (mask = 0, data = 0, ram->diff.rammap_11_0d) {
1120 data |= cfg->bios.rammap_11_0d << 16; 1120 data |= cfg->bios.rammap_11_0d << 16;
@@ -1124,7 +1124,7 @@ gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
1124 data |= cfg->bios.rammap_11_0f << 8; 1124 data |= cfg->bios.rammap_11_0f << 8;
1125 mask |= 0x0000ff00; 1125 mask |= 0x0000ff00;
1126 } 1126 }
1127 nv_mask(pfb, 0x10f440, mask, data); 1127 nvkm_mask(device, 0x10f440, mask, data);
1128 1128
1129 if (mask = 0, data = 0, ram->diff.rammap_11_0e) { 1129 if (mask = 0, data = 0, ram->diff.rammap_11_0e) {
1130 data |= cfg->bios.rammap_11_0e << 8; 1130 data |= cfg->bios.rammap_11_0e << 8;
@@ -1138,15 +1138,15 @@ gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
1138 data |= cfg->bios.rammap_11_0b_0400 << 5; 1138 data |= cfg->bios.rammap_11_0b_0400 << 5;
1139 mask |= 0x00000020; 1139 mask |= 0x00000020;
1140 } 1140 }
1141 nv_mask(pfb, 0x10f444, mask, data); 1141 nvkm_mask(device, 0x10f444, mask, data);
1142} 1142}
1143 1143
1144static int 1144static int
1145gk104_ram_prog(struct nvkm_fb *pfb) 1145gk104_ram_prog(struct nvkm_ram *base)
1146{ 1146{
1147 struct nvkm_device *device = nv_device(pfb); 1147 struct gk104_ram *ram = gk104_ram(base);
1148 struct gk104_ram *ram = (void *)pfb->ram;
1149 struct gk104_ramfuc *fuc = &ram->fuc; 1148 struct gk104_ramfuc *fuc = &ram->fuc;
1149 struct nvkm_device *device = ram->base.fb->subdev.device;
1150 struct nvkm_ram_data *next = ram->base.next; 1150 struct nvkm_ram_data *next = ram->base.next;
1151 1151
1152 if (!nvkm_boolopt(device->cfgopt, "NvMemExec", true)) { 1152 if (!nvkm_boolopt(device->cfgopt, "NvMemExec", true)) {
@@ -1154,20 +1154,19 @@ gk104_ram_prog(struct nvkm_fb *pfb)
1154 return (ram->base.next == &ram->base.xition); 1154 return (ram->base.next == &ram->base.xition);
1155 } 1155 }
1156 1156
1157 gk104_ram_prog_0(pfb, 1000); 1157 gk104_ram_prog_0(ram, 1000);
1158 ram_exec(fuc, true); 1158 ram_exec(fuc, true);
1159 gk104_ram_prog_0(pfb, next->freq); 1159 gk104_ram_prog_0(ram, next->freq);
1160 1160
1161 return (ram->base.next == &ram->base.xition); 1161 return (ram->base.next == &ram->base.xition);
1162} 1162}
1163 1163
1164static void 1164static void
1165gk104_ram_tidy(struct nvkm_fb *pfb) 1165gk104_ram_tidy(struct nvkm_ram *base)
1166{ 1166{
1167 struct gk104_ram *ram = (void *)pfb->ram; 1167 struct gk104_ram *ram = gk104_ram(base);
1168 struct gk104_ramfuc *fuc = &ram->fuc;
1169 ram->base.next = NULL; 1168 ram->base.next = NULL;
1170 ram_exec(fuc, false); 1169 ram_exec(&ram->fuc, false);
1171} 1170}
1172 1171
1173struct gk104_ram_train { 1172struct gk104_ram_train {
@@ -1183,10 +1182,10 @@ struct gk104_ram_train {
1183}; 1182};
1184 1183
1185static int 1184static int
1186gk104_ram_train_type(struct nvkm_fb *pfb, int i, u8 ramcfg, 1185gk104_ram_train_type(struct nvkm_ram *ram, int i, u8 ramcfg,
1187 struct gk104_ram_train *train) 1186 struct gk104_ram_train *train)
1188{ 1187{
1189 struct nvkm_bios *bios = nvkm_bios(pfb); 1188 struct nvkm_bios *bios = ram->fb->subdev.device->bios;
1190 struct nvbios_M0205E M0205E; 1189 struct nvbios_M0205E M0205E;
1191 struct nvbios_M0205S M0205S; 1190 struct nvbios_M0205S M0205S;
1192 struct nvbios_M0209E M0209E; 1191 struct nvbios_M0209E M0209E;
@@ -1244,33 +1243,35 @@ gk104_ram_train_type(struct nvkm_fb *pfb, int i, u8 ramcfg,
1244} 1243}
1245 1244
1246static int 1245static int
1247gk104_ram_train_init_0(struct nvkm_fb *pfb, struct gk104_ram_train *train) 1246gk104_ram_train_init_0(struct nvkm_ram *ram, struct gk104_ram_train *train)
1248{ 1247{
1248 struct nvkm_subdev *subdev = &ram->fb->subdev;
1249 struct nvkm_device *device = subdev->device;
1249 int i, j; 1250 int i, j;
1250 1251
1251 if ((train->mask & 0x03d3) != 0x03d3) { 1252 if ((train->mask & 0x03d3) != 0x03d3) {
1252 nv_warn(pfb, "missing link training data\n"); 1253 nvkm_warn(subdev, "missing link training data\n");
1253 return -EINVAL; 1254 return -EINVAL;
1254 } 1255 }
1255 1256
1256 for (i = 0; i < 0x30; i++) { 1257 for (i = 0; i < 0x30; i++) {
1257 for (j = 0; j < 8; j += 4) { 1258 for (j = 0; j < 8; j += 4) {
1258 nv_wr32(pfb, 0x10f968 + j, 0x00000000 | (i << 8)); 1259 nvkm_wr32(device, 0x10f968 + j, 0x00000000 | (i << 8));
1259 nv_wr32(pfb, 0x10f920 + j, 0x00000000 | 1260 nvkm_wr32(device, 0x10f920 + j, 0x00000000 |
1260 train->type08.data[i] << 4 | 1261 train->type08.data[i] << 4 |
1261 train->type06.data[i]); 1262 train->type06.data[i]);
1262 nv_wr32(pfb, 0x10f918 + j, train->type00.data[i]); 1263 nvkm_wr32(device, 0x10f918 + j, train->type00.data[i]);
1263 nv_wr32(pfb, 0x10f920 + j, 0x00000100 | 1264 nvkm_wr32(device, 0x10f920 + j, 0x00000100 |
1264 train->type09.data[i] << 4 | 1265 train->type09.data[i] << 4 |
1265 train->type07.data[i]); 1266 train->type07.data[i]);
1266 nv_wr32(pfb, 0x10f918 + j, train->type01.data[i]); 1267 nvkm_wr32(device, 0x10f918 + j, train->type01.data[i]);
1267 } 1268 }
1268 } 1269 }
1269 1270
1270 for (j = 0; j < 8; j += 4) { 1271 for (j = 0; j < 8; j += 4) {
1271 for (i = 0; i < 0x100; i++) { 1272 for (i = 0; i < 0x100; i++) {
1272 nv_wr32(pfb, 0x10f968 + j, i); 1273 nvkm_wr32(device, 0x10f968 + j, i);
1273 nv_wr32(pfb, 0x10f900 + j, train->type04.data[i]); 1274 nvkm_wr32(device, 0x10f900 + j, train->type04.data[i]);
1274 } 1275 }
1275 } 1276 }
1276 1277
@@ -1278,23 +1279,24 @@ gk104_ram_train_init_0(struct nvkm_fb *pfb, struct gk104_ram_train *train)
1278} 1279}
1279 1280
1280static int 1281static int
1281gk104_ram_train_init(struct nvkm_fb *pfb) 1282gk104_ram_train_init(struct nvkm_ram *ram)
1282{ 1283{
1283 u8 ramcfg = nvbios_ramcfg_index(nv_subdev(pfb)); 1284 u8 ramcfg = nvbios_ramcfg_index(&ram->fb->subdev);
1284 struct gk104_ram_train *train; 1285 struct gk104_ram_train *train;
1285 int ret = -ENOMEM, i; 1286 int ret, i;
1286 1287
1287 if ((train = kzalloc(sizeof(*train), GFP_KERNEL))) { 1288 if (!(train = kzalloc(sizeof(*train), GFP_KERNEL)))
1288 for (i = 0; i < 0x100; i++) { 1289 return -ENOMEM;
1289 ret = gk104_ram_train_type(pfb, i, ramcfg, train); 1290
1290 if (ret && ret != -ENOENT) 1291 for (i = 0; i < 0x100; i++) {
1291 break; 1292 ret = gk104_ram_train_type(ram, i, ramcfg, train);
1292 } 1293 if (ret && ret != -ENOENT)
1294 break;
1293 } 1295 }
1294 1296
1295 switch (pfb->ram->type) { 1297 switch (ram->type) {
1296 case NV_MEM_TYPE_GDDR5: 1298 case NVKM_RAM_TYPE_GDDR5:
1297 ret = gk104_ram_train_init_0(pfb, train); 1299 ret = gk104_ram_train_init_0(ram, train);
1298 break; 1300 break;
1299 default: 1301 default:
1300 ret = 0; 1302 ret = 0;
@@ -1306,18 +1308,14 @@ gk104_ram_train_init(struct nvkm_fb *pfb)
1306} 1308}
1307 1309
1308int 1310int
1309gk104_ram_init(struct nvkm_object *object) 1311gk104_ram_init(struct nvkm_ram *ram)
1310{ 1312{
1311 struct nvkm_fb *pfb = (void *)object->parent; 1313 struct nvkm_subdev *subdev = &ram->fb->subdev;
1312 struct gk104_ram *ram = (void *)object; 1314 struct nvkm_device *device = subdev->device;
1313 struct nvkm_bios *bios = nvkm_bios(pfb); 1315 struct nvkm_bios *bios = device->bios;
1314 u8 ver, hdr, cnt, len, snr, ssz; 1316 u8 ver, hdr, cnt, len, snr, ssz;
1315 u32 data, save; 1317 u32 data, save;
1316 int ret, i; 1318 int i;
1317
1318 ret = nvkm_ram_init(&ram->base);
1319 if (ret)
1320 return ret;
1321 1319
1322 /* run a bunch of tables from rammap table. there's actually 1320 /* run a bunch of tables from rammap table. there's actually
1323 * individual pointers for each rammap entry too, but, nvidia 1321 * individual pointers for each rammap entry too, but, nvidia
@@ -1334,33 +1332,32 @@ gk104_ram_init(struct nvkm_object *object)
1334 if (!data || hdr < 0x15) 1332 if (!data || hdr < 0x15)
1335 return -EINVAL; 1333 return -EINVAL;
1336 1334
1337 cnt = nv_ro08(bios, data + 0x14); /* guess at count */ 1335 cnt = nvbios_rd08(bios, data + 0x14); /* guess at count */
1338 data = nv_ro32(bios, data + 0x10); /* guess u32... */ 1336 data = nvbios_rd32(bios, data + 0x10); /* guess u32... */
1339 save = nv_rd32(pfb, 0x10f65c) & 0x000000f0; 1337 save = nvkm_rd32(device, 0x10f65c) & 0x000000f0;
1340 for (i = 0; i < cnt; i++, data += 4) { 1338 for (i = 0; i < cnt; i++, data += 4) {
1341 if (i != save >> 4) { 1339 if (i != save >> 4) {
1342 nv_mask(pfb, 0x10f65c, 0x000000f0, i << 4); 1340 nvkm_mask(device, 0x10f65c, 0x000000f0, i << 4);
1343 nvbios_exec(&(struct nvbios_init) { 1341 nvbios_exec(&(struct nvbios_init) {
1344 .subdev = nv_subdev(pfb), 1342 .subdev = subdev,
1345 .bios = bios, 1343 .bios = bios,
1346 .offset = nv_ro32(bios, data), 1344 .offset = nvbios_rd32(bios, data),
1347 .execute = 1, 1345 .execute = 1,
1348 }); 1346 });
1349 } 1347 }
1350 } 1348 }
1351 nv_mask(pfb, 0x10f65c, 0x000000f0, save); 1349 nvkm_mask(device, 0x10f65c, 0x000000f0, save);
1352 nv_mask(pfb, 0x10f584, 0x11000000, 0x00000000); 1350 nvkm_mask(device, 0x10f584, 0x11000000, 0x00000000);
1353 nv_wr32(pfb, 0x10ecc0, 0xffffffff); 1351 nvkm_wr32(device, 0x10ecc0, 0xffffffff);
1354 nv_mask(pfb, 0x10f160, 0x00000010, 0x00000010); 1352 nvkm_mask(device, 0x10f160, 0x00000010, 0x00000010);
1355 1353
1356 return gk104_ram_train_init(pfb); 1354 return gk104_ram_train_init(ram);
1357} 1355}
1358 1356
1359static int 1357static int
1360gk104_ram_ctor_data(struct gk104_ram *ram, u8 ramcfg, int i) 1358gk104_ram_ctor_data(struct gk104_ram *ram, u8 ramcfg, int i)
1361{ 1359{
1362 struct nvkm_fb *pfb = (void *)nv_object(ram)->parent; 1360 struct nvkm_bios *bios = ram->base.fb->subdev.device->bios;
1363 struct nvkm_bios *bios = nvkm_bios(pfb);
1364 struct nvkm_ram_data *cfg; 1361 struct nvkm_ram_data *cfg;
1365 struct nvbios_ramcfg *d = &ram->diff; 1362 struct nvbios_ramcfg *d = &ram->diff;
1366 struct nvbios_ramcfg *p, *n; 1363 struct nvbios_ramcfg *p, *n;
@@ -1426,63 +1423,64 @@ done:
1426 return ret; 1423 return ret;
1427} 1424}
1428 1425
1429static void 1426static void *
1430gk104_ram_dtor(struct nvkm_object *object) 1427gk104_ram_dtor(struct nvkm_ram *base)
1431{ 1428{
1432 struct gk104_ram *ram = (void *)object; 1429 struct gk104_ram *ram = gk104_ram(base);
1433 struct nvkm_ram_data *cfg, *tmp; 1430 struct nvkm_ram_data *cfg, *tmp;
1434 1431
1435 list_for_each_entry_safe(cfg, tmp, &ram->cfg, head) { 1432 list_for_each_entry_safe(cfg, tmp, &ram->cfg, head) {
1436 kfree(cfg); 1433 kfree(cfg);
1437 } 1434 }
1438 1435
1439 nvkm_ram_destroy(&ram->base); 1436 return ram;
1440} 1437}
1441 1438
1442static int 1439static const struct nvkm_ram_func
1443gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 1440gk104_ram_func = {
1444 struct nvkm_oclass *oclass, void *data, u32 size, 1441 .dtor = gk104_ram_dtor,
1445 struct nvkm_object **pobject) 1442 .init = gk104_ram_init,
1443 .get = gf100_ram_get,
1444 .put = gf100_ram_put,
1445 .calc = gk104_ram_calc,
1446 .prog = gk104_ram_prog,
1447 .tidy = gk104_ram_tidy,
1448};
1449
1450int
1451gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
1446{ 1452{
1447 struct nvkm_fb *pfb = nvkm_fb(parent); 1453 struct nvkm_subdev *subdev = &fb->subdev;
1448 struct nvkm_bios *bios = nvkm_bios(pfb); 1454 struct nvkm_device *device = subdev->device;
1449 struct nvkm_gpio *gpio = nvkm_gpio(pfb); 1455 struct nvkm_bios *bios = device->bios;
1456 struct nvkm_gpio *gpio = device->gpio;
1450 struct dcb_gpio_func func; 1457 struct dcb_gpio_func func;
1451 struct gk104_ram *ram; 1458 struct gk104_ram *ram;
1452 int ret, i; 1459 int ret, i;
1453 u8 ramcfg = nvbios_ramcfg_index(nv_subdev(pfb)); 1460 u8 ramcfg = nvbios_ramcfg_index(subdev);
1454 u32 tmp; 1461 u32 tmp;
1455 1462
1456 ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram); 1463 if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
1457 *pobject = nv_object(ram); 1464 return -ENOMEM;
1465 *pram = &ram->base;
1466
1467 ret = gf100_ram_ctor(&gk104_ram_func, fb, 0x022554, &ram->base);
1458 if (ret) 1468 if (ret)
1459 return ret; 1469 return ret;
1460 1470
1461 INIT_LIST_HEAD(&ram->cfg); 1471 INIT_LIST_HEAD(&ram->cfg);
1462 1472
1463 switch (ram->base.type) {
1464 case NV_MEM_TYPE_DDR3:
1465 case NV_MEM_TYPE_GDDR5:
1466 ram->base.calc = gk104_ram_calc;
1467 ram->base.prog = gk104_ram_prog;
1468 ram->base.tidy = gk104_ram_tidy;
1469 break;
1470 default:
1471 nv_warn(pfb, "reclocking of this RAM type is unsupported\n");
1472 break;
1473 }
1474
1475 /* calculate a mask of differently configured memory partitions, 1473 /* calculate a mask of differently configured memory partitions,
1476 * because, of course reclocking wasn't complicated enough 1474 * because, of course reclocking wasn't complicated enough
1477 * already without having to treat some of them differently to 1475 * already without having to treat some of them differently to
1478 * the others.... 1476 * the others....
1479 */ 1477 */
1480 ram->parts = nv_rd32(pfb, 0x022438); 1478 ram->parts = nvkm_rd32(device, 0x022438);
1481 ram->pmask = nv_rd32(pfb, 0x022554); 1479 ram->pmask = nvkm_rd32(device, 0x022554);
1482 ram->pnuts = 0; 1480 ram->pnuts = 0;
1483 for (i = 0, tmp = 0; i < ram->parts; i++) { 1481 for (i = 0, tmp = 0; i < ram->parts; i++) {
1484 if (!(ram->pmask & (1 << i))) { 1482 if (!(ram->pmask & (1 << i))) {
1485 u32 cfg1 = nv_rd32(pfb, 0x110204 + (i * 0x1000)); 1483 u32 cfg1 = nvkm_rd32(device, 0x110204 + (i * 0x1000));
1486 if (tmp && tmp != cfg1) { 1484 if (tmp && tmp != cfg1) {
1487 ram->pnuts |= (1 << i); 1485 ram->pnuts |= (1 << i);
1488 continue; 1486 continue;
@@ -1505,7 +1503,7 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
1505 for (i = 0; !ret; i++) { 1503 for (i = 0; !ret; i++) {
1506 ret = gk104_ram_ctor_data(ram, ramcfg, i); 1504 ret = gk104_ram_ctor_data(ram, ramcfg, i);
1507 if (ret && ret != -ENOENT) { 1505 if (ret && ret != -ENOENT) {
1508 nv_error(pfb, "failed to parse ramcfg data\n"); 1506 nvkm_error(subdev, "failed to parse ramcfg data\n");
1509 return ret; 1507 return ret;
1510 } 1508 }
1511 } 1509 }
@@ -1513,25 +1511,25 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
1513 /* parse bios data for both pll's */ 1511 /* parse bios data for both pll's */
1514 ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll); 1512 ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
1515 if (ret) { 1513 if (ret) {
1516 nv_error(pfb, "mclk refpll data not found\n"); 1514 nvkm_error(subdev, "mclk refpll data not found\n");
1517 return ret; 1515 return ret;
1518 } 1516 }
1519 1517
1520 ret = nvbios_pll_parse(bios, 0x04, &ram->fuc.mempll); 1518 ret = nvbios_pll_parse(bios, 0x04, &ram->fuc.mempll);
1521 if (ret) { 1519 if (ret) {
1522 nv_error(pfb, "mclk pll data not found\n"); 1520 nvkm_error(subdev, "mclk pll data not found\n");
1523 return ret; 1521 return ret;
1524 } 1522 }
1525 1523
1526 /* lookup memory voltage gpios */ 1524 /* lookup memory voltage gpios */
1527 ret = gpio->find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func); 1525 ret = nvkm_gpio_find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
1528 if (ret == 0) { 1526 if (ret == 0) {
1529 ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04)); 1527 ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04));
1530 ram->fuc.r_funcMV[0] = (func.log[0] ^ 2) << 12; 1528 ram->fuc.r_funcMV[0] = (func.log[0] ^ 2) << 12;
1531 ram->fuc.r_funcMV[1] = (func.log[1] ^ 2) << 12; 1529 ram->fuc.r_funcMV[1] = (func.log[1] ^ 2) << 12;
1532 } 1530 }
1533 1531
1534 ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func); 1532 ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
1535 if (ret == 0) { 1533 if (ret == 0) {
1536 ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (func.line * 0x04)); 1534 ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (func.line * 0x04));
1537 ram->fuc.r_func2E[0] = (func.log[0] ^ 2) << 12; 1535 ram->fuc.r_func2E[0] = (func.log[0] ^ 2) << 12;
@@ -1588,7 +1586,7 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
1588 ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914); 1586 ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
1589 1587
1590 switch (ram->base.type) { 1588 switch (ram->base.type) {
1591 case NV_MEM_TYPE_GDDR5: 1589 case NVKM_RAM_TYPE_GDDR5:
1592 ram->fuc.r_mr[0] = ramfuc_reg(0x10f300); 1590 ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
1593 ram->fuc.r_mr[1] = ramfuc_reg(0x10f330); 1591 ram->fuc.r_mr[1] = ramfuc_reg(0x10f330);
1594 ram->fuc.r_mr[2] = ramfuc_reg(0x10f334); 1592 ram->fuc.r_mr[2] = ramfuc_reg(0x10f334);
@@ -1600,7 +1598,7 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
1600 ram->fuc.r_mr[8] = ramfuc_reg(0x10f354); 1598 ram->fuc.r_mr[8] = ramfuc_reg(0x10f354);
1601 ram->fuc.r_mr[15] = ramfuc_reg(0x10f34c); 1599 ram->fuc.r_mr[15] = ramfuc_reg(0x10f34c);
1602 break; 1600 break;
1603 case NV_MEM_TYPE_DDR3: 1601 case NVKM_RAM_TYPE_DDR3:
1604 ram->fuc.r_mr[0] = ramfuc_reg(0x10f300); 1602 ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
1605 ram->fuc.r_mr[2] = ramfuc_reg(0x10f320); 1603 ram->fuc.r_mr[2] = ramfuc_reg(0x10f320);
1606 break; 1604 break;
@@ -1626,14 +1624,3 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
1626 ram->fuc.r_0x100750 = ramfuc_reg(0x100750); 1624 ram->fuc.r_0x100750 = ramfuc_reg(0x100750);
1627 return 0; 1625 return 0;
1628} 1626}
1629
1630struct nvkm_oclass
1631gk104_ram_oclass = {
1632 .handle = 0,
1633 .ofuncs = &(struct nvkm_ofuncs) {
1634 .ctor = gk104_ram_ctor,
1635 .dtor = gk104_ram_dtor,
1636 .init = gk104_ram_init,
1637 .fini = _nvkm_ram_fini,
1638 }
1639};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index a298b39f55c5..43d807f6ca71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -21,35 +21,20 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "gf100.h" 24#include "ram.h"
25 25
26struct gm107_ram { 26static const struct nvkm_ram_func
27 struct nvkm_ram base; 27gm107_ram_func = {
28 .init = gk104_ram_init,
29 .get = gf100_ram_get,
30 .put = gf100_ram_put,
28}; 31};
29 32
30static int 33int
31gm107_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 34gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
32 struct nvkm_oclass *oclass, void *data, u32 size,
33 struct nvkm_object **pobject)
34{ 35{
35 struct gm107_ram *ram; 36 if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
36 int ret; 37 return -ENOMEM;
37 38
38 ret = gf100_ram_create(parent, engine, oclass, 0x021c14, &ram); 39 return gf100_ram_ctor(&gm107_ram_func, fb, 0x021c14, *pram);
39 *pobject = nv_object(ram);
40 if (ret)
41 return ret;
42
43 return 0;
44} 40}
45
46struct nvkm_oclass
47gm107_ram_oclass = {
48 .handle = 0,
49 .ofuncs = &(struct nvkm_ofuncs) {
50 .ctor = gm107_ram_ctor,
51 .dtor = _nvkm_ram_dtor,
52 .init = gk104_ram_init,
53 .fini = _nvkm_ram_fini,
54 }
55};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
index 24176401b49b..5c08ae8023fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
@@ -22,11 +22,10 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 * Roy Spliet <rspliet@eclipso.eu> 23 * Roy Spliet <rspliet@eclipso.eu>
24 */ 24 */
25 25#define gt215_ram(p) container_of((p), struct gt215_ram, base)
26#include "ram.h"
26#include "ramfuc.h" 27#include "ramfuc.h"
27#include "nv50.h"
28 28
29#include <core/device.h>
30#include <core/option.h> 29#include <core/option.h>
31#include <subdev/bios.h> 30#include <subdev/bios.h>
32#include <subdev/bios/M0205.h> 31#include <subdev/bios/M0205.h>
@@ -154,14 +153,14 @@ gt215_link_train_calc(u32 *vals, struct gt215_ltrain *train)
154 * Link training for (at least) DDR3 153 * Link training for (at least) DDR3
155 */ 154 */
156int 155int
157gt215_link_train(struct nvkm_fb *pfb) 156gt215_link_train(struct gt215_ram *ram)
158{ 157{
159 struct nvkm_bios *bios = nvkm_bios(pfb);
160 struct gt215_ram *ram = (void *)pfb->ram;
161 struct nvkm_clk *clk = nvkm_clk(pfb);
162 struct gt215_ltrain *train = &ram->ltrain; 158 struct gt215_ltrain *train = &ram->ltrain;
163 struct nvkm_device *device = nv_device(pfb);
164 struct gt215_ramfuc *fuc = &ram->fuc; 159 struct gt215_ramfuc *fuc = &ram->fuc;
160 struct nvkm_subdev *subdev = &ram->base.fb->subdev;
161 struct nvkm_device *device = subdev->device;
162 struct nvkm_bios *bios = device->bios;
163 struct nvkm_clk *clk = device->clk;
165 u32 *result, r1700; 164 u32 *result, r1700;
166 int ret, i; 165 int ret, i;
167 struct nvbios_M0205T M0205T = { 0 }; 166 struct nvbios_M0205T M0205T = { 0 };
@@ -182,27 +181,29 @@ gt215_link_train(struct nvkm_fb *pfb)
182 181
183 /* Clock speeds for training and back */ 182 /* Clock speeds for training and back */
184 nvbios_M0205Tp(bios, &ver, &hdr, &cnt, &len, &snr, &ssz, &M0205T); 183 nvbios_M0205Tp(bios, &ver, &hdr, &cnt, &len, &snr, &ssz, &M0205T);
185 if (M0205T.freq == 0) 184 if (M0205T.freq == 0) {
185 kfree(result);
186 return -ENOENT; 186 return -ENOENT;
187 }
187 188
188 clk_current = clk->read(clk, nv_clk_src_mem); 189 clk_current = nvkm_clk_read(clk, nv_clk_src_mem);
189 190
190 ret = gt215_clk_pre(clk, f); 191 ret = gt215_clk_pre(clk, f);
191 if (ret) 192 if (ret)
192 goto out; 193 goto out;
193 194
194 /* First: clock up/down */ 195 /* First: clock up/down */
195 ret = ram->base.calc(pfb, (u32) M0205T.freq * 1000); 196 ret = ram->base.func->calc(&ram->base, (u32) M0205T.freq * 1000);
196 if (ret) 197 if (ret)
197 goto out; 198 goto out;
198 199
199 /* Do this *after* calc, eliminates write in script */ 200 /* Do this *after* calc, eliminates write in script */
200 nv_wr32(pfb, 0x111400, 0x00000000); 201 nvkm_wr32(device, 0x111400, 0x00000000);
201 /* XXX: Magic writes that improve train reliability? */ 202 /* XXX: Magic writes that improve train reliability? */
202 nv_mask(pfb, 0x100674, 0x0000ffff, 0x00000000); 203 nvkm_mask(device, 0x100674, 0x0000ffff, 0x00000000);
203 nv_mask(pfb, 0x1005e4, 0x0000ffff, 0x00000000); 204 nvkm_mask(device, 0x1005e4, 0x0000ffff, 0x00000000);
204 nv_mask(pfb, 0x100b0c, 0x000000ff, 0x00000000); 205 nvkm_mask(device, 0x100b0c, 0x000000ff, 0x00000000);
205 nv_wr32(pfb, 0x100c04, 0x00000400); 206 nvkm_wr32(device, 0x100c04, 0x00000400);
206 207
207 /* Now the training script */ 208 /* Now the training script */
208 r1700 = ram_rd32(fuc, 0x001700); 209 r1700 = ram_rd32(fuc, 0x001700);
@@ -235,22 +236,22 @@ gt215_link_train(struct nvkm_fb *pfb)
235 236
236 ram_exec(fuc, true); 237 ram_exec(fuc, true);
237 238
238 ram->base.calc(pfb, clk_current); 239 ram->base.func->calc(&ram->base, clk_current);
239 ram_exec(fuc, true); 240 ram_exec(fuc, true);
240 241
241 /* Post-processing, avoids flicker */ 242 /* Post-processing, avoids flicker */
242 nv_mask(pfb, 0x616308, 0x10, 0x10); 243 nvkm_mask(device, 0x616308, 0x10, 0x10);
243 nv_mask(pfb, 0x616b08, 0x10, 0x10); 244 nvkm_mask(device, 0x616b08, 0x10, 0x10);
244 245
245 gt215_clk_post(clk, f); 246 gt215_clk_post(clk, f);
246 247
247 ram_train_result(pfb, result, 64); 248 ram_train_result(ram->base.fb, result, 64);
248 for (i = 0; i < 64; i++) 249 for (i = 0; i < 64; i++)
249 nv_debug(pfb, "Train: %08x", result[i]); 250 nvkm_debug(subdev, "Train: %08x", result[i]);
250 gt215_link_train_calc(result, train); 251 gt215_link_train_calc(result, train);
251 252
252 nv_debug(pfb, "Train: %08x %08x %08x", train->r_100720, 253 nvkm_debug(subdev, "Train: %08x %08x %08x", train->r_100720,
253 train->r_1111e0, train->r_111400); 254 train->r_1111e0, train->r_111400);
254 255
255 kfree(result); 256 kfree(result);
256 257
@@ -265,11 +266,12 @@ out:
265 train->state = NVA3_TRAIN_UNSUPPORTED; 266 train->state = NVA3_TRAIN_UNSUPPORTED;
266 267
267 gt215_clk_post(clk, f); 268 gt215_clk_post(clk, f);
269 kfree(result);
268 return ret; 270 return ret;
269} 271}
270 272
271int 273int
272gt215_link_train_init(struct nvkm_fb *pfb) 274gt215_link_train_init(struct gt215_ram *ram)
273{ 275{
274 static const u32 pattern[16] = { 276 static const u32 pattern[16] = {
275 0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee, 277 0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
@@ -277,9 +279,9 @@ gt215_link_train_init(struct nvkm_fb *pfb)
277 0x33333333, 0x55555555, 0x77777777, 0x66666666, 279 0x33333333, 0x55555555, 0x77777777, 0x66666666,
278 0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb, 280 0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
279 }; 281 };
280 struct nvkm_bios *bios = nvkm_bios(pfb);
281 struct gt215_ram *ram = (void *)pfb->ram;
282 struct gt215_ltrain *train = &ram->ltrain; 282 struct gt215_ltrain *train = &ram->ltrain;
283 struct nvkm_device *device = ram->base.fb->subdev.device;
284 struct nvkm_bios *bios = device->bios;
283 struct nvkm_mem *mem; 285 struct nvkm_mem *mem;
284 struct nvbios_M0205E M0205E; 286 struct nvbios_M0205E M0205E;
285 u8 ver, hdr, cnt, len; 287 u8 ver, hdr, cnt, len;
@@ -298,48 +300,47 @@ gt215_link_train_init(struct nvkm_fb *pfb)
298 300
299 train->state = NVA3_TRAIN_ONCE; 301 train->state = NVA3_TRAIN_ONCE;
300 302
301 ret = pfb->ram->get(pfb, 0x8000, 0x10000, 0, 0x800, &ram->ltrain.mem); 303 ret = ram->base.func->get(&ram->base, 0x8000, 0x10000, 0, 0x800,
304 &ram->ltrain.mem);
302 if (ret) 305 if (ret)
303 return ret; 306 return ret;
304 307
305 mem = ram->ltrain.mem; 308 mem = ram->ltrain.mem;
306 309
307 nv_wr32(pfb, 0x100538, 0x10000000 | (mem->offset >> 16)); 310 nvkm_wr32(device, 0x100538, 0x10000000 | (mem->offset >> 16));
308 nv_wr32(pfb, 0x1005a8, 0x0000ffff); 311 nvkm_wr32(device, 0x1005a8, 0x0000ffff);
309 nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001); 312 nvkm_mask(device, 0x10f800, 0x00000001, 0x00000001);
310 313
311 for (i = 0; i < 0x30; i++) { 314 for (i = 0; i < 0x30; i++) {
312 nv_wr32(pfb, 0x10f8c0, (i << 8) | i); 315 nvkm_wr32(device, 0x10f8c0, (i << 8) | i);
313 nv_wr32(pfb, 0x10f900, pattern[i % 16]); 316 nvkm_wr32(device, 0x10f900, pattern[i % 16]);
314 } 317 }
315 318
316 for (i = 0; i < 0x30; i++) { 319 for (i = 0; i < 0x30; i++) {
317 nv_wr32(pfb, 0x10f8e0, (i << 8) | i); 320 nvkm_wr32(device, 0x10f8e0, (i << 8) | i);
318 nv_wr32(pfb, 0x10f920, pattern[i % 16]); 321 nvkm_wr32(device, 0x10f920, pattern[i % 16]);
319 } 322 }
320 323
321 /* And upload the pattern */ 324 /* And upload the pattern */
322 r001700 = nv_rd32(pfb, 0x1700); 325 r001700 = nvkm_rd32(device, 0x1700);
323 nv_wr32(pfb, 0x1700, mem->offset >> 16); 326 nvkm_wr32(device, 0x1700, mem->offset >> 16);
324 for (i = 0; i < 16; i++) 327 for (i = 0; i < 16; i++)
325 nv_wr32(pfb, 0x700000 + (i << 2), pattern[i]); 328 nvkm_wr32(device, 0x700000 + (i << 2), pattern[i]);
326 for (i = 0; i < 16; i++) 329 for (i = 0; i < 16; i++)
327 nv_wr32(pfb, 0x700100 + (i << 2), pattern[i]); 330 nvkm_wr32(device, 0x700100 + (i << 2), pattern[i]);
328 nv_wr32(pfb, 0x1700, r001700); 331 nvkm_wr32(device, 0x1700, r001700);
329 332
330 train->r_100720 = nv_rd32(pfb, 0x100720); 333 train->r_100720 = nvkm_rd32(device, 0x100720);
331 train->r_1111e0 = nv_rd32(pfb, 0x1111e0); 334 train->r_1111e0 = nvkm_rd32(device, 0x1111e0);
332 train->r_111400 = nv_rd32(pfb, 0x111400); 335 train->r_111400 = nvkm_rd32(device, 0x111400);
333 return 0; 336 return 0;
334} 337}
335 338
336void 339void
337gt215_link_train_fini(struct nvkm_fb *pfb) 340gt215_link_train_fini(struct gt215_ram *ram)
338{ 341{
339 struct gt215_ram *ram = (void *)pfb->ram;
340
341 if (ram->ltrain.mem) 342 if (ram->ltrain.mem)
342 pfb->ram->put(pfb, &ram->ltrain.mem); 343 ram->base.func->put(&ram->base, &ram->ltrain.mem);
343} 344}
344 345
345/* 346/*
@@ -347,24 +348,25 @@ gt215_link_train_fini(struct nvkm_fb *pfb)
347 */ 348 */
348#define T(t) cfg->timing_10_##t 349#define T(t) cfg->timing_10_##t
349static int 350static int
350gt215_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing) 351gt215_ram_timing_calc(struct gt215_ram *ram, u32 *timing)
351{ 352{
352 struct gt215_ram *ram = (void *)pfb->ram;
353 struct nvbios_ramcfg *cfg = &ram->base.target.bios; 353 struct nvbios_ramcfg *cfg = &ram->base.target.bios;
354 struct nvkm_subdev *subdev = &ram->base.fb->subdev;
355 struct nvkm_device *device = subdev->device;
354 int tUNK_base, tUNK_40_0, prevCL; 356 int tUNK_base, tUNK_40_0, prevCL;
355 u32 cur2, cur3, cur7, cur8; 357 u32 cur2, cur3, cur7, cur8;
356 358
357 cur2 = nv_rd32(pfb, 0x100228); 359 cur2 = nvkm_rd32(device, 0x100228);
358 cur3 = nv_rd32(pfb, 0x10022c); 360 cur3 = nvkm_rd32(device, 0x10022c);
359 cur7 = nv_rd32(pfb, 0x10023c); 361 cur7 = nvkm_rd32(device, 0x10023c);
360 cur8 = nv_rd32(pfb, 0x100240); 362 cur8 = nvkm_rd32(device, 0x100240);
361 363
362 364
363 switch ((!T(CWL)) * ram->base.type) { 365 switch ((!T(CWL)) * ram->base.type) {
364 case NV_MEM_TYPE_DDR2: 366 case NVKM_RAM_TYPE_DDR2:
365 T(CWL) = T(CL) - 1; 367 T(CWL) = T(CL) - 1;
366 break; 368 break;
367 case NV_MEM_TYPE_GDDR3: 369 case NVKM_RAM_TYPE_GDDR3:
368 T(CWL) = ((cur2 & 0xff000000) >> 24) + 1; 370 T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
369 break; 371 break;
370 } 372 }
@@ -402,8 +404,8 @@ gt215_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing)
402 timing[8] = cur8 & 0xffffff00; 404 timing[8] = cur8 & 0xffffff00;
403 405
404 switch (ram->base.type) { 406 switch (ram->base.type) {
405 case NV_MEM_TYPE_DDR2: 407 case NVKM_RAM_TYPE_DDR2:
406 case NV_MEM_TYPE_GDDR3: 408 case NVKM_RAM_TYPE_GDDR3:
407 tUNK_40_0 = prevCL - (cur8 & 0xff); 409 tUNK_40_0 = prevCL - (cur8 & 0xff);
408 if (tUNK_40_0 > 0) 410 if (tUNK_40_0 > 0)
409 timing[8] |= T(CL); 411 timing[8] |= T(CL);
@@ -412,11 +414,11 @@ gt215_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing)
412 break; 414 break;
413 } 415 }
414 416
415 nv_debug(pfb, "Entry: 220: %08x %08x %08x %08x\n", 417 nvkm_debug(subdev, "Entry: 220: %08x %08x %08x %08x\n",
416 timing[0], timing[1], timing[2], timing[3]); 418 timing[0], timing[1], timing[2], timing[3]);
417 nv_debug(pfb, " 230: %08x %08x %08x %08x\n", 419 nvkm_debug(subdev, " 230: %08x %08x %08x %08x\n",
418 timing[4], timing[5], timing[6], timing[7]); 420 timing[4], timing[5], timing[6], timing[7]);
419 nv_debug(pfb, " 240: %08x\n", timing[8]); 421 nvkm_debug(subdev, " 240: %08x\n", timing[8]);
420 return 0; 422 return 0;
421} 423}
422#undef T 424#undef T
@@ -466,13 +468,13 @@ gt215_ram_lock_pll(struct gt215_ramfuc *fuc, struct gt215_clk_info *mclk)
466static void 468static void
467gt215_ram_fbvref(struct gt215_ramfuc *fuc, u32 val) 469gt215_ram_fbvref(struct gt215_ramfuc *fuc, u32 val)
468{ 470{
469 struct nvkm_gpio *gpio = nvkm_gpio(fuc->base.pfb); 471 struct nvkm_gpio *gpio = fuc->base.fb->subdev.device->gpio;
470 struct dcb_gpio_func func; 472 struct dcb_gpio_func func;
471 u32 reg, sh, gpio_val; 473 u32 reg, sh, gpio_val;
472 int ret; 474 int ret;
473 475
474 if (gpio->get(gpio, 0, 0x2e, DCB_GPIO_UNUSED) != val) { 476 if (nvkm_gpio_get(gpio, 0, 0x2e, DCB_GPIO_UNUSED) != val) {
475 ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func); 477 ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
476 if (ret) 478 if (ret)
477 return; 479 return;
478 480
@@ -487,12 +489,14 @@ gt215_ram_fbvref(struct gt215_ramfuc *fuc, u32 val)
487} 489}
488 490
489static int 491static int
490gt215_ram_calc(struct nvkm_fb *pfb, u32 freq) 492gt215_ram_calc(struct nvkm_ram *base, u32 freq)
491{ 493{
492 struct nvkm_bios *bios = nvkm_bios(pfb); 494 struct gt215_ram *ram = gt215_ram(base);
493 struct gt215_ram *ram = (void *)pfb->ram;
494 struct gt215_ramfuc *fuc = &ram->fuc; 495 struct gt215_ramfuc *fuc = &ram->fuc;
495 struct gt215_ltrain *train = &ram->ltrain; 496 struct gt215_ltrain *train = &ram->ltrain;
497 struct nvkm_subdev *subdev = &ram->base.fb->subdev;
498 struct nvkm_device *device = subdev->device;
499 struct nvkm_bios *bios = device->bios;
496 struct gt215_clk_info mclk; 500 struct gt215_clk_info mclk;
497 struct nvkm_ram_data *next; 501 struct nvkm_ram_data *next;
498 u8 ver, hdr, cnt, len, strap; 502 u8 ver, hdr, cnt, len, strap;
@@ -508,28 +512,27 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
508 ram->base.next = next; 512 ram->base.next = next;
509 513
510 if (ram->ltrain.state == NVA3_TRAIN_ONCE) 514 if (ram->ltrain.state == NVA3_TRAIN_ONCE)
511 gt215_link_train(pfb); 515 gt215_link_train(ram);
512 516
513 /* lookup memory config data relevant to the target frequency */ 517 /* lookup memory config data relevant to the target frequency */
514 i = 0;
515 data = nvbios_rammapEm(bios, freq / 1000, &ver, &hdr, &cnt, &len, 518 data = nvbios_rammapEm(bios, freq / 1000, &ver, &hdr, &cnt, &len,
516 &next->bios); 519 &next->bios);
517 if (!data || ver != 0x10 || hdr < 0x05) { 520 if (!data || ver != 0x10 || hdr < 0x05) {
518 nv_error(pfb, "invalid/missing rammap entry\n"); 521 nvkm_error(subdev, "invalid/missing rammap entry\n");
519 return -EINVAL; 522 return -EINVAL;
520 } 523 }
521 524
522 /* locate specific data set for the attached memory */ 525 /* locate specific data set for the attached memory */
523 strap = nvbios_ramcfg_index(nv_subdev(pfb)); 526 strap = nvbios_ramcfg_index(subdev);
524 if (strap >= cnt) { 527 if (strap >= cnt) {
525 nv_error(pfb, "invalid ramcfg strap\n"); 528 nvkm_error(subdev, "invalid ramcfg strap\n");
526 return -EINVAL; 529 return -EINVAL;
527 } 530 }
528 531
529 data = nvbios_rammapSp(bios, data, ver, hdr, cnt, len, strap, 532 data = nvbios_rammapSp(bios, data, ver, hdr, cnt, len, strap,
530 &ver, &hdr, &next->bios); 533 &ver, &hdr, &next->bios);
531 if (!data || ver != 0x10 || hdr < 0x09) { 534 if (!data || ver != 0x10 || hdr < 0x09) {
532 nv_error(pfb, "invalid/missing ramcfg entry\n"); 535 nvkm_error(subdev, "invalid/missing ramcfg entry\n");
533 return -EINVAL; 536 return -EINVAL;
534 } 537 }
535 538
@@ -539,20 +542,20 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
539 &ver, &hdr, &cnt, &len, 542 &ver, &hdr, &cnt, &len,
540 &next->bios); 543 &next->bios);
541 if (!data || ver != 0x10 || hdr < 0x17) { 544 if (!data || ver != 0x10 || hdr < 0x17) {
542 nv_error(pfb, "invalid/missing timing entry\n"); 545 nvkm_error(subdev, "invalid/missing timing entry\n");
543 return -EINVAL; 546 return -EINVAL;
544 } 547 }
545 } 548 }
546 549
547 ret = gt215_pll_info(nvkm_clk(pfb), 0x12, 0x4000, freq, &mclk); 550 ret = gt215_pll_info(device->clk, 0x12, 0x4000, freq, &mclk);
548 if (ret < 0) { 551 if (ret < 0) {
549 nv_error(pfb, "failed mclk calculation\n"); 552 nvkm_error(subdev, "failed mclk calculation\n");
550 return ret; 553 return ret;
551 } 554 }
552 555
553 gt215_ram_timing_calc(pfb, timing); 556 gt215_ram_timing_calc(ram, timing);
554 557
555 ret = ram_init(fuc, pfb); 558 ret = ram_init(fuc, ram->base.fb);
556 if (ret) 559 if (ret)
557 return ret; 560 return ret;
558 561
@@ -562,13 +565,13 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
562 ram->base.mr[2] = ram_rd32(fuc, mr[2]); 565 ram->base.mr[2] = ram_rd32(fuc, mr[2]);
563 566
564 switch (ram->base.type) { 567 switch (ram->base.type) {
565 case NV_MEM_TYPE_DDR2: 568 case NVKM_RAM_TYPE_DDR2:
566 ret = nvkm_sddr2_calc(&ram->base); 569 ret = nvkm_sddr2_calc(&ram->base);
567 break; 570 break;
568 case NV_MEM_TYPE_DDR3: 571 case NVKM_RAM_TYPE_DDR3:
569 ret = nvkm_sddr3_calc(&ram->base); 572 ret = nvkm_sddr3_calc(&ram->base);
570 break; 573 break;
571 case NV_MEM_TYPE_GDDR3: 574 case NVKM_RAM_TYPE_GDDR3:
572 ret = nvkm_gddr3_calc(&ram->base); 575 ret = nvkm_gddr3_calc(&ram->base);
573 break; 576 break;
574 default: 577 default:
@@ -579,7 +582,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
579 if (ret) 582 if (ret)
580 return ret; 583 return ret;
581 584
582 /* XXX: where the fuck does 750MHz come from? */ 585 /* XXX: 750MHz seems rather arbitrary */
583 if (freq <= 750000) { 586 if (freq <= 750000) {
584 r004018 = 0x10000000; 587 r004018 = 0x10000000;
585 r100760 = 0x22222222; 588 r100760 = 0x22222222;
@@ -590,7 +593,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
590 r100da0 = 0x00000000; 593 r100da0 = 0x00000000;
591 } 594 }
592 595
593 if (!next->bios.ramcfg_10_DLLoff) 596 if (!next->bios.ramcfg_DLLoff)
594 r004018 |= 0x00004000; 597 r004018 |= 0x00004000;
595 598
596 /* pll2pll requires to switch to a safe clock first */ 599 /* pll2pll requires to switch to a safe clock first */
@@ -623,18 +626,18 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
623 ram_nsec(fuc, 2000); 626 ram_nsec(fuc, 2000);
624 627
625 if (!next->bios.ramcfg_10_02_10) { 628 if (!next->bios.ramcfg_10_02_10) {
626 if (ram->base.type == NV_MEM_TYPE_GDDR3) 629 if (ram->base.type == NVKM_RAM_TYPE_GDDR3)
627 ram_mask(fuc, 0x111100, 0x04020000, 0x00020000); 630 ram_mask(fuc, 0x111100, 0x04020000, 0x00020000);
628 else 631 else
629 ram_mask(fuc, 0x111100, 0x04020000, 0x04020000); 632 ram_mask(fuc, 0x111100, 0x04020000, 0x04020000);
630 } 633 }
631 634
632 /* If we're disabling the DLL, do it now */ 635 /* If we're disabling the DLL, do it now */
633 switch (next->bios.ramcfg_10_DLLoff * ram->base.type) { 636 switch (next->bios.ramcfg_DLLoff * ram->base.type) {
634 case NV_MEM_TYPE_DDR3: 637 case NVKM_RAM_TYPE_DDR3:
635 nvkm_sddr3_dll_disable(fuc, ram->base.mr); 638 nvkm_sddr3_dll_disable(fuc, ram->base.mr);
636 break; 639 break;
637 case NV_MEM_TYPE_GDDR3: 640 case NVKM_RAM_TYPE_GDDR3:
638 nvkm_gddr3_dll_disable(fuc, ram->base.mr); 641 nvkm_gddr3_dll_disable(fuc, ram->base.mr);
639 break; 642 break;
640 } 643 }
@@ -650,7 +653,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
650 ram_wr32(fuc, 0x1002dc, 0x00000001); 653 ram_wr32(fuc, 0x1002dc, 0x00000001);
651 ram_nsec(fuc, 2000); 654 ram_nsec(fuc, 2000);
652 655
653 if (nv_device(pfb)->chipset == 0xa3 && freq <= 500000) 656 if (device->chipset == 0xa3 && freq <= 500000)
654 ram_mask(fuc, 0x100700, 0x00000006, 0x00000006); 657 ram_mask(fuc, 0x100700, 0x00000006, 0x00000006);
655 658
656 /* Fiddle with clocks */ 659 /* Fiddle with clocks */
@@ -708,7 +711,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
708 ram_mask(fuc, 0x1007e0, 0x22222222, r100760); 711 ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
709 } 712 }
710 713
711 if (nv_device(pfb)->chipset == 0xa3 && freq > 500000) { 714 if (device->chipset == 0xa3 && freq > 500000) {
712 ram_mask(fuc, 0x100700, 0x00000006, 0x00000000); 715 ram_mask(fuc, 0x100700, 0x00000006, 0x00000000);
713 } 716 }
714 717
@@ -752,11 +755,11 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
752 755
753 if (next->bios.ramcfg_10_02_04) { 756 if (next->bios.ramcfg_10_02_04) {
754 switch (ram->base.type) { 757 switch (ram->base.type) {
755 case NV_MEM_TYPE_DDR3: 758 case NVKM_RAM_TYPE_DDR3:
756 if (nv_device(pfb)->chipset != 0xa8) 759 if (device->chipset != 0xa8)
757 r111100 |= 0x00000004; 760 r111100 |= 0x00000004;
758 /* no break */ 761 /* no break */
759 case NV_MEM_TYPE_DDR2: 762 case NVKM_RAM_TYPE_DDR2:
760 r111100 |= 0x08000000; 763 r111100 |= 0x08000000;
761 break; 764 break;
762 default: 765 default:
@@ -764,12 +767,12 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
764 } 767 }
765 } else { 768 } else {
766 switch (ram->base.type) { 769 switch (ram->base.type) {
767 case NV_MEM_TYPE_DDR2: 770 case NVKM_RAM_TYPE_DDR2:
768 r111100 |= 0x1a800000; 771 r111100 |= 0x1a800000;
769 unk714 |= 0x00000010; 772 unk714 |= 0x00000010;
770 break; 773 break;
771 case NV_MEM_TYPE_DDR3: 774 case NVKM_RAM_TYPE_DDR3:
772 if (nv_device(pfb)->chipset == 0xa8) { 775 if (device->chipset == 0xa8) {
773 r111100 |= 0x08000000; 776 r111100 |= 0x08000000;
774 } else { 777 } else {
775 r111100 &= ~0x00000004; 778 r111100 &= ~0x00000004;
@@ -777,7 +780,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
777 } 780 }
778 unk714 |= 0x00000010; 781 unk714 |= 0x00000010;
779 break; 782 break;
780 case NV_MEM_TYPE_GDDR3: 783 case NVKM_RAM_TYPE_GDDR3:
781 r111100 |= 0x30000000; 784 r111100 |= 0x30000000;
782 unk714 |= 0x00000020; 785 unk714 |= 0x00000020;
783 break; 786 break;
@@ -810,16 +813,16 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
810 gt215_ram_fbvref(fuc, 1); 813 gt215_ram_fbvref(fuc, 1);
811 814
812 /* Reset DLL */ 815 /* Reset DLL */
813 if (!next->bios.ramcfg_10_DLLoff) 816 if (!next->bios.ramcfg_DLLoff)
814 nvkm_sddr2_dll_reset(fuc); 817 nvkm_sddr2_dll_reset(fuc);
815 818
816 if (ram->base.type == NV_MEM_TYPE_GDDR3) { 819 if (ram->base.type == NVKM_RAM_TYPE_GDDR3) {
817 ram_nsec(fuc, 31000); 820 ram_nsec(fuc, 31000);
818 } else { 821 } else {
819 ram_nsec(fuc, 14000); 822 ram_nsec(fuc, 14000);
820 } 823 }
821 824
822 if (ram->base.type == NV_MEM_TYPE_DDR3) { 825 if (ram->base.type == NVKM_RAM_TYPE_DDR3) {
823 ram_wr32(fuc, 0x100264, 0x1); 826 ram_wr32(fuc, 0x100264, 0x1);
824 ram_nsec(fuc, 2000); 827 ram_nsec(fuc, 2000);
825 } 828 }
@@ -855,24 +858,24 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
855} 858}
856 859
857static int 860static int
858gt215_ram_prog(struct nvkm_fb *pfb) 861gt215_ram_prog(struct nvkm_ram *base)
859{ 862{
860 struct nvkm_device *device = nv_device(pfb); 863 struct gt215_ram *ram = gt215_ram(base);
861 struct gt215_ram *ram = (void *)pfb->ram;
862 struct gt215_ramfuc *fuc = &ram->fuc; 864 struct gt215_ramfuc *fuc = &ram->fuc;
865 struct nvkm_device *device = ram->base.fb->subdev.device;
863 bool exec = nvkm_boolopt(device->cfgopt, "NvMemExec", true); 866 bool exec = nvkm_boolopt(device->cfgopt, "NvMemExec", true);
864 867
865 if (exec) { 868 if (exec) {
866 nv_mask(pfb, 0x001534, 0x2, 0x2); 869 nvkm_mask(device, 0x001534, 0x2, 0x2);
867 870
868 ram_exec(fuc, true); 871 ram_exec(fuc, true);
869 872
870 /* Post-processing, avoids flicker */ 873 /* Post-processing, avoids flicker */
871 nv_mask(pfb, 0x002504, 0x1, 0x0); 874 nvkm_mask(device, 0x002504, 0x1, 0x0);
872 nv_mask(pfb, 0x001534, 0x2, 0x0); 875 nvkm_mask(device, 0x001534, 0x2, 0x0);
873 876
874 nv_mask(pfb, 0x616308, 0x10, 0x10); 877 nvkm_mask(device, 0x616308, 0x10, 0x10);
875 nv_mask(pfb, 0x616b08, 0x10, 0x10); 878 nvkm_mask(device, 0x616b08, 0x10, 0x10);
876 } else { 879 } else {
877 ram_exec(fuc, false); 880 ram_exec(fuc, false);
878 } 881 }
@@ -880,69 +883,56 @@ gt215_ram_prog(struct nvkm_fb *pfb)
880} 883}
881 884
882static void 885static void
883gt215_ram_tidy(struct nvkm_fb *pfb) 886gt215_ram_tidy(struct nvkm_ram *base)
884{ 887{
885 struct gt215_ram *ram = (void *)pfb->ram; 888 struct gt215_ram *ram = gt215_ram(base);
886 struct gt215_ramfuc *fuc = &ram->fuc; 889 ram_exec(&ram->fuc, false);
887 ram_exec(fuc, false);
888} 890}
889 891
890static int 892static int
891gt215_ram_init(struct nvkm_object *object) 893gt215_ram_init(struct nvkm_ram *base)
892{ 894{
893 struct nvkm_fb *pfb = (void *)object->parent; 895 struct gt215_ram *ram = gt215_ram(base);
894 struct gt215_ram *ram = (void *)object; 896 gt215_link_train_init(ram);
895 int ret;
896
897 ret = nvkm_ram_init(&ram->base);
898 if (ret)
899 return ret;
900
901 gt215_link_train_init(pfb);
902 return 0; 897 return 0;
903} 898}
904 899
905static int 900static void *
906gt215_ram_fini(struct nvkm_object *object, bool suspend) 901gt215_ram_dtor(struct nvkm_ram *base)
907{ 902{
908 struct nvkm_fb *pfb = (void *)object->parent; 903 struct gt215_ram *ram = gt215_ram(base);
909 904 gt215_link_train_fini(ram);
910 if (!suspend) 905 return ram;
911 gt215_link_train_fini(pfb);
912
913 return 0;
914} 906}
915 907
916static int 908static const struct nvkm_ram_func
917gt215_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 909gt215_ram_func = {
918 struct nvkm_oclass *oclass, void *data, u32 datasize, 910 .dtor = gt215_ram_dtor,
919 struct nvkm_object **pobject) 911 .init = gt215_ram_init,
912 .get = nv50_ram_get,
913 .put = nv50_ram_put,
914 .calc = gt215_ram_calc,
915 .prog = gt215_ram_prog,
916 .tidy = gt215_ram_tidy,
917};
918
919int
920gt215_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
920{ 921{
921 struct nvkm_fb *pfb = nvkm_fb(parent); 922 struct nvkm_gpio *gpio = fb->subdev.device->gpio;
922 struct nvkm_gpio *gpio = nvkm_gpio(pfb);
923 struct dcb_gpio_func func; 923 struct dcb_gpio_func func;
924 struct gt215_ram *ram; 924 struct gt215_ram *ram;
925 int ret, i;
926 u32 reg, shift; 925 u32 reg, shift;
926 int ret, i;
927
928 if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
929 return -ENOMEM;
930 *pram = &ram->base;
927 931
928 ret = nv50_ram_create(parent, engine, oclass, &ram); 932 ret = nv50_ram_ctor(&gt215_ram_func, fb, &ram->base);
929 *pobject = nv_object(ram);
930 if (ret) 933 if (ret)
931 return ret; 934 return ret;
932 935
933 switch (ram->base.type) {
934 case NV_MEM_TYPE_DDR2:
935 case NV_MEM_TYPE_DDR3:
936 case NV_MEM_TYPE_GDDR3:
937 ram->base.calc = gt215_ram_calc;
938 ram->base.prog = gt215_ram_prog;
939 ram->base.tidy = gt215_ram_tidy;
940 break;
941 default:
942 nv_warn(ram, "reclocking of this ram type unsupported\n");
943 return 0;
944 }
945
946 ram->fuc.r_0x001610 = ramfuc_reg(0x001610); 936 ram->fuc.r_0x001610 = ramfuc_reg(0x001610);
947 ram->fuc.r_0x001700 = ramfuc_reg(0x001700); 937 ram->fuc.r_0x001700 = ramfuc_reg(0x001700);
948 ram->fuc.r_0x002504 = ramfuc_reg(0x002504); 938 ram->fuc.r_0x002504 = ramfuc_reg(0x002504);
@@ -992,7 +982,7 @@ gt215_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
992 ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4); 982 ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
993 } 983 }
994 984
995 ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func); 985 ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
996 if (ret == 0) { 986 if (ret == 0) {
997 nv50_gpio_location(func.line, &reg, &shift); 987 nv50_gpio_location(func.line, &reg, &shift);
998 ram->fuc.r_gpioFBVREF = ramfuc_reg(reg); 988 ram->fuc.r_gpioFBVREF = ramfuc_reg(reg);
@@ -1000,13 +990,3 @@ gt215_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
1000 990
1001 return 0; 991 return 0;
1002} 992}
1003
1004struct nvkm_oclass
1005gt215_ram_oclass = {
1006 .ofuncs = &(struct nvkm_ofuncs) {
1007 .ctor = gt215_ram_ctor,
1008 .dtor = _nvkm_ram_dtor,
1009 .init = gt215_ram_init,
1010 .fini = gt215_ram_fini,
1011 },
1012};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
index abc18e89a97c..0a0e44b75577 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
@@ -21,81 +21,67 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#define mcp77_ram(p) container_of((p), struct mcp77_ram, base)
25#include "ram.h"
25 26
26struct mcp77_ram_priv { 27struct mcp77_ram {
27 struct nvkm_ram base; 28 struct nvkm_ram base;
28 u64 poller_base; 29 u64 poller_base;
29}; 30};
30 31
31static int 32static int
32mcp77_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 33mcp77_ram_init(struct nvkm_ram *base)
33 struct nvkm_oclass *oclass, void *data, u32 datasize,
34 struct nvkm_object **pobject)
35{ 34{
36 u32 rsvd_head = ( 256 * 1024); /* vga memory */ 35 struct mcp77_ram *ram = mcp77_ram(base);
37 u32 rsvd_tail = (1024 * 1024); /* vbios etc */ 36 struct nvkm_device *device = ram->base.fb->subdev.device;
38 struct nvkm_fb *pfb = nvkm_fb(parent); 37 u32 dniso = ((ram->base.size - (ram->poller_base + 0x00)) >> 5) - 1;
39 struct mcp77_ram_priv *priv; 38 u32 hostnb = ((ram->base.size - (ram->poller_base + 0x20)) >> 5) - 1;
40 int ret; 39 u32 flush = ((ram->base.size - (ram->poller_base + 0x40)) >> 5) - 1;
41
42 ret = nvkm_ram_create(parent, engine, oclass, &priv);
43 *pobject = nv_object(priv);
44 if (ret)
45 return ret;
46
47 priv->base.type = NV_MEM_TYPE_STOLEN;
48 priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
49 priv->base.size = (u64)nv_rd32(pfb, 0x100e14) << 12;
50 40
51 rsvd_tail += 0x1000; 41 /* Enable NISO poller for various clients and set their associated
52 priv->poller_base = priv->base.size - rsvd_tail; 42 * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
53 43 */
54 ret = nvkm_mm_init(&pfb->vram, rsvd_head >> 12, 44 nvkm_wr32(device, 0x100c18, dniso);
55 (priv->base.size - (rsvd_head + rsvd_tail)) >> 12, 45 nvkm_mask(device, 0x100c14, 0x00000000, 0x00000001);
56 1); 46 nvkm_wr32(device, 0x100c1c, hostnb);
57 if (ret) 47 nvkm_mask(device, 0x100c14, 0x00000000, 0x00000002);
58 return ret; 48 nvkm_wr32(device, 0x100c24, flush);
59 49 nvkm_mask(device, 0x100c14, 0x00000000, 0x00010000);
60 priv->base.get = nv50_ram_get;
61 priv->base.put = nv50_ram_put;
62 return 0; 50 return 0;
63} 51}
64 52
65static int 53static const struct nvkm_ram_func
66mcp77_ram_init(struct nvkm_object *object) 54mcp77_ram_func = {
55 .init = mcp77_ram_init,
56 .get = nv50_ram_get,
57 .put = nv50_ram_put,
58};
59
60int
61mcp77_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
67{ 62{
68 struct nvkm_fb *pfb = nvkm_fb(object); 63 struct nvkm_device *device = fb->subdev.device;
69 struct mcp77_ram_priv *priv = (void *)object; 64 u32 rsvd_head = ( 256 * 1024); /* vga memory */
65 u32 rsvd_tail = (1024 * 1024) + 0x1000; /* vbios etc + poller mem */
66 u64 base = (u64)nvkm_rd32(device, 0x100e10) << 12;
67 u64 size = (u64)nvkm_rd32(device, 0x100e14) << 12;
68 struct mcp77_ram *ram;
70 int ret; 69 int ret;
71 u64 dniso, hostnb, flush;
72 70
73 ret = nvkm_ram_init(&priv->base); 71 if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
72 return -ENOMEM;
73 *pram = &ram->base;
74
75 ret = nvkm_ram_ctor(&mcp77_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
76 size, 0, &ram->base);
74 if (ret) 77 if (ret)
75 return ret; 78 return ret;
76 79
77 dniso = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1; 80 ram->poller_base = size - rsvd_tail;
78 hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1; 81 ram->base.stolen = base;
79 flush = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1; 82 nvkm_mm_fini(&ram->base.vram);
80 83
81 /* Enable NISO poller for various clients and set their associated 84 return nvkm_mm_init(&ram->base.vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
82 * read address, only for MCP77/78 and MCP79/7A. (fd#25701) 85 (size - rsvd_head - rsvd_tail) >>
83 */ 86 NVKM_RAM_MM_SHIFT, 1);
84 nv_wr32(pfb, 0x100c18, dniso);
85 nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001);
86 nv_wr32(pfb, 0x100c1c, hostnb);
87 nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002);
88 nv_wr32(pfb, 0x100c24, flush);
89 nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000);
90 return 0;
91} 87}
92
93struct nvkm_oclass
94mcp77_ram_oclass = {
95 .ofuncs = &(struct nvkm_ofuncs) {
96 .ctor = mcp77_ram_ctor,
97 .dtor = _nvkm_ram_dtor,
98 .init = mcp77_ram_init,
99 .fini = _nvkm_ram_fini,
100 },
101};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
index 855de1617229..6f053a03d61c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
@@ -21,59 +21,45 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "priv.h" 24#include "ram.h"
25#include "regsnv04.h" 25#include "regsnv04.h"
26 26
27static int 27const struct nvkm_ram_func
28nv04_ram_create(struct nvkm_object *parent, struct nvkm_object *engine, 28nv04_ram_func = {
29 struct nvkm_oclass *oclass, void *data, u32 size, 29};
30 struct nvkm_object **pobject)
31{
32 struct nvkm_fb *pfb = nvkm_fb(parent);
33 struct nvkm_ram *ram;
34 u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0);
35 int ret;
36 30
37 ret = nvkm_ram_create(parent, engine, oclass, &ram); 31int
38 *pobject = nv_object(ram); 32nv04_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
39 if (ret) 33{
40 return ret; 34 struct nvkm_device *device = fb->subdev.device;
35 u32 boot0 = nvkm_rd32(device, NV04_PFB_BOOT_0);
36 u64 size;
37 enum nvkm_ram_type type;
41 38
42 if (boot0 & 0x00000100) { 39 if (boot0 & 0x00000100) {
43 ram->size = ((boot0 >> 12) & 0xf) * 2 + 2; 40 size = ((boot0 >> 12) & 0xf) * 2 + 2;
44 ram->size *= 1024 * 1024; 41 size *= 1024 * 1024;
45 } else { 42 } else {
46 switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) { 43 switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
47 case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB: 44 case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
48 ram->size = 32 * 1024 * 1024; 45 size = 32 * 1024 * 1024;
49 break; 46 break;
50 case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB: 47 case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
51 ram->size = 16 * 1024 * 1024; 48 size = 16 * 1024 * 1024;
52 break; 49 break;
53 case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB: 50 case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
54 ram->size = 8 * 1024 * 1024; 51 size = 8 * 1024 * 1024;
55 break; 52 break;
56 case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB: 53 case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
57 ram->size = 4 * 1024 * 1024; 54 size = 4 * 1024 * 1024;
58 break; 55 break;
59 } 56 }
60 } 57 }
61 58
62 if ((boot0 & 0x00000038) <= 0x10) 59 if ((boot0 & 0x00000038) <= 0x10)
63 ram->type = NV_MEM_TYPE_SGRAM; 60 type = NVKM_RAM_TYPE_SGRAM;
64 else 61 else
65 ram->type = NV_MEM_TYPE_SDRAM; 62 type = NVKM_RAM_TYPE_SDRAM;
66 63
67 return 0; 64 return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
68} 65}
69
70struct nvkm_oclass
71nv04_ram_oclass = {
72 .handle = 0,
73 .ofuncs = &(struct nvkm_ofuncs) {
74 .ctor = nv04_ram_create,
75 .dtor = _nvkm_ram_dtor,
76 .init = _nvkm_ram_init,
77 .fini = _nvkm_ram_fini,
78 }
79};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
index 3b8a1eda5b64..dfd155c98dbb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
@@ -21,39 +21,20 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "priv.h" 24#include "ram.h"
25 25
26static int 26int
27nv10_ram_create(struct nvkm_object *parent, struct nvkm_object *engine, 27nv10_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
28 struct nvkm_oclass *oclass, void *data, u32 size,
29 struct nvkm_object **pobject)
30{ 28{
31 struct nvkm_fb *pfb = nvkm_fb(parent); 29 struct nvkm_device *device = fb->subdev.device;
32 struct nvkm_ram *ram; 30 u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
33 u32 cfg0 = nv_rd32(pfb, 0x100200); 31 u32 cfg0 = nvkm_rd32(device, 0x100200);
34 int ret; 32 enum nvkm_ram_type type;
35
36 ret = nvkm_ram_create(parent, engine, oclass, &ram);
37 *pobject = nv_object(ram);
38 if (ret)
39 return ret;
40 33
41 if (cfg0 & 0x00000001) 34 if (cfg0 & 0x00000001)
42 ram->type = NV_MEM_TYPE_DDR1; 35 type = NVKM_RAM_TYPE_DDR1;
43 else 36 else
44 ram->type = NV_MEM_TYPE_SDRAM; 37 type = NVKM_RAM_TYPE_SDRAM;
45 38
46 ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; 39 return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
47 return 0;
48} 40}
49
50struct nvkm_oclass
51nv10_ram_oclass = {
52 .handle = 0,
53 .ofuncs = &(struct nvkm_ofuncs) {
54 .ctor = nv10_ram_create,
55 .dtor = _nvkm_ram_dtor,
56 .init = _nvkm_ram_init,
57 .fini = _nvkm_ram_fini,
58 }
59};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
index fbae05db4ffd..3c6a8710e812 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
@@ -21,33 +21,21 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "priv.h" 24#include "ram.h"
25 25
26#include <core/device.h> 26int
27 27nv1a_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
28static int
29nv1a_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
30 struct nvkm_oclass *oclass, void *data, u32 size,
31 struct nvkm_object **pobject)
32{ 28{
33 struct nvkm_fb *pfb = nvkm_fb(parent);
34 struct nvkm_ram *ram;
35 struct pci_dev *bridge; 29 struct pci_dev *bridge;
36 u32 mem, mib; 30 u32 mem, mib;
37 int ret;
38 31
39 bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1)); 32 bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
40 if (!bridge) { 33 if (!bridge) {
41 nv_fatal(pfb, "no bridge device\n"); 34 nvkm_error(&fb->subdev, "no bridge device\n");
42 return -ENODEV; 35 return -ENODEV;
43 } 36 }
44 37
45 ret = nvkm_ram_create(parent, engine, oclass, &ram); 38 if (fb->subdev.device->chipset == 0x1a) {
46 *pobject = nv_object(ram);
47 if (ret)
48 return ret;
49
50 if (nv_device(pfb)->chipset == 0x1a) {
51 pci_read_config_dword(bridge, 0x7c, &mem); 39 pci_read_config_dword(bridge, 0x7c, &mem);
52 mib = ((mem >> 6) & 31) + 1; 40 mib = ((mem >> 6) & 31) + 1;
53 } else { 41 } else {
@@ -55,18 +43,6 @@ nv1a_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
55 mib = ((mem >> 4) & 127) + 1; 43 mib = ((mem >> 4) & 127) + 1;
56 } 44 }
57 45
58 ram->type = NV_MEM_TYPE_STOLEN; 46 return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
59 ram->size = mib * 1024 * 1024; 47 mib * 1024 * 1024, 0, pram);
60 return 0;
61} 48}
62
63struct nvkm_oclass
64nv1a_ram_oclass = {
65 .handle = 0,
66 .ofuncs = &(struct nvkm_ofuncs) {
67 .ctor = nv1a_ram_create,
68 .dtor = _nvkm_ram_dtor,
69 .init = _nvkm_ram_init,
70 .fini = _nvkm_ram_fini,
71 }
72};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
index d9e7187bd235..747e47c10cc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
@@ -21,42 +21,29 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "priv.h" 24#include "ram.h"
25 25
26static int 26int
27nv20_ram_create(struct nvkm_object *parent, struct nvkm_object *engine, 27nv20_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
28 struct nvkm_oclass *oclass, void *data, u32 size,
29 struct nvkm_object **pobject)
30{ 28{
31 struct nvkm_fb *pfb = nvkm_fb(parent); 29 struct nvkm_device *device = fb->subdev.device;
32 struct nvkm_ram *ram; 30 u32 pbus1218 = nvkm_rd32(device, 0x001218);
33 u32 pbus1218 = nv_rd32(pfb, 0x001218); 31 u32 size = (nvkm_rd32(device, 0x10020c) & 0xff000000);
32 u32 tags = nvkm_rd32(device, 0x100320);
33 enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
34 int ret; 34 int ret;
35 35
36 ret = nvkm_ram_create(parent, engine, oclass, &ram); 36 switch (pbus1218 & 0x00000300) {
37 *pobject = nv_object(ram); 37 case 0x00000000: type = NVKM_RAM_TYPE_SDRAM; break;
38 case 0x00000100: type = NVKM_RAM_TYPE_DDR1 ; break;
39 case 0x00000200: type = NVKM_RAM_TYPE_GDDR3; break;
40 case 0x00000300: type = NVKM_RAM_TYPE_GDDR2; break;
41 }
42
43 ret = nvkm_ram_new_(&nv04_ram_func, fb, type, size, tags, pram);
38 if (ret) 44 if (ret)
39 return ret; 45 return ret;
40 46
41 switch (pbus1218 & 0x00000300) { 47 (*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
42 case 0x00000000: ram->type = NV_MEM_TYPE_SDRAM; break;
43 case 0x00000100: ram->type = NV_MEM_TYPE_DDR1; break;
44 case 0x00000200: ram->type = NV_MEM_TYPE_GDDR3; break;
45 case 0x00000300: ram->type = NV_MEM_TYPE_GDDR2; break;
46 }
47 ram->size = (nv_rd32(pfb, 0x10020c) & 0xff000000);
48 ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
49 ram->tags = nv_rd32(pfb, 0x100320);
50 return 0; 48 return 0;
51} 49}
52
53struct nvkm_oclass
54nv20_ram_oclass = {
55 .handle = 0,
56 .ofuncs = &(struct nvkm_ofuncs) {
57 .ctor = nv20_ram_create,
58 .dtor = _nvkm_ram_dtor,
59 .init = _nvkm_ram_init,
60 .fini = _nvkm_ram_fini,
61 }
62};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
index 3d31fa45c1a6..56f8cffc2560 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
@@ -21,9 +21,8 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv40.h" 24#include "ramnv40.h"
25 25
26#include <core/device.h>
27#include <subdev/bios.h> 26#include <subdev/bios.h>
28#include <subdev/bios/bit.h> 27#include <subdev/bios/bit.h>
29#include <subdev/bios/init.h> 28#include <subdev/bios/init.h>
@@ -31,23 +30,23 @@
31#include <subdev/clk/pll.h> 30#include <subdev/clk/pll.h>
32#include <subdev/timer.h> 31#include <subdev/timer.h>
33 32
34int 33static int
35nv40_ram_calc(struct nvkm_fb *pfb, u32 freq) 34nv40_ram_calc(struct nvkm_ram *base, u32 freq)
36{ 35{
37 struct nvkm_bios *bios = nvkm_bios(pfb); 36 struct nv40_ram *ram = nv40_ram(base);
38 struct nv40_ram *ram = (void *)pfb->ram; 37 struct nvkm_subdev *subdev = &ram->base.fb->subdev;
38 struct nvkm_bios *bios = subdev->device->bios;
39 struct nvbios_pll pll; 39 struct nvbios_pll pll;
40 int N1, M1, N2, M2; 40 int N1, M1, N2, M2;
41 int log2P, ret; 41 int log2P, ret;
42 42
43 ret = nvbios_pll_parse(bios, 0x04, &pll); 43 ret = nvbios_pll_parse(bios, 0x04, &pll);
44 if (ret) { 44 if (ret) {
45 nv_error(pfb, "mclk pll data not found\n"); 45 nvkm_error(subdev, "mclk pll data not found\n");
46 return ret; 46 return ret;
47 } 47 }
48 48
49 ret = nv04_pll_calc(nv_subdev(pfb), &pll, freq, 49 ret = nv04_pll_calc(subdev, &pll, freq, &N1, &M1, &N2, &M2, &log2P);
50 &N1, &M1, &N2, &M2, &log2P);
51 if (ret < 0) 50 if (ret < 0)
52 return ret; 51 return ret;
53 52
@@ -64,11 +63,13 @@ nv40_ram_calc(struct nvkm_fb *pfb, u32 freq)
64 return 0; 63 return 0;
65} 64}
66 65
67int 66static int
68nv40_ram_prog(struct nvkm_fb *pfb) 67nv40_ram_prog(struct nvkm_ram *base)
69{ 68{
70 struct nvkm_bios *bios = nvkm_bios(pfb); 69 struct nv40_ram *ram = nv40_ram(base);
71 struct nv40_ram *ram = (void *)pfb->ram; 70 struct nvkm_subdev *subdev = &ram->base.fb->subdev;
71 struct nvkm_device *device = subdev->device;
72 struct nvkm_bios *bios = device->bios;
72 struct bit_entry M; 73 struct bit_entry M;
73 u32 crtc_mask = 0; 74 u32 crtc_mask = 0;
74 u8 sr1[2]; 75 u8 sr1[2];
@@ -76,12 +77,12 @@ nv40_ram_prog(struct nvkm_fb *pfb)
76 77
77 /* determine which CRTCs are active, fetch VGA_SR1 for each */ 78 /* determine which CRTCs are active, fetch VGA_SR1 for each */
78 for (i = 0; i < 2; i++) { 79 for (i = 0; i < 2; i++) {
79 u32 vbl = nv_rd32(pfb, 0x600808 + (i * 0x2000)); 80 u32 vbl = nvkm_rd32(device, 0x600808 + (i * 0x2000));
80 u32 cnt = 0; 81 u32 cnt = 0;
81 do { 82 do {
82 if (vbl != nv_rd32(pfb, 0x600808 + (i * 0x2000))) { 83 if (vbl != nvkm_rd32(device, 0x600808 + (i * 0x2000))) {
83 nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01); 84 nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
84 sr1[i] = nv_rd08(pfb, 0x0c03c5 + (i * 0x2000)); 85 sr1[i] = nvkm_rd08(device, 0x0c03c5 + (i * 0x2000));
85 if (!(sr1[i] & 0x20)) 86 if (!(sr1[i] & 0x20))
86 crtc_mask |= (1 << i); 87 crtc_mask |= (1 << i);
87 break; 88 break;
@@ -94,55 +95,66 @@ nv40_ram_prog(struct nvkm_fb *pfb)
94 for (i = 0; i < 2; i++) { 95 for (i = 0; i < 2; i++) {
95 if (!(crtc_mask & (1 << i))) 96 if (!(crtc_mask & (1 << i)))
96 continue; 97 continue;
97 nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000); 98
98 nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000); 99 nvkm_msec(device, 2000,
99 nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01); 100 u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
100 nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20); 101 if (!(tmp & 0x00010000))
102 break;
103 );
104
105 nvkm_msec(device, 2000,
106 u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
107 if ( (tmp & 0x00010000))
108 break;
109 );
110
111 nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
112 nvkm_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
101 } 113 }
102 114
103 /* prepare ram for reclocking */ 115 /* prepare ram for reclocking */
104 nv_wr32(pfb, 0x1002d4, 0x00000001); /* precharge */ 116 nvkm_wr32(device, 0x1002d4, 0x00000001); /* precharge */
105 nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */ 117 nvkm_wr32(device, 0x1002d0, 0x00000001); /* refresh */
106 nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */ 118 nvkm_wr32(device, 0x1002d0, 0x00000001); /* refresh */
107 nv_mask(pfb, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */ 119 nvkm_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
108 nv_wr32(pfb, 0x1002dc, 0x00000001); /* enable self-refresh */ 120 nvkm_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */
109 121
110 /* change the PLL of each memory partition */ 122 /* change the PLL of each memory partition */
111 nv_mask(pfb, 0x00c040, 0x0000c000, 0x00000000); 123 nvkm_mask(device, 0x00c040, 0x0000c000, 0x00000000);
112 switch (nv_device(pfb)->chipset) { 124 switch (device->chipset) {
113 case 0x40: 125 case 0x40:
114 case 0x45: 126 case 0x45:
115 case 0x41: 127 case 0x41:
116 case 0x42: 128 case 0x42:
117 case 0x47: 129 case 0x47:
118 nv_mask(pfb, 0x004044, 0xc0771100, ram->ctrl); 130 nvkm_mask(device, 0x004044, 0xc0771100, ram->ctrl);
119 nv_mask(pfb, 0x00402c, 0xc0771100, ram->ctrl); 131 nvkm_mask(device, 0x00402c, 0xc0771100, ram->ctrl);
120 nv_wr32(pfb, 0x004048, ram->coef); 132 nvkm_wr32(device, 0x004048, ram->coef);
121 nv_wr32(pfb, 0x004030, ram->coef); 133 nvkm_wr32(device, 0x004030, ram->coef);
122 case 0x43: 134 case 0x43:
123 case 0x49: 135 case 0x49:
124 case 0x4b: 136 case 0x4b:
125 nv_mask(pfb, 0x004038, 0xc0771100, ram->ctrl); 137 nvkm_mask(device, 0x004038, 0xc0771100, ram->ctrl);
126 nv_wr32(pfb, 0x00403c, ram->coef); 138 nvkm_wr32(device, 0x00403c, ram->coef);
127 default: 139 default:
128 nv_mask(pfb, 0x004020, 0xc0771100, ram->ctrl); 140 nvkm_mask(device, 0x004020, 0xc0771100, ram->ctrl);
129 nv_wr32(pfb, 0x004024, ram->coef); 141 nvkm_wr32(device, 0x004024, ram->coef);
130 break; 142 break;
131 } 143 }
132 udelay(100); 144 udelay(100);
133 nv_mask(pfb, 0x00c040, 0x0000c000, 0x0000c000); 145 nvkm_mask(device, 0x00c040, 0x0000c000, 0x0000c000);
134 146
135 /* re-enable normal operation of memory controller */ 147 /* re-enable normal operation of memory controller */
136 nv_wr32(pfb, 0x1002dc, 0x00000000); 148 nvkm_wr32(device, 0x1002dc, 0x00000000);
137 nv_mask(pfb, 0x100210, 0x80000000, 0x80000000); 149 nvkm_mask(device, 0x100210, 0x80000000, 0x80000000);
138 udelay(100); 150 udelay(100);
139 151
140 /* execute memory reset script from vbios */ 152 /* execute memory reset script from vbios */
141 if (!bit_entry(bios, 'M', &M)) { 153 if (!bit_entry(bios, 'M', &M)) {
142 struct nvbios_init init = { 154 struct nvbios_init init = {
143 .subdev = nv_subdev(pfb), 155 .subdev = subdev,
144 .bios = bios, 156 .bios = bios,
145 .offset = nv_ro16(bios, M.offset + 0x00), 157 .offset = nvbios_rd16(bios, M.offset + 0x00),
146 .execute = 1, 158 .execute = 1,
147 }; 159 };
148 160
@@ -155,58 +167,64 @@ nv40_ram_prog(struct nvkm_fb *pfb)
155 for (i = 0; i < 2; i++) { 167 for (i = 0; i < 2; i++) {
156 if (!(crtc_mask & (1 << i))) 168 if (!(crtc_mask & (1 << i)))
157 continue; 169 continue;
158 nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000); 170
159 nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01); 171 nvkm_msec(device, 2000,
160 nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i]); 172 u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
173 if ( (tmp & 0x00010000))
174 break;
175 );
176
177 nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
178 nvkm_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]);
161 } 179 }
162 180
163 return 0; 181 return 0;
164} 182}
165 183
166void 184static void
167nv40_ram_tidy(struct nvkm_fb *pfb) 185nv40_ram_tidy(struct nvkm_ram *base)
168{ 186{
169} 187}
170 188
171static int 189static const struct nvkm_ram_func
172nv40_ram_create(struct nvkm_object *parent, struct nvkm_object *engine, 190nv40_ram_func = {
173 struct nvkm_oclass *oclass, void *data, u32 size, 191 .calc = nv40_ram_calc,
174 struct nvkm_object **pobject) 192 .prog = nv40_ram_prog,
193 .tidy = nv40_ram_tidy,
194};
195
196int
197nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type type, u64 size,
198 u32 tags, struct nvkm_ram **pram)
175{ 199{
176 struct nvkm_fb *pfb = nvkm_fb(parent);
177 struct nv40_ram *ram; 200 struct nv40_ram *ram;
178 u32 pbus1218 = nv_rd32(pfb, 0x001218); 201 if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
179 int ret; 202 return -ENOMEM;
203 *pram = &ram->base;
204 return nvkm_ram_ctor(&nv40_ram_func, fb, type, size, tags, &ram->base);
205}
180 206
181 ret = nvkm_ram_create(parent, engine, oclass, &ram); 207int
182 *pobject = nv_object(ram); 208nv40_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
183 if (ret) 209{
184 return ret; 210 struct nvkm_device *device = fb->subdev.device;
211 u32 pbus1218 = nvkm_rd32(device, 0x001218);
212 u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
213 u32 tags = nvkm_rd32(device, 0x100320);
214 enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
215 int ret;
185 216
186 switch (pbus1218 & 0x00000300) { 217 switch (pbus1218 & 0x00000300) {
187 case 0x00000000: ram->base.type = NV_MEM_TYPE_SDRAM; break; 218 case 0x00000000: type = NVKM_RAM_TYPE_SDRAM; break;
188 case 0x00000100: ram->base.type = NV_MEM_TYPE_DDR1; break; 219 case 0x00000100: type = NVKM_RAM_TYPE_DDR1 ; break;
189 case 0x00000200: ram->base.type = NV_MEM_TYPE_GDDR3; break; 220 case 0x00000200: type = NVKM_RAM_TYPE_GDDR3; break;
190 case 0x00000300: ram->base.type = NV_MEM_TYPE_DDR2; break; 221 case 0x00000300: type = NVKM_RAM_TYPE_DDR2 ; break;
191 } 222 }
192 223
193 ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000; 224 ret = nv40_ram_new_(fb, type, size, tags, pram);
194 ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; 225 if (ret)
195 ram->base.tags = nv_rd32(pfb, 0x100320); 226 return ret;
196 ram->base.calc = nv40_ram_calc; 227
197 ram->base.prog = nv40_ram_prog; 228 (*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
198 ram->base.tidy = nv40_ram_tidy;
199 return 0; 229 return 0;
200} 230}
201
202
203struct nvkm_oclass
204nv40_ram_oclass = {
205 .handle = 0,
206 .ofuncs = &(struct nvkm_ofuncs) {
207 .ctor = nv40_ram_create,
208 .dtor = _nvkm_ram_dtor,
209 .init = _nvkm_ram_init,
210 .fini = _nvkm_ram_fini,
211 }
212};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
new file mode 100644
index 000000000000..8a0524566b48
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
@@ -0,0 +1,14 @@
1#ifndef __NV40_FB_RAM_H__
2#define __NV40_FB_RAM_H__
3#define nv40_ram(p) container_of((p), struct nv40_ram, base)
4#include "ram.h"
5
6struct nv40_ram {
7 struct nvkm_ram base;
8 u32 ctrl;
9 u32 coef;
10};
11
12int nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type, u64, u32,
13 struct nvkm_ram **);
14#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
index 33c612b1355f..114828be292e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
@@ -21,46 +21,29 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv40.h" 24#include "ramnv40.h"
25 25
26static int 26int
27nv41_ram_create(struct nvkm_object *parent, struct nvkm_object *engine, 27nv41_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
28 struct nvkm_oclass *oclass, void *data, u32 size,
29 struct nvkm_object **pobject)
30{ 28{
31 struct nvkm_fb *pfb = nvkm_fb(parent); 29 struct nvkm_device *device = fb->subdev.device;
32 struct nv40_ram *ram; 30 u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
33 u32 pfb474 = nv_rd32(pfb, 0x100474); 31 u32 tags = nvkm_rd32(device, 0x100320);
32 u32 fb474 = nvkm_rd32(device, 0x100474);
33 enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
34 int ret; 34 int ret;
35 35
36 ret = nvkm_ram_create(parent, engine, oclass, &ram); 36 if (fb474 & 0x00000004)
37 *pobject = nv_object(ram); 37 type = NVKM_RAM_TYPE_GDDR3;
38 if (fb474 & 0x00000002)
39 type = NVKM_RAM_TYPE_DDR2;
40 if (fb474 & 0x00000001)
41 type = NVKM_RAM_TYPE_DDR1;
42
43 ret = nv40_ram_new_(fb, type, size, tags, pram);
38 if (ret) 44 if (ret)
39 return ret; 45 return ret;
40 46
41 if (pfb474 & 0x00000004) 47 (*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
42 ram->base.type = NV_MEM_TYPE_GDDR3;
43 if (pfb474 & 0x00000002)
44 ram->base.type = NV_MEM_TYPE_DDR2;
45 if (pfb474 & 0x00000001)
46 ram->base.type = NV_MEM_TYPE_DDR1;
47
48 ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
49 ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
50 ram->base.tags = nv_rd32(pfb, 0x100320);
51 ram->base.calc = nv40_ram_calc;
52 ram->base.prog = nv40_ram_prog;
53 ram->base.tidy = nv40_ram_tidy;
54 return 0; 48 return 0;
55} 49}
56
57struct nvkm_oclass
58nv41_ram_oclass = {
59 .handle = 0,
60 .ofuncs = &(struct nvkm_ofuncs) {
61 .ctor = nv41_ram_create,
62 .dtor = _nvkm_ram_dtor,
63 .init = _nvkm_ram_init,
64 .fini = _nvkm_ram_fini,
65 }
66};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
index f575a7246403..bc56fbf1c788 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
@@ -21,44 +21,22 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv40.h" 24#include "ramnv40.h"
25 25
26static int 26int
27nv44_ram_create(struct nvkm_object *parent, struct nvkm_object *engine, 27nv44_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
28 struct nvkm_oclass *oclass, void *data, u32 size,
29 struct nvkm_object **pobject)
30{ 28{
31 struct nvkm_fb *pfb = nvkm_fb(parent); 29 struct nvkm_device *device = fb->subdev.device;
32 struct nv40_ram *ram; 30 u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
33 u32 pfb474 = nv_rd32(pfb, 0x100474); 31 u32 fb474 = nvkm_rd32(device, 0x100474);
34 int ret; 32 enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
35 33
36 ret = nvkm_ram_create(parent, engine, oclass, &ram); 34 if (fb474 & 0x00000004)
37 *pobject = nv_object(ram); 35 type = NVKM_RAM_TYPE_GDDR3;
38 if (ret) 36 if (fb474 & 0x00000002)
39 return ret; 37 type = NVKM_RAM_TYPE_DDR2;
38 if (fb474 & 0x00000001)
39 type = NVKM_RAM_TYPE_DDR1;
40 40
41 if (pfb474 & 0x00000004) 41 return nv40_ram_new_(fb, type, size, 0, pram);
42 ram->base.type = NV_MEM_TYPE_GDDR3;
43 if (pfb474 & 0x00000002)
44 ram->base.type = NV_MEM_TYPE_DDR2;
45 if (pfb474 & 0x00000001)
46 ram->base.type = NV_MEM_TYPE_DDR1;
47
48 ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
49 ram->base.calc = nv40_ram_calc;
50 ram->base.prog = nv40_ram_prog;
51 ram->base.tidy = nv40_ram_tidy;
52 return 0;
53} 42}
54
55struct nvkm_oclass
56nv44_ram_oclass = {
57 .handle = 0,
58 .ofuncs = &(struct nvkm_ofuncs) {
59 .ctor = nv44_ram_create,
60 .dtor = _nvkm_ram_dtor,
61 .init = _nvkm_ram_init,
62 .fini = _nvkm_ram_fini,
63 }
64};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
index 51b44cdb2732..c01f4b1022b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
@@ -21,46 +21,29 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv40.h" 24#include "ramnv40.h"
25 25
26static int 26int
27nv49_ram_create(struct nvkm_object *parent, struct nvkm_object *engine, 27nv49_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
28 struct nvkm_oclass *oclass, void *data, u32 size,
29 struct nvkm_object **pobject)
30{ 28{
31 struct nvkm_fb *pfb = nvkm_fb(parent); 29 struct nvkm_device *device = fb->subdev.device;
32 struct nv40_ram *ram; 30 u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
33 u32 pfb914 = nv_rd32(pfb, 0x100914); 31 u32 tags = nvkm_rd32(device, 0x100320);
32 u32 fb914 = nvkm_rd32(device, 0x100914);
33 enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
34 int ret; 34 int ret;
35 35
36 ret = nvkm_ram_create(parent, engine, oclass, &ram); 36 switch (fb914 & 0x00000003) {
37 *pobject = nv_object(ram); 37 case 0x00000000: type = NVKM_RAM_TYPE_DDR1 ; break;
38 if (ret) 38 case 0x00000001: type = NVKM_RAM_TYPE_DDR2 ; break;
39 return ret; 39 case 0x00000002: type = NVKM_RAM_TYPE_GDDR3; break;
40
41 switch (pfb914 & 0x00000003) {
42 case 0x00000000: ram->base.type = NV_MEM_TYPE_DDR1; break;
43 case 0x00000001: ram->base.type = NV_MEM_TYPE_DDR2; break;
44 case 0x00000002: ram->base.type = NV_MEM_TYPE_GDDR3; break;
45 case 0x00000003: break; 40 case 0x00000003: break;
46 } 41 }
47 42
48 ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000; 43 ret = nv40_ram_new_(fb, type, size, tags, pram);
49 ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; 44 if (ret)
50 ram->base.tags = nv_rd32(pfb, 0x100320); 45 return ret;
51 ram->base.calc = nv40_ram_calc; 46
52 ram->base.prog = nv40_ram_prog; 47 (*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
53 ram->base.tidy = nv40_ram_tidy;
54 return 0; 48 return 0;
55} 49}
56
57struct nvkm_oclass
58nv49_ram_oclass = {
59 .handle = 0,
60 .ofuncs = &(struct nvkm_ofuncs) {
61 .ctor = nv49_ram_create,
62 .dtor = _nvkm_ram_dtor,
63 .init = _nvkm_ram_init,
64 .fini = _nvkm_ram_fini,
65 }
66};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
index f3ed1c60d730..fa3c2e06203d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
@@ -21,34 +21,13 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "priv.h" 24#include "ram.h"
25 25
26static int 26int
27nv4e_ram_create(struct nvkm_object *parent, struct nvkm_object *engine, 27nv4e_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
28 struct nvkm_oclass *oclass, void *data, u32 size,
29 struct nvkm_object **pobject)
30{ 28{
31 struct nvkm_fb *pfb = nvkm_fb(parent); 29 struct nvkm_device *device = fb->subdev.device;
32 struct nvkm_ram *ram; 30 u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
33 int ret; 31 return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_UNKNOWN,
34 32 size, 0, pram);
35 ret = nvkm_ram_create(parent, engine, oclass, &ram);
36 *pobject = nv_object(ram);
37 if (ret)
38 return ret;
39
40 ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
41 ram->type = NV_MEM_TYPE_STOLEN;
42 return 0;
43} 33}
44
45struct nvkm_oclass
46nv4e_ram_oclass = {
47 .handle = 0,
48 .ofuncs = &(struct nvkm_ofuncs) {
49 .ctor = nv4e_ram_create,
50 .dtor = _nvkm_ram_dtor,
51 .init = _nvkm_ram_init,
52 .fini = _nvkm_ram_fini,
53 }
54};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
index d2c81dd635dc..9197e0ef5cdb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
@@ -21,14 +21,16 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#define nv50_ram(p) container_of((p), struct nv50_ram, base)
25#include "ram.h"
25#include "ramseq.h" 26#include "ramseq.h"
27#include "nv50.h"
26 28
27#include <core/device.h>
28#include <core/option.h> 29#include <core/option.h>
29#include <subdev/bios.h> 30#include <subdev/bios.h>
30#include <subdev/bios/perf.h> 31#include <subdev/bios/perf.h>
31#include <subdev/bios/pll.h> 32#include <subdev/bios/pll.h>
33#include <subdev/bios/rammap.h>
32#include <subdev/bios/timing.h> 34#include <subdev/bios/timing.h>
33#include <subdev/clk/pll.h> 35#include <subdev/clk/pll.h>
34 36
@@ -38,11 +40,20 @@ struct nv50_ramseq {
38 struct hwsq_reg r_0x004008; 40 struct hwsq_reg r_0x004008;
39 struct hwsq_reg r_0x00400c; 41 struct hwsq_reg r_0x00400c;
40 struct hwsq_reg r_0x00c040; 42 struct hwsq_reg r_0x00c040;
43 struct hwsq_reg r_0x100200;
41 struct hwsq_reg r_0x100210; 44 struct hwsq_reg r_0x100210;
45 struct hwsq_reg r_0x10021c;
42 struct hwsq_reg r_0x1002d0; 46 struct hwsq_reg r_0x1002d0;
43 struct hwsq_reg r_0x1002d4; 47 struct hwsq_reg r_0x1002d4;
44 struct hwsq_reg r_0x1002dc; 48 struct hwsq_reg r_0x1002dc;
45 struct hwsq_reg r_0x100da0[8]; 49 struct hwsq_reg r_0x10053c;
50 struct hwsq_reg r_0x1005a0;
51 struct hwsq_reg r_0x1005a4;
52 struct hwsq_reg r_0x100710;
53 struct hwsq_reg r_0x100714;
54 struct hwsq_reg r_0x100718;
55 struct hwsq_reg r_0x10071c;
56 struct hwsq_reg r_0x100da0;
46 struct hwsq_reg r_0x100e20; 57 struct hwsq_reg r_0x100e20;
47 struct hwsq_reg r_0x100e24; 58 struct hwsq_reg r_0x100e24;
48 struct hwsq_reg r_0x611200; 59 struct hwsq_reg r_0x611200;
@@ -55,64 +66,181 @@ struct nv50_ram {
55 struct nv50_ramseq hwsq; 66 struct nv50_ramseq hwsq;
56}; 67};
57 68
58#define QFX5800NVA0 1 69#define T(t) cfg->timing_10_##t
70static int
71nv50_ram_timing_calc(struct nv50_ram *ram, u32 *timing)
72{
73 struct nvbios_ramcfg *cfg = &ram->base.target.bios;
74 struct nvkm_subdev *subdev = &ram->base.fb->subdev;
75 struct nvkm_device *device = subdev->device;
76 u32 cur2, cur4, cur7, cur8;
77 u8 unkt3b;
78
79 cur2 = nvkm_rd32(device, 0x100228);
80 cur4 = nvkm_rd32(device, 0x100230);
81 cur7 = nvkm_rd32(device, 0x10023c);
82 cur8 = nvkm_rd32(device, 0x100240);
83
84 switch ((!T(CWL)) * ram->base.type) {
85 case NVKM_RAM_TYPE_DDR2:
86 T(CWL) = T(CL) - 1;
87 break;
88 case NVKM_RAM_TYPE_GDDR3:
89 T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
90 break;
91 }
92
93 /* XXX: N=1 is not proper statistics */
94 if (device->chipset == 0xa0) {
95 unkt3b = 0x19 + ram->base.next->bios.rammap_00_16_40;
96 timing[6] = (0x2d + T(CL) - T(CWL) +
97 ram->base.next->bios.rammap_00_16_40) << 16 |
98 T(CWL) << 8 |
99 (0x2f + T(CL) - T(CWL));
100 } else {
101 unkt3b = 0x16;
102 timing[6] = (0x2b + T(CL) - T(CWL)) << 16 |
103 max_t(s8, T(CWL) - 2, 1) << 8 |
104 (0x2e + T(CL) - T(CWL));
105 }
106
107 timing[0] = (T(RP) << 24 | T(RAS) << 16 | T(RFC) << 8 | T(RC));
108 timing[1] = (T(WR) + 1 + T(CWL)) << 24 |
109 max_t(u8, T(18), 1) << 16 |
110 (T(WTR) + 1 + T(CWL)) << 8 |
111 (3 + T(CL) - T(CWL));
112 timing[2] = (T(CWL) - 1) << 24 |
113 (T(RRD) << 16) |
114 (T(RCDWR) << 8) |
115 T(RCDRD);
116 timing[3] = (unkt3b - 2 + T(CL)) << 24 |
117 unkt3b << 16 |
118 (T(CL) - 1) << 8 |
119 (T(CL) - 1);
120 timing[4] = (cur4 & 0xffff0000) |
121 T(13) << 8 |
122 T(13);
123 timing[5] = T(RFC) << 24 |
124 max_t(u8, T(RCDRD), T(RCDWR)) << 16 |
125 T(RP);
126 /* Timing 6 is already done above */
127 timing[7] = (cur7 & 0xff00ffff) | (T(CL) - 1) << 16;
128 timing[8] = (cur8 & 0xffffff00);
129
130 /* XXX: P.version == 1 only has DDR2 and GDDR3? */
131 if (ram->base.type == NVKM_RAM_TYPE_DDR2) {
132 timing[5] |= (T(CL) + 3) << 8;
133 timing[8] |= (T(CL) - 4);
134 } else
135 if (ram->base.type == NVKM_RAM_TYPE_GDDR3) {
136 timing[5] |= (T(CL) + 2) << 8;
137 timing[8] |= (T(CL) - 2);
138 }
139
140 nvkm_debug(subdev, " 220: %08x %08x %08x %08x\n",
141 timing[0], timing[1], timing[2], timing[3]);
142 nvkm_debug(subdev, " 230: %08x %08x %08x %08x\n",
143 timing[4], timing[5], timing[6], timing[7]);
144 nvkm_debug(subdev, " 240: %08x\n", timing[8]);
145 return 0;
146}
147#undef T
148
149static void
150nvkm_sddr2_dll_reset(struct nv50_ramseq *hwsq)
151{
152 ram_mask(hwsq, mr[0], 0x100, 0x100);
153 ram_mask(hwsq, mr[0], 0x100, 0x000);
154 ram_nsec(hwsq, 24000);
155}
59 156
60static int 157static int
61nv50_ram_calc(struct nvkm_fb *pfb, u32 freq) 158nv50_ram_calc(struct nvkm_ram *base, u32 freq)
62{ 159{
63 struct nvkm_bios *bios = nvkm_bios(pfb); 160 struct nv50_ram *ram = nv50_ram(base);
64 struct nv50_ram *ram = (void *)pfb->ram;
65 struct nv50_ramseq *hwsq = &ram->hwsq; 161 struct nv50_ramseq *hwsq = &ram->hwsq;
162 struct nvkm_subdev *subdev = &ram->base.fb->subdev;
163 struct nvkm_bios *bios = subdev->device->bios;
66 struct nvbios_perfE perfE; 164 struct nvbios_perfE perfE;
67 struct nvbios_pll mpll; 165 struct nvbios_pll mpll;
68 struct { 166 struct nvkm_ram_data *next;
69 u32 data; 167 u8 ver, hdr, cnt, len, strap, size;
70 u8 size; 168 u32 data;
71 } ramcfg, timing; 169 u32 r100da0, r004008, unk710, unk714, unk718, unk71c;
72 u8 ver, hdr, cnt, len, strap;
73 int N1, M1, N2, M2, P; 170 int N1, M1, N2, M2, P;
74 int ret, i; 171 int ret, i;
172 u32 timing[9];
173
174 next = &ram->base.target;
175 next->freq = freq;
176 ram->base.next = next;
75 177
76 /* lookup closest matching performance table entry for frequency */ 178 /* lookup closest matching performance table entry for frequency */
77 i = 0; 179 i = 0;
78 do { 180 do {
79 ramcfg.data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt, 181 data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
80 &ramcfg.size, &perfE); 182 &size, &perfE);
81 if (!ramcfg.data || (ver < 0x25 || ver >= 0x40) || 183 if (!data || (ver < 0x25 || ver >= 0x40) ||
82 (ramcfg.size < 2)) { 184 (size < 2)) {
83 nv_error(pfb, "invalid/missing perftab entry\n"); 185 nvkm_error(subdev, "invalid/missing perftab entry\n");
84 return -EINVAL; 186 return -EINVAL;
85 } 187 }
86 } while (perfE.memory < freq); 188 } while (perfE.memory < freq);
87 189
190 nvbios_rammapEp_from_perf(bios, data, hdr, &next->bios);
191
88 /* locate specific data set for the attached memory */ 192 /* locate specific data set for the attached memory */
89 strap = nvbios_ramcfg_index(nv_subdev(pfb)); 193 strap = nvbios_ramcfg_index(subdev);
90 if (strap >= cnt) { 194 if (strap >= cnt) {
91 nv_error(pfb, "invalid ramcfg strap\n"); 195 nvkm_error(subdev, "invalid ramcfg strap\n");
92 return -EINVAL; 196 return -EINVAL;
93 } 197 }
94 198
95 ramcfg.data += hdr + (strap * ramcfg.size); 199 data = nvbios_rammapSp_from_perf(bios, data + hdr, size, strap,
200 &next->bios);
201 if (!data) {
202 nvkm_error(subdev, "invalid/missing rammap entry ");
203 return -EINVAL;
204 }
96 205
97 /* lookup memory timings, if bios says they're present */ 206 /* lookup memory timings, if bios says they're present */
98 strap = nv_ro08(bios, ramcfg.data + 0x01); 207 if (next->bios.ramcfg_timing != 0xff) {
99 if (strap != 0xff) { 208 data = nvbios_timingEp(bios, next->bios.ramcfg_timing,
100 timing.data = nvbios_timingEe(bios, strap, &ver, &hdr, 209 &ver, &hdr, &cnt, &len, &next->bios);
101 &cnt, &len); 210 if (!data || ver != 0x10 || hdr < 0x12) {
102 if (!timing.data || ver != 0x10 || hdr < 0x12) { 211 nvkm_error(subdev, "invalid/missing timing entry "
103 nv_error(pfb, "invalid/missing timing entry "
104 "%02x %04x %02x %02x\n", 212 "%02x %04x %02x %02x\n",
105 strap, timing.data, ver, hdr); 213 strap, data, ver, hdr);
106 return -EINVAL; 214 return -EINVAL;
107 } 215 }
108 } else {
109 timing.data = 0;
110 } 216 }
111 217
112 ret = ram_init(hwsq, nv_subdev(pfb)); 218 nv50_ram_timing_calc(ram, timing);
219
220 ret = ram_init(hwsq, subdev);
113 if (ret) 221 if (ret)
114 return ret; 222 return ret;
115 223
224 /* Determine ram-specific MR values */
225 ram->base.mr[0] = ram_rd32(hwsq, mr[0]);
226 ram->base.mr[1] = ram_rd32(hwsq, mr[1]);
227 ram->base.mr[2] = ram_rd32(hwsq, mr[2]);
228
229 switch (ram->base.type) {
230 case NVKM_RAM_TYPE_GDDR3:
231 ret = nvkm_gddr3_calc(&ram->base);
232 break;
233 default:
234 ret = -ENOSYS;
235 break;
236 }
237
238 if (ret)
239 return ret;
240
241 /* Always disable this bit during reclock */
242 ram_mask(hwsq, 0x100200, 0x00000800, 0x00000000);
243
116 ram_wait(hwsq, 0x01, 0x00); /* wait for !vblank */ 244 ram_wait(hwsq, 0x01, 0x00); /* wait for !vblank */
117 ram_wait(hwsq, 0x01, 0x01); /* wait for vblank */ 245 ram_wait(hwsq, 0x01, 0x01); /* wait for vblank */
118 ram_wr32(hwsq, 0x611200, 0x00003300); 246 ram_wr32(hwsq, 0x611200, 0x00003300);
@@ -120,6 +248,7 @@ nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
120 ram_nsec(hwsq, 8000); 248 ram_nsec(hwsq, 8000);
121 ram_setf(hwsq, 0x10, 0x00); /* disable fb */ 249 ram_setf(hwsq, 0x10, 0x00); /* disable fb */
122 ram_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */ 250 ram_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
251 ram_nsec(hwsq, 2000);
123 252
124 ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */ 253 ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
125 ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */ 254 ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
@@ -129,97 +258,149 @@ nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
129 258
130 ret = nvbios_pll_parse(bios, 0x004008, &mpll); 259 ret = nvbios_pll_parse(bios, 0x004008, &mpll);
131 mpll.vco2.max_freq = 0; 260 mpll.vco2.max_freq = 0;
132 if (ret == 0) { 261 if (ret >= 0) {
133 ret = nv04_pll_calc(nv_subdev(pfb), &mpll, freq, 262 ret = nv04_pll_calc(subdev, &mpll, freq,
134 &N1, &M1, &N2, &M2, &P); 263 &N1, &M1, &N2, &M2, &P);
135 if (ret == 0) 264 if (ret <= 0)
136 ret = -EINVAL; 265 ret = -EINVAL;
137 } 266 }
138 267
139 if (ret < 0) 268 if (ret < 0)
140 return ret; 269 return ret;
141 270
271 /* XXX: 750MHz seems rather arbitrary */
272 if (freq <= 750000) {
273 r100da0 = 0x00000010;
274 r004008 = 0x90000000;
275 } else {
276 r100da0 = 0x00000000;
277 r004008 = 0x80000000;
278 }
279
280 r004008 |= (mpll.bias_p << 19) | (P << 22) | (P << 16);
281
142 ram_mask(hwsq, 0x00c040, 0xc000c000, 0x0000c000); 282 ram_mask(hwsq, 0x00c040, 0xc000c000, 0x0000c000);
143 ram_mask(hwsq, 0x004008, 0x00000200, 0x00000200); 283 /* XXX: Is rammap_00_16_40 the DLL bit we've seen in GT215? Why does
284 * it have a different rammap bit from DLLoff? */
285 ram_mask(hwsq, 0x004008, 0x00004200, 0x00000200 |
286 next->bios.rammap_00_16_40 << 14);
144 ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1); 287 ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1);
145 ram_mask(hwsq, 0x004008, 0x81ff0000, 0x80000000 | (mpll.bias_p << 19) | 288 ram_mask(hwsq, 0x004008, 0x91ff0000, r004008);
146 (P << 22) | (P << 16)); 289 if (subdev->device->chipset >= 0x96)
147#if QFX5800NVA0 290 ram_wr32(hwsq, 0x100da0, r100da0);
148 for (i = 0; i < 8; i++) 291 ram_nsec(hwsq, 64000); /*XXX*/
149 ram_mask(hwsq, 0x100da0[i], 0x00000000, 0x00000000); /*XXX*/ 292 ram_nsec(hwsq, 32000); /*XXX*/
150#endif 293
151 ram_nsec(hwsq, 96000); /*XXX*/
152 ram_mask(hwsq, 0x004008, 0x00002200, 0x00002000); 294 ram_mask(hwsq, 0x004008, 0x00002200, 0x00002000);
153 295
154 ram_wr32(hwsq, 0x1002dc, 0x00000000); /* disable self-refresh */ 296 ram_wr32(hwsq, 0x1002dc, 0x00000000); /* disable self-refresh */
297 ram_wr32(hwsq, 0x1002d4, 0x00000001); /* disable self-refresh */
155 ram_wr32(hwsq, 0x100210, 0x80000000); /* enable auto-refresh */ 298 ram_wr32(hwsq, 0x100210, 0x80000000); /* enable auto-refresh */
156 299
157 ram_nsec(hwsq, 12000); 300 ram_nsec(hwsq, 12000);
158 301
159 switch (ram->base.type) { 302 switch (ram->base.type) {
160 case NV_MEM_TYPE_DDR2: 303 case NVKM_RAM_TYPE_DDR2:
161 ram_nuke(hwsq, mr[0]); /* force update */ 304 ram_nuke(hwsq, mr[0]); /* force update */
162 ram_mask(hwsq, mr[0], 0x000, 0x000); 305 ram_mask(hwsq, mr[0], 0x000, 0x000);
163 break; 306 break;
164 case NV_MEM_TYPE_GDDR3: 307 case NVKM_RAM_TYPE_GDDR3:
165 ram_mask(hwsq, mr[2], 0x000, 0x000); 308 ram_nuke(hwsq, mr[1]); /* force update */
309 ram_wr32(hwsq, mr[1], ram->base.mr[1]);
166 ram_nuke(hwsq, mr[0]); /* force update */ 310 ram_nuke(hwsq, mr[0]); /* force update */
167 ram_mask(hwsq, mr[0], 0x000, 0x000); 311 ram_wr32(hwsq, mr[0], ram->base.mr[0]);
168 break; 312 break;
169 default: 313 default:
170 break; 314 break;
171 } 315 }
172 316
173 ram_mask(hwsq, timing[3], 0x00000000, 0x00000000); /*XXX*/ 317 ram_mask(hwsq, timing[3], 0xffffffff, timing[3]);
174 ram_mask(hwsq, timing[1], 0x00000000, 0x00000000); /*XXX*/ 318 ram_mask(hwsq, timing[1], 0xffffffff, timing[1]);
175 ram_mask(hwsq, timing[6], 0x00000000, 0x00000000); /*XXX*/ 319 ram_mask(hwsq, timing[6], 0xffffffff, timing[6]);
176 ram_mask(hwsq, timing[7], 0x00000000, 0x00000000); /*XXX*/ 320 ram_mask(hwsq, timing[7], 0xffffffff, timing[7]);
177 ram_mask(hwsq, timing[8], 0x00000000, 0x00000000); /*XXX*/ 321 ram_mask(hwsq, timing[8], 0xffffffff, timing[8]);
178 ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/ 322 ram_mask(hwsq, timing[0], 0xffffffff, timing[0]);
179 ram_mask(hwsq, timing[2], 0x00000000, 0x00000000); /*XXX*/ 323 ram_mask(hwsq, timing[2], 0xffffffff, timing[2]);
180 ram_mask(hwsq, timing[4], 0x00000000, 0x00000000); /*XXX*/ 324 ram_mask(hwsq, timing[4], 0xffffffff, timing[4]);
181 ram_mask(hwsq, timing[5], 0x00000000, 0x00000000); /*XXX*/ 325 ram_mask(hwsq, timing[5], 0xffffffff, timing[5]);
182 326
183 ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/ 327 if (!next->bios.ramcfg_00_03_02)
184 328 ram_mask(hwsq, 0x10021c, 0x00010000, 0x00000000);
185#if QFX5800NVA0 329 ram_mask(hwsq, 0x100200, 0x00001000, !next->bios.ramcfg_00_04_02 << 12);
186 ram_nuke(hwsq, 0x100e24); 330
187 ram_mask(hwsq, 0x100e24, 0x00000000, 0x00000000); 331 /* XXX: A lot of this could be "chipset"/"ram type" specific stuff */
188 ram_nuke(hwsq, 0x100e20); 332 unk710 = ram_rd32(hwsq, 0x100710) & ~0x00000101;
189 ram_mask(hwsq, 0x100e20, 0x00000000, 0x00000000); 333 unk714 = ram_rd32(hwsq, 0x100714) & ~0xf0000020;
190#endif 334 unk718 = ram_rd32(hwsq, 0x100718) & ~0x00000100;
335 unk71c = ram_rd32(hwsq, 0x10071c) & ~0x00000100;
336
337 if ( next->bios.ramcfg_00_03_01)
338 unk71c |= 0x00000100;
339 if ( next->bios.ramcfg_00_03_02)
340 unk710 |= 0x00000100;
341 if (!next->bios.ramcfg_00_03_08) {
342 unk710 |= 0x1;
343 unk714 |= 0x20;
344 }
345 if ( next->bios.ramcfg_00_04_04)
346 unk714 |= 0x70000000;
347 if ( next->bios.ramcfg_00_04_20)
348 unk718 |= 0x00000100;
349
350 ram_mask(hwsq, 0x100714, 0xffffffff, unk714);
351 ram_mask(hwsq, 0x10071c, 0xffffffff, unk71c);
352 ram_mask(hwsq, 0x100718, 0xffffffff, unk718);
353 ram_mask(hwsq, 0x100710, 0xffffffff, unk710);
354
355 if (next->bios.rammap_00_16_20) {
356 ram_wr32(hwsq, 0x1005a0, next->bios.ramcfg_00_07 << 16 |
357 next->bios.ramcfg_00_06 << 8 |
358 next->bios.ramcfg_00_05);
359 ram_wr32(hwsq, 0x1005a4, next->bios.ramcfg_00_09 << 8 |
360 next->bios.ramcfg_00_08);
361 ram_mask(hwsq, 0x10053c, 0x00001000, 0x00000000);
362 } else {
363 ram_mask(hwsq, 0x10053c, 0x00001000, 0x00001000);
364 }
365 ram_mask(hwsq, mr[1], 0xffffffff, ram->base.mr[1]);
191 366
192 ram_mask(hwsq, mr[0], 0x100, 0x100); 367 /* Reset DLL */
193 ram_mask(hwsq, mr[0], 0x100, 0x000); 368 if (!next->bios.ramcfg_DLLoff)
369 nvkm_sddr2_dll_reset(hwsq);
194 370
195 ram_setf(hwsq, 0x10, 0x01); /* enable fb */ 371 ram_setf(hwsq, 0x10, 0x01); /* enable fb */
196 ram_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */ 372 ram_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
197 ram_wr32(hwsq, 0x611200, 0x00003330); 373 ram_wr32(hwsq, 0x611200, 0x00003330);
198 ram_wr32(hwsq, 0x002504, 0x00000000); /* un-block fifo */ 374 ram_wr32(hwsq, 0x002504, 0x00000000); /* un-block fifo */
375
376 if (next->bios.rammap_00_17_02)
377 ram_mask(hwsq, 0x100200, 0x00000800, 0x00000800);
378 if (!next->bios.rammap_00_16_40)
379 ram_mask(hwsq, 0x004008, 0x00004000, 0x00000000);
380 if (next->bios.ramcfg_00_03_02)
381 ram_mask(hwsq, 0x10021c, 0x00010000, 0x00010000);
382
199 return 0; 383 return 0;
200} 384}
201 385
202static int 386static int
203nv50_ram_prog(struct nvkm_fb *pfb) 387nv50_ram_prog(struct nvkm_ram *base)
204{ 388{
205 struct nvkm_device *device = nv_device(pfb); 389 struct nv50_ram *ram = nv50_ram(base);
206 struct nv50_ram *ram = (void *)pfb->ram; 390 struct nvkm_device *device = ram->base.fb->subdev.device;
207 struct nv50_ramseq *hwsq = &ram->hwsq; 391 ram_exec(&ram->hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
208
209 ram_exec(hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
210 return 0; 392 return 0;
211} 393}
212 394
213static void 395static void
214nv50_ram_tidy(struct nvkm_fb *pfb) 396nv50_ram_tidy(struct nvkm_ram *base)
215{ 397{
216 struct nv50_ram *ram = (void *)pfb->ram; 398 struct nv50_ram *ram = nv50_ram(base);
217 struct nv50_ramseq *hwsq = &ram->hwsq; 399 ram_exec(&ram->hwsq, false);
218 ram_exec(hwsq, false);
219} 400}
220 401
221void 402void
222__nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem *mem) 403__nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem)
223{ 404{
224 struct nvkm_mm_node *this; 405 struct nvkm_mm_node *this;
225 406
@@ -227,14 +408,14 @@ __nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem *mem)
227 this = list_first_entry(&mem->regions, typeof(*this), rl_entry); 408 this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
228 409
229 list_del(&this->rl_entry); 410 list_del(&this->rl_entry);
230 nvkm_mm_free(&pfb->vram, &this); 411 nvkm_mm_free(&ram->vram, &this);
231 } 412 }
232 413
233 nvkm_mm_free(&pfb->tags, &mem->tag); 414 nvkm_mm_free(&ram->tags, &mem->tag);
234} 415}
235 416
236void 417void
237nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem) 418nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
238{ 419{
239 struct nvkm_mem *mem = *pmem; 420 struct nvkm_mem *mem = *pmem;
240 421
@@ -242,19 +423,19 @@ nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
242 if (unlikely(mem == NULL)) 423 if (unlikely(mem == NULL))
243 return; 424 return;
244 425
245 mutex_lock(&pfb->base.mutex); 426 mutex_lock(&ram->fb->subdev.mutex);
246 __nv50_ram_put(pfb, mem); 427 __nv50_ram_put(ram, mem);
247 mutex_unlock(&pfb->base.mutex); 428 mutex_unlock(&ram->fb->subdev.mutex);
248 429
249 kfree(mem); 430 kfree(mem);
250} 431}
251 432
252int 433int
253nv50_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin, 434nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
254 u32 memtype, struct nvkm_mem **pmem) 435 u32 memtype, struct nvkm_mem **pmem)
255{ 436{
256 struct nvkm_mm *heap = &pfb->vram; 437 struct nvkm_mm *heap = &ram->vram;
257 struct nvkm_mm *tags = &pfb->tags; 438 struct nvkm_mm *tags = &ram->tags;
258 struct nvkm_mm_node *r; 439 struct nvkm_mm_node *r;
259 struct nvkm_mem *mem; 440 struct nvkm_mem *mem;
260 int comp = (memtype & 0x300) >> 8; 441 int comp = (memtype & 0x300) >> 8;
@@ -262,17 +443,17 @@ nv50_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
262 int back = (memtype & 0x800); 443 int back = (memtype & 0x800);
263 int min, max, ret; 444 int min, max, ret;
264 445
265 max = (size >> 12); 446 max = (size >> NVKM_RAM_MM_SHIFT);
266 min = ncmin ? (ncmin >> 12) : max; 447 min = ncmin ? (ncmin >> NVKM_RAM_MM_SHIFT) : max;
267 align >>= 12; 448 align >>= NVKM_RAM_MM_SHIFT;
268 449
269 mem = kzalloc(sizeof(*mem), GFP_KERNEL); 450 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
270 if (!mem) 451 if (!mem)
271 return -ENOMEM; 452 return -ENOMEM;
272 453
273 mutex_lock(&pfb->base.mutex); 454 mutex_lock(&ram->fb->subdev.mutex);
274 if (comp) { 455 if (comp) {
275 if (align == 16) { 456 if (align == (1 << (16 - NVKM_RAM_MM_SHIFT))) {
276 int n = (max >> 4) * comp; 457 int n = (max >> 4) * comp;
277 458
278 ret = nvkm_mm_head(tags, 0, 1, n, n, 1, &mem->tag); 459 ret = nvkm_mm_head(tags, 0, 1, n, n, 1, &mem->tag);
@@ -295,34 +476,45 @@ nv50_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
295 else 476 else
296 ret = nvkm_mm_head(heap, 0, type, max, min, align, &r); 477 ret = nvkm_mm_head(heap, 0, type, max, min, align, &r);
297 if (ret) { 478 if (ret) {
298 mutex_unlock(&pfb->base.mutex); 479 mutex_unlock(&ram->fb->subdev.mutex);
299 pfb->ram->put(pfb, &mem); 480 ram->func->put(ram, &mem);
300 return ret; 481 return ret;
301 } 482 }
302 483
303 list_add_tail(&r->rl_entry, &mem->regions); 484 list_add_tail(&r->rl_entry, &mem->regions);
304 max -= r->length; 485 max -= r->length;
305 } while (max); 486 } while (max);
306 mutex_unlock(&pfb->base.mutex); 487 mutex_unlock(&ram->fb->subdev.mutex);
307 488
308 r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry); 489 r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
309 mem->offset = (u64)r->offset << 12; 490 mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
310 *pmem = mem; 491 *pmem = mem;
311 return 0; 492 return 0;
312} 493}
313 494
495static const struct nvkm_ram_func
496nv50_ram_func = {
497 .get = nv50_ram_get,
498 .put = nv50_ram_put,
499 .calc = nv50_ram_calc,
500 .prog = nv50_ram_prog,
501 .tidy = nv50_ram_tidy,
502};
503
314static u32 504static u32
315nv50_fb_vram_rblock(struct nvkm_fb *pfb, struct nvkm_ram *ram) 505nv50_fb_vram_rblock(struct nvkm_ram *ram)
316{ 506{
507 struct nvkm_subdev *subdev = &ram->fb->subdev;
508 struct nvkm_device *device = subdev->device;
317 int colbits, rowbitsa, rowbitsb, banks; 509 int colbits, rowbitsa, rowbitsb, banks;
318 u64 rowsize, predicted; 510 u64 rowsize, predicted;
319 u32 r0, r4, rt, rblock_size; 511 u32 r0, r4, rt, rblock_size;
320 512
321 r0 = nv_rd32(pfb, 0x100200); 513 r0 = nvkm_rd32(device, 0x100200);
322 r4 = nv_rd32(pfb, 0x100204); 514 r4 = nvkm_rd32(device, 0x100204);
323 rt = nv_rd32(pfb, 0x100250); 515 rt = nvkm_rd32(device, 0x100250);
324 nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", 516 nvkm_debug(subdev, "memcfg %08x %08x %08x %08x\n",
325 r0, r4, rt, nv_rd32(pfb, 0x001540)); 517 r0, r4, rt, nvkm_rd32(device, 0x001540));
326 518
327 colbits = (r4 & 0x0000f000) >> 12; 519 colbits = (r4 & 0x0000f000) >> 12;
328 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; 520 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
@@ -335,103 +527,94 @@ nv50_fb_vram_rblock(struct nvkm_fb *pfb, struct nvkm_ram *ram)
335 predicted += rowsize << rowbitsb; 527 predicted += rowsize << rowbitsb;
336 528
337 if (predicted != ram->size) { 529 if (predicted != ram->size) {
338 nv_warn(pfb, "memory controller reports %d MiB VRAM\n", 530 nvkm_warn(subdev, "memory controller reports %d MiB VRAM\n",
339 (u32)(ram->size >> 20)); 531 (u32)(ram->size >> 20));
340 } 532 }
341 533
342 rblock_size = rowsize; 534 rblock_size = rowsize;
343 if (rt & 1) 535 if (rt & 1)
344 rblock_size *= 3; 536 rblock_size *= 3;
345 537
346 nv_debug(pfb, "rblock %d bytes\n", rblock_size); 538 nvkm_debug(subdev, "rblock %d bytes\n", rblock_size);
347 return rblock_size; 539 return rblock_size;
348} 540}
349 541
350int 542int
351nv50_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine, 543nv50_ram_ctor(const struct nvkm_ram_func *func,
352 struct nvkm_oclass *oclass, int length, void **pobject) 544 struct nvkm_fb *fb, struct nvkm_ram *ram)
353{ 545{
354 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ 546 struct nvkm_device *device = fb->subdev.device;
355 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ 547 struct nvkm_bios *bios = device->bios;
356 struct nvkm_bios *bios = nvkm_bios(parent); 548 const u32 rsvd_head = ( 256 * 1024); /* vga memory */
357 struct nvkm_fb *pfb = nvkm_fb(parent); 549 const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
358 struct nvkm_ram *ram; 550 u64 size = nvkm_rd32(device, 0x10020c);
551 u32 tags = nvkm_rd32(device, 0x100320);
552 enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
359 int ret; 553 int ret;
360 554
361 ret = nvkm_ram_create_(parent, engine, oclass, length, pobject); 555 switch (nvkm_rd32(device, 0x100714) & 0x00000007) {
362 ram = *pobject; 556 case 0: type = NVKM_RAM_TYPE_DDR1; break;
363 if (ret)
364 return ret;
365
366 ram->size = nv_rd32(pfb, 0x10020c);
367 ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
368
369 ram->part_mask = (nv_rd32(pfb, 0x001540) & 0x00ff0000) >> 16;
370 ram->parts = hweight8(ram->part_mask);
371
372 switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
373 case 0: ram->type = NV_MEM_TYPE_DDR1; break;
374 case 1: 557 case 1:
375 if (nvkm_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3) 558 if (nvkm_fb_bios_memtype(bios) == NVKM_RAM_TYPE_DDR3)
376 ram->type = NV_MEM_TYPE_DDR3; 559 type = NVKM_RAM_TYPE_DDR3;
377 else 560 else
378 ram->type = NV_MEM_TYPE_DDR2; 561 type = NVKM_RAM_TYPE_DDR2;
379 break; 562 break;
380 case 2: ram->type = NV_MEM_TYPE_GDDR3; break; 563 case 2: type = NVKM_RAM_TYPE_GDDR3; break;
381 case 3: ram->type = NV_MEM_TYPE_GDDR4; break; 564 case 3: type = NVKM_RAM_TYPE_GDDR4; break;
382 case 4: ram->type = NV_MEM_TYPE_GDDR5; break; 565 case 4: type = NVKM_RAM_TYPE_GDDR5; break;
383 default: 566 default:
384 break; 567 break;
385 } 568 }
386 569
387 ret = nvkm_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) - 570 size = (size & 0x000000ff) << 32 | (size & 0xffffff00);
388 (rsvd_head + rsvd_tail), 571
389 nv50_fb_vram_rblock(pfb, ram) >> 12); 572 ret = nvkm_ram_ctor(func, fb, type, size, tags, ram);
390 if (ret) 573 if (ret)
391 return ret; 574 return ret;
392 575
393 ram->ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1; 576 ram->part_mask = (nvkm_rd32(device, 0x001540) & 0x00ff0000) >> 16;
394 ram->tags = nv_rd32(pfb, 0x100320); 577 ram->parts = hweight8(ram->part_mask);
395 ram->get = nv50_ram_get; 578 ram->ranks = (nvkm_rd32(device, 0x100200) & 0x4) ? 2 : 1;
396 ram->put = nv50_ram_put; 579 nvkm_mm_fini(&ram->vram);
397 return 0; 580
581 return nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
582 (size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
583 nv50_fb_vram_rblock(ram) >> NVKM_RAM_MM_SHIFT);
398} 584}
399 585
400static int 586int
401nv50_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 587nv50_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
402 struct nvkm_oclass *oclass, void *data, u32 datasize,
403 struct nvkm_object **pobject)
404{ 588{
405 struct nv50_ram *ram; 589 struct nv50_ram *ram;
406 int ret, i; 590 int ret, i;
407 591
408 ret = nv50_ram_create(parent, engine, oclass, &ram); 592 if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
409 *pobject = nv_object(ram); 593 return -ENOMEM;
594 *pram = &ram->base;
595
596 ret = nv50_ram_ctor(&nv50_ram_func, fb, &ram->base);
410 if (ret) 597 if (ret)
411 return ret; 598 return ret;
412 599
413 switch (ram->base.type) {
414 case NV_MEM_TYPE_DDR2:
415 case NV_MEM_TYPE_GDDR3:
416 ram->base.calc = nv50_ram_calc;
417 ram->base.prog = nv50_ram_prog;
418 ram->base.tidy = nv50_ram_tidy;
419 break;
420 default:
421 nv_warn(ram, "reclocking of this ram type unsupported\n");
422 return 0;
423 }
424
425 ram->hwsq.r_0x002504 = hwsq_reg(0x002504); 600 ram->hwsq.r_0x002504 = hwsq_reg(0x002504);
426 ram->hwsq.r_0x00c040 = hwsq_reg(0x00c040); 601 ram->hwsq.r_0x00c040 = hwsq_reg(0x00c040);
427 ram->hwsq.r_0x004008 = hwsq_reg(0x004008); 602 ram->hwsq.r_0x004008 = hwsq_reg(0x004008);
428 ram->hwsq.r_0x00400c = hwsq_reg(0x00400c); 603 ram->hwsq.r_0x00400c = hwsq_reg(0x00400c);
604 ram->hwsq.r_0x100200 = hwsq_reg(0x100200);
429 ram->hwsq.r_0x100210 = hwsq_reg(0x100210); 605 ram->hwsq.r_0x100210 = hwsq_reg(0x100210);
606 ram->hwsq.r_0x10021c = hwsq_reg(0x10021c);
430 ram->hwsq.r_0x1002d0 = hwsq_reg(0x1002d0); 607 ram->hwsq.r_0x1002d0 = hwsq_reg(0x1002d0);
431 ram->hwsq.r_0x1002d4 = hwsq_reg(0x1002d4); 608 ram->hwsq.r_0x1002d4 = hwsq_reg(0x1002d4);
432 ram->hwsq.r_0x1002dc = hwsq_reg(0x1002dc); 609 ram->hwsq.r_0x1002dc = hwsq_reg(0x1002dc);
433 for (i = 0; i < 8; i++) 610 ram->hwsq.r_0x10053c = hwsq_reg(0x10053c);
434 ram->hwsq.r_0x100da0[i] = hwsq_reg(0x100da0 + (i * 0x04)); 611 ram->hwsq.r_0x1005a0 = hwsq_reg(0x1005a0);
612 ram->hwsq.r_0x1005a4 = hwsq_reg(0x1005a4);
613 ram->hwsq.r_0x100710 = hwsq_reg(0x100710);
614 ram->hwsq.r_0x100714 = hwsq_reg(0x100714);
615 ram->hwsq.r_0x100718 = hwsq_reg(0x100718);
616 ram->hwsq.r_0x10071c = hwsq_reg(0x10071c);
617 ram->hwsq.r_0x100da0 = hwsq_stride(0x100da0, 4, ram->base.part_mask);
435 ram->hwsq.r_0x100e20 = hwsq_reg(0x100e20); 618 ram->hwsq.r_0x100e20 = hwsq_reg(0x100e20);
436 ram->hwsq.r_0x100e24 = hwsq_reg(0x100e24); 619 ram->hwsq.r_0x100e24 = hwsq_reg(0x100e24);
437 ram->hwsq.r_0x611200 = hwsq_reg(0x611200); 620 ram->hwsq.r_0x611200 = hwsq_reg(0x611200);
@@ -453,13 +636,3 @@ nv50_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
453 636
454 return 0; 637 return 0;
455} 638}
456
457struct nvkm_oclass
458nv50_ram_oclass = {
459 .ofuncs = &(struct nvkm_ofuncs) {
460 .ctor = nv50_ram_ctor,
461 .dtor = _nvkm_ram_dtor,
462 .init = _nvkm_ram_init,
463 .fini = _nvkm_ram_fini,
464 }
465};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
index afab42df28d4..86bf67456b14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
@@ -65,7 +65,7 @@ nvkm_sddr2_calc(struct nvkm_ram *ram)
65 case 0x10: 65 case 0x10:
66 CL = ram->next->bios.timing_10_CL; 66 CL = ram->next->bios.timing_10_CL;
67 WR = ram->next->bios.timing_10_WR; 67 WR = ram->next->bios.timing_10_WR;
68 DLL = !ram->next->bios.ramcfg_10_DLLoff; 68 DLL = !ram->next->bios.ramcfg_DLLoff;
69 ODT = ram->next->bios.timing_10_ODT & 3; 69 ODT = ram->next->bios.timing_10_ODT & 3;
70 break; 70 break;
71 case 0x20: 71 case 0x20:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
index 10844355c3f3..b4edc97dc8c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
@@ -53,7 +53,7 @@ static const struct ramxlat
53ramddr3_wr[] = { 53ramddr3_wr[] = {
54 { 5, 1 }, { 6, 2 }, { 7, 3 }, { 8, 4 }, { 10, 5 }, { 12, 6 }, 54 { 5, 1 }, { 6, 2 }, { 7, 3 }, { 8, 4 }, { 10, 5 }, { 12, 6 },
55 /* the below are mentioned in some, but not all, ddr3 docs */ 55 /* the below are mentioned in some, but not all, ddr3 docs */
56 { 14, 7 }, { 16, 0 }, 56 { 14, 7 }, { 15, 7 }, { 16, 0 },
57 { -1 } 57 { -1 }
58}; 58};
59 59
@@ -61,7 +61,7 @@ static const struct ramxlat
61ramddr3_cwl[] = { 61ramddr3_cwl[] = {
62 { 5, 0 }, { 6, 1 }, { 7, 2 }, { 8, 3 }, 62 { 5, 0 }, { 6, 1 }, { 7, 2 }, { 8, 3 },
63 /* the below are mentioned in some, but not all, ddr3 docs */ 63 /* the below are mentioned in some, but not all, ddr3 docs */
64 { 9, 4 }, 64 { 9, 4 }, { 10, 5 },
65 { -1 } 65 { -1 }
66}; 66};
67 67
@@ -79,7 +79,7 @@ nvkm_sddr3_calc(struct nvkm_ram *ram)
79 CWL = ram->next->bios.timing_10_CWL; 79 CWL = ram->next->bios.timing_10_CWL;
80 CL = ram->next->bios.timing_10_CL; 80 CL = ram->next->bios.timing_10_CL;
81 WR = ram->next->bios.timing_10_WR; 81 WR = ram->next->bios.timing_10_WR;
82 DLL = !ram->next->bios.ramcfg_10_DLLoff; 82 DLL = !ram->next->bios.ramcfg_DLLoff;
83 ODT = ram->next->bios.timing_10_ODT; 83 ODT = ram->next->bios.timing_10_ODT;
84 break; 84 break;
85 case 0x20: 85 case 0x20:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
index b7b7193bbce7..f4144979a79c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
@@ -21,31 +21,34 @@
21 * 21 *
22 * Authors: Martin Peres 22 * Authors: Martin Peres
23 */ 23 */
24#include <subdev/fuse.h> 24#include "priv.h"
25 25
26int 26u32
27_nvkm_fuse_init(struct nvkm_object *object) 27nvkm_fuse_read(struct nvkm_fuse *fuse, u32 addr)
28{ 28{
29 struct nvkm_fuse *fuse = (void *)object; 29 return fuse->func->read(fuse, addr);
30 return nvkm_subdev_init(&fuse->base);
31} 30}
32 31
33void 32static void *
34_nvkm_fuse_dtor(struct nvkm_object *object) 33nvkm_fuse_dtor(struct nvkm_subdev *subdev)
35{ 34{
36 struct nvkm_fuse *fuse = (void *)object; 35 return nvkm_fuse(subdev);
37 nvkm_subdev_destroy(&fuse->base);
38} 36}
39 37
38static const struct nvkm_subdev_func
39nvkm_fuse = {
40 .dtor = nvkm_fuse_dtor,
41};
42
40int 43int
41nvkm_fuse_create_(struct nvkm_object *parent, struct nvkm_object *engine, 44nvkm_fuse_new_(const struct nvkm_fuse_func *func, struct nvkm_device *device,
42 struct nvkm_oclass *oclass, int length, void **pobject) 45 int index, struct nvkm_fuse **pfuse)
43{ 46{
44 struct nvkm_fuse *fuse; 47 struct nvkm_fuse *fuse;
45 int ret; 48 if (!(fuse = *pfuse = kzalloc(sizeof(*fuse), GFP_KERNEL)))
46 49 return -ENOMEM;
47 ret = nvkm_subdev_create_(parent, engine, oclass, 0, "FUSE", 50 nvkm_subdev_ctor(&nvkm_fuse, device, index, 0, &fuse->subdev);
48 "fuse", length, pobject); 51 fuse->func = func;
49 fuse = *pobject; 52 spin_lock_init(&fuse->lock);
50 return ret; 53 return 0;
51} 54}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
index 393ef3a0faaf..13671fedc805 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
@@ -23,56 +23,31 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26struct gf100_fuse_priv {
27 struct nvkm_fuse base;
28
29 spinlock_t fuse_enable_lock;
30};
31
32static u32 26static u32
33gf100_fuse_rd32(struct nvkm_object *object, u64 addr) 27gf100_fuse_read(struct nvkm_fuse *fuse, u32 addr)
34{ 28{
35 struct gf100_fuse_priv *priv = (void *)object; 29 struct nvkm_device *device = fuse->subdev.device;
36 unsigned long flags; 30 unsigned long flags;
37 u32 fuse_enable, unk, val; 31 u32 fuse_enable, unk, val;
38 32
39 /* racy if another part of nvkm start writing to these regs */ 33 /* racy if another part of nvkm start writing to these regs */
40 spin_lock_irqsave(&priv->fuse_enable_lock, flags); 34 spin_lock_irqsave(&fuse->lock, flags);
41 fuse_enable = nv_mask(priv, 0x22400, 0x800, 0x800); 35 fuse_enable = nvkm_mask(device, 0x022400, 0x800, 0x800);
42 unk = nv_mask(priv, 0x21000, 0x1, 0x1); 36 unk = nvkm_mask(device, 0x021000, 0x1, 0x1);
43 val = nv_rd32(priv, 0x21100 + addr); 37 val = nvkm_rd32(device, 0x021100 + addr);
44 nv_wr32(priv, 0x21000, unk); 38 nvkm_wr32(device, 0x021000, unk);
45 nv_wr32(priv, 0x22400, fuse_enable); 39 nvkm_wr32(device, 0x022400, fuse_enable);
46 spin_unlock_irqrestore(&priv->fuse_enable_lock, flags); 40 spin_unlock_irqrestore(&fuse->lock, flags);
47 return val; 41 return val;
48} 42}
49 43
44static const struct nvkm_fuse_func
45gf100_fuse = {
46 .read = gf100_fuse_read,
47};
50 48
51static int 49int
52gf100_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 50gf100_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
53 struct nvkm_oclass *oclass, void *data, u32 size,
54 struct nvkm_object **pobject)
55{ 51{
56 struct gf100_fuse_priv *priv; 52 return nvkm_fuse_new_(&gf100_fuse, device, index, pfuse);
57 int ret;
58
59 ret = nvkm_fuse_create(parent, engine, oclass, &priv);
60 *pobject = nv_object(priv);
61 if (ret)
62 return ret;
63
64 spin_lock_init(&priv->fuse_enable_lock);
65 return 0;
66} 53}
67
68struct nvkm_oclass
69gf100_fuse_oclass = {
70 .handle = NV_SUBDEV(FUSE, 0xC0),
71 .ofuncs = &(struct nvkm_ofuncs) {
72 .ctor = gf100_fuse_ctor,
73 .dtor = _nvkm_fuse_dtor,
74 .init = _nvkm_fuse_init,
75 .fini = _nvkm_fuse_fini,
76 .rd32 = gf100_fuse_rd32,
77 },
78};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
index 0b256aa4960f..9aff4ea04506 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
@@ -23,40 +23,20 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26struct gm107_fuse_priv {
27 struct nvkm_fuse base;
28};
29
30static u32 26static u32
31gm107_fuse_rd32(struct nvkm_object *object, u64 addr) 27gm107_fuse_read(struct nvkm_fuse *fuse, u32 addr)
32{ 28{
33 struct gf100_fuse_priv *priv = (void *)object; 29 struct nvkm_device *device = fuse->subdev.device;
34 return nv_rd32(priv, 0x21100 + addr); 30 return nvkm_rd32(device, 0x021100 + addr);
35} 31}
36 32
33static const struct nvkm_fuse_func
34gm107_fuse = {
35 .read = gm107_fuse_read,
36};
37 37
38static int 38int
39gm107_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 39gm107_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
40 struct nvkm_oclass *oclass, void *data, u32 size,
41 struct nvkm_object **pobject)
42{ 40{
43 struct gm107_fuse_priv *priv; 41 return nvkm_fuse_new_(&gm107_fuse, device, index, pfuse);
44 int ret;
45
46 ret = nvkm_fuse_create(parent, engine, oclass, &priv);
47 *pobject = nv_object(priv);
48
49 return ret;
50} 42}
51
52struct nvkm_oclass
53gm107_fuse_oclass = {
54 .handle = NV_SUBDEV(FUSE, 0x117),
55 .ofuncs = &(struct nvkm_ofuncs) {
56 .ctor = gm107_fuse_ctor,
57 .dtor = _nvkm_fuse_dtor,
58 .init = _nvkm_fuse_init,
59 .fini = _nvkm_fuse_fini,
60 .rd32 = gm107_fuse_rd32,
61 },
62};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
index 0d2afc426100..514c193db25d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
@@ -23,54 +23,29 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26struct nv50_fuse_priv {
27 struct nvkm_fuse base;
28
29 spinlock_t fuse_enable_lock;
30};
31
32static u32 26static u32
33nv50_fuse_rd32(struct nvkm_object *object, u64 addr) 27nv50_fuse_read(struct nvkm_fuse *fuse, u32 addr)
34{ 28{
35 struct nv50_fuse_priv *priv = (void *)object; 29 struct nvkm_device *device = fuse->subdev.device;
36 unsigned long flags; 30 unsigned long flags;
37 u32 fuse_enable, val; 31 u32 fuse_enable, val;
38 32
39 /* racy if another part of nvkm start writing to this reg */ 33 /* racy if another part of nvkm start writing to this reg */
40 spin_lock_irqsave(&priv->fuse_enable_lock, flags); 34 spin_lock_irqsave(&fuse->lock, flags);
41 fuse_enable = nv_mask(priv, 0x1084, 0x800, 0x800); 35 fuse_enable = nvkm_mask(device, 0x001084, 0x800, 0x800);
42 val = nv_rd32(priv, 0x21000 + addr); 36 val = nvkm_rd32(device, 0x021000 + addr);
43 nv_wr32(priv, 0x1084, fuse_enable); 37 nvkm_wr32(device, 0x001084, fuse_enable);
44 spin_unlock_irqrestore(&priv->fuse_enable_lock, flags); 38 spin_unlock_irqrestore(&fuse->lock, flags);
45 return val; 39 return val;
46} 40}
47 41
42static const struct nvkm_fuse_func
43nv50_fuse = {
44 .read = &nv50_fuse_read,
45};
48 46
49static int 47int
50nv50_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 48nv50_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
51 struct nvkm_oclass *oclass, void *data, u32 size,
52 struct nvkm_object **pobject)
53{ 49{
54 struct nv50_fuse_priv *priv; 50 return nvkm_fuse_new_(&nv50_fuse, device, index, pfuse);
55 int ret;
56
57 ret = nvkm_fuse_create(parent, engine, oclass, &priv);
58 *pobject = nv_object(priv);
59 if (ret)
60 return ret;
61
62 spin_lock_init(&priv->fuse_enable_lock);
63 return 0;
64} 51}
65
66struct nvkm_oclass
67nv50_fuse_oclass = {
68 .handle = NV_SUBDEV(FUSE, 0x50),
69 .ofuncs = &(struct nvkm_ofuncs) {
70 .ctor = nv50_fuse_ctor,
71 .dtor = _nvkm_fuse_dtor,
72 .init = _nvkm_fuse_init,
73 .fini = _nvkm_fuse_fini,
74 .rd32 = nv50_fuse_rd32,
75 },
76};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
index 7e050f789384..b0390b540ef5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
@@ -1,7 +1,12 @@
1#ifndef __NVKM_FUSE_PRIV_H__ 1#ifndef __NVKM_FUSE_PRIV_H__
2#define __NVKM_FUSE_PRIV_H__ 2#define __NVKM_FUSE_PRIV_H__
3#define nvkm_fuse(p) container_of((p), struct nvkm_fuse, subdev)
3#include <subdev/fuse.h> 4#include <subdev/fuse.h>
4 5
5int _nvkm_fuse_init(struct nvkm_object *object); 6struct nvkm_fuse_func {
6void _nvkm_fuse_dtor(struct nvkm_object *object); 7 u32 (*read)(struct nvkm_fuse *, u32 addr);
8};
9
10int nvkm_fuse_new_(const struct nvkm_fuse_func *, struct nvkm_device *,
11 int index, struct nvkm_fuse **);
7#endif 12#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
index ea42a9ed1821..e52c5e87f242 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
@@ -2,5 +2,5 @@ nvkm-y += nvkm/subdev/gpio/base.o
2nvkm-y += nvkm/subdev/gpio/nv10.o 2nvkm-y += nvkm/subdev/gpio/nv10.o
3nvkm-y += nvkm/subdev/gpio/nv50.o 3nvkm-y += nvkm/subdev/gpio/nv50.o
4nvkm-y += nvkm/subdev/gpio/g94.o 4nvkm-y += nvkm/subdev/gpio/g94.o
5nvkm-y += nvkm/subdev/gpio/gf110.o 5nvkm-y += nvkm/subdev/gpio/gf119.o
6nvkm-y += nvkm/subdev/gpio/gk104.o 6nvkm-y += nvkm/subdev/gpio/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
index dea58161ba46..d45ec99f0e38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
@@ -23,28 +23,33 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26#include <core/device.h>
27#include <core/notify.h> 26#include <core/notify.h>
28 27
29static int 28static int
30nvkm_gpio_drive(struct nvkm_gpio *gpio, int idx, int line, int dir, int out) 29nvkm_gpio_drive(struct nvkm_gpio *gpio, int idx, int line, int dir, int out)
31{ 30{
32 const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass; 31 return gpio->func->drive(gpio, line, dir, out);
33 return impl->drive ? impl->drive(gpio, line, dir, out) : -ENODEV;
34} 32}
35 33
36static int 34static int
37nvkm_gpio_sense(struct nvkm_gpio *gpio, int idx, int line) 35nvkm_gpio_sense(struct nvkm_gpio *gpio, int idx, int line)
38{ 36{
39 const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass; 37 return gpio->func->sense(gpio, line);
40 return impl->sense ? impl->sense(gpio, line) : -ENODEV;
41} 38}
42 39
43static int 40void
41nvkm_gpio_reset(struct nvkm_gpio *gpio, u8 func)
42{
43 if (gpio->func->reset)
44 gpio->func->reset(gpio, func);
45}
46
47int
44nvkm_gpio_find(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line, 48nvkm_gpio_find(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line,
45 struct dcb_gpio_func *func) 49 struct dcb_gpio_func *func)
46{ 50{
47 struct nvkm_bios *bios = nvkm_bios(gpio); 51 struct nvkm_device *device = gpio->subdev.device;
52 struct nvkm_bios *bios = device->bios;
48 u8 ver, len; 53 u8 ver, len;
49 u16 data; 54 u16 data;
50 55
@@ -56,11 +61,11 @@ nvkm_gpio_find(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line,
56 return 0; 61 return 0;
57 62
58 /* Apple iMac G4 NV18 */ 63 /* Apple iMac G4 NV18 */
59 if (nv_device_match(nv_object(gpio), 0x0189, 0x10de, 0x0010)) { 64 if (device->quirk && device->quirk->tv_gpio) {
60 if (tag == DCB_GPIO_TVDAC0) { 65 if (tag == DCB_GPIO_TVDAC0) {
61 *func = (struct dcb_gpio_func) { 66 *func = (struct dcb_gpio_func) {
62 .func = DCB_GPIO_TVDAC0, 67 .func = DCB_GPIO_TVDAC0,
63 .line = 4, 68 .line = device->quirk->tv_gpio,
64 .log[0] = 0, 69 .log[0] = 0,
65 .log[1] = 1, 70 .log[1] = 1,
66 }; 71 };
@@ -71,7 +76,7 @@ nvkm_gpio_find(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line,
71 return -ENOENT; 76 return -ENOENT;
72} 77}
73 78
74static int 79int
75nvkm_gpio_set(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line, int state) 80nvkm_gpio_set(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line, int state)
76{ 81{
77 struct dcb_gpio_func func; 82 struct dcb_gpio_func func;
@@ -87,7 +92,7 @@ nvkm_gpio_set(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line, int state)
87 return ret; 92 return ret;
88} 93}
89 94
90static int 95int
91nvkm_gpio_get(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line) 96nvkm_gpio_get(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line)
92{ 97{
93 struct dcb_gpio_func func; 98 struct dcb_gpio_func func;
@@ -107,16 +112,14 @@ static void
107nvkm_gpio_intr_fini(struct nvkm_event *event, int type, int index) 112nvkm_gpio_intr_fini(struct nvkm_event *event, int type, int index)
108{ 113{
109 struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event); 114 struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event);
110 const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass; 115 gpio->func->intr_mask(gpio, type, 1 << index, 0);
111 impl->intr_mask(gpio, type, 1 << index, 0);
112} 116}
113 117
114static void 118static void
115nvkm_gpio_intr_init(struct nvkm_event *event, int type, int index) 119nvkm_gpio_intr_init(struct nvkm_event *event, int type, int index)
116{ 120{
117 struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event); 121 struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event);
118 const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass; 122 gpio->func->intr_mask(gpio, type, 1 << index, 1 << index);
119 impl->intr_mask(gpio, type, 1 << index, 1 << index);
120} 123}
121 124
122static int 125static int
@@ -133,16 +136,22 @@ nvkm_gpio_intr_ctor(struct nvkm_object *object, void *data, u32 size,
133 return -EINVAL; 136 return -EINVAL;
134} 137}
135 138
139static const struct nvkm_event_func
140nvkm_gpio_intr_func = {
141 .ctor = nvkm_gpio_intr_ctor,
142 .init = nvkm_gpio_intr_init,
143 .fini = nvkm_gpio_intr_fini,
144};
145
136static void 146static void
137nvkm_gpio_intr(struct nvkm_subdev *subdev) 147nvkm_gpio_intr(struct nvkm_subdev *subdev)
138{ 148{
139 struct nvkm_gpio *gpio = nvkm_gpio(subdev); 149 struct nvkm_gpio *gpio = nvkm_gpio(subdev);
140 const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
141 u32 hi, lo, i; 150 u32 hi, lo, i;
142 151
143 impl->intr_stat(gpio, &hi, &lo); 152 gpio->func->intr_stat(gpio, &hi, &lo);
144 153
145 for (i = 0; (hi | lo) && i < impl->lines; i++) { 154 for (i = 0; (hi | lo) && i < gpio->func->lines; i++) {
146 struct nvkm_gpio_ntfy_rep rep = { 155 struct nvkm_gpio_ntfy_rep rep = {
147 .mask = (NVKM_GPIO_HI * !!(hi & (1 << i))) | 156 .mask = (NVKM_GPIO_HI * !!(hi & (1 << i))) |
148 (NVKM_GPIO_LO * !!(lo & (1 << i))), 157 (NVKM_GPIO_LO * !!(lo & (1 << i))),
@@ -151,24 +160,15 @@ nvkm_gpio_intr(struct nvkm_subdev *subdev)
151 } 160 }
152} 161}
153 162
154static const struct nvkm_event_func 163static int
155nvkm_gpio_intr_func = { 164nvkm_gpio_fini(struct nvkm_subdev *subdev, bool suspend)
156 .ctor = nvkm_gpio_intr_ctor,
157 .init = nvkm_gpio_intr_init,
158 .fini = nvkm_gpio_intr_fini,
159};
160
161int
162_nvkm_gpio_fini(struct nvkm_object *object, bool suspend)
163{ 165{
164 const struct nvkm_gpio_impl *impl = (void *)object->oclass; 166 struct nvkm_gpio *gpio = nvkm_gpio(subdev);
165 struct nvkm_gpio *gpio = nvkm_gpio(object); 167 u32 mask = (1 << gpio->func->lines) - 1;
166 u32 mask = (1 << impl->lines) - 1;
167
168 impl->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
169 impl->intr_stat(gpio, &mask, &mask);
170 168
171 return nvkm_subdev_fini(&gpio->base, suspend); 169 gpio->func->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
170 gpio->func->intr_stat(gpio, &mask, &mask);
171 return 0;
172} 172}
173 173
174static struct dmi_system_id gpio_reset_ids[] = { 174static struct dmi_system_id gpio_reset_ids[] = {
@@ -182,70 +182,43 @@ static struct dmi_system_id gpio_reset_ids[] = {
182 { } 182 { }
183}; 183};
184 184
185int 185static int
186_nvkm_gpio_init(struct nvkm_object *object) 186nvkm_gpio_init(struct nvkm_subdev *subdev)
187{ 187{
188 struct nvkm_gpio *gpio = nvkm_gpio(object); 188 struct nvkm_gpio *gpio = nvkm_gpio(subdev);
189 int ret; 189 if (dmi_check_system(gpio_reset_ids))
190 190 nvkm_gpio_reset(gpio, DCB_GPIO_UNUSED);
191 ret = nvkm_subdev_init(&gpio->base); 191 return 0;
192 if (ret)
193 return ret;
194
195 if (gpio->reset && dmi_check_system(gpio_reset_ids))
196 gpio->reset(gpio, DCB_GPIO_UNUSED);
197
198 return ret;
199} 192}
200 193
201void 194static void *
202_nvkm_gpio_dtor(struct nvkm_object *object) 195nvkm_gpio_dtor(struct nvkm_subdev *subdev)
203{ 196{
204 struct nvkm_gpio *gpio = (void *)object; 197 struct nvkm_gpio *gpio = nvkm_gpio(subdev);
205 nvkm_event_fini(&gpio->event); 198 nvkm_event_fini(&gpio->event);
206 nvkm_subdev_destroy(&gpio->base); 199 return gpio;
207} 200}
208 201
209int 202static const struct nvkm_subdev_func
210nvkm_gpio_create_(struct nvkm_object *parent, struct nvkm_object *engine, 203nvkm_gpio = {
211 struct nvkm_oclass *oclass, int length, void **pobject) 204 .dtor = nvkm_gpio_dtor,
212{ 205 .init = nvkm_gpio_init,
213 const struct nvkm_gpio_impl *impl = (void *)oclass; 206 .fini = nvkm_gpio_fini,
214 struct nvkm_gpio *gpio; 207 .intr = nvkm_gpio_intr,
215 int ret; 208};
216
217 ret = nvkm_subdev_create_(parent, engine, oclass, 0, "GPIO",
218 "gpio", length, pobject);
219 gpio = *pobject;
220 if (ret)
221 return ret;
222
223 gpio->find = nvkm_gpio_find;
224 gpio->set = nvkm_gpio_set;
225 gpio->get = nvkm_gpio_get;
226 gpio->reset = impl->reset;
227
228 ret = nvkm_event_init(&nvkm_gpio_intr_func, 2, impl->lines,
229 &gpio->event);
230 if (ret)
231 return ret;
232
233 nv_subdev(gpio)->intr = nvkm_gpio_intr;
234 return 0;
235}
236 209
237int 210int
238_nvkm_gpio_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 211nvkm_gpio_new_(const struct nvkm_gpio_func *func, struct nvkm_device *device,
239 struct nvkm_oclass *oclass, void *data, u32 size, 212 int index, struct nvkm_gpio **pgpio)
240 struct nvkm_object **pobject)
241{ 213{
242 struct nvkm_gpio *gpio; 214 struct nvkm_gpio *gpio;
243 int ret;
244 215
245 ret = nvkm_gpio_create(parent, engine, oclass, &gpio); 216 if (!(gpio = *pgpio = kzalloc(sizeof(*gpio), GFP_KERNEL)))
246 *pobject = nv_object(gpio); 217 return -ENOMEM;
247 if (ret)
248 return ret;
249 218
250 return 0; 219 nvkm_subdev_ctor(&nvkm_gpio, device, index, 0, &gpio->subdev);
220 gpio->func = func;
221
222 return nvkm_event_init(&nvkm_gpio_intr_func, 2, func->lines,
223 &gpio->event);
251} 224}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
index 12b3e01fca8e..6dcda55fb865 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
@@ -26,21 +26,23 @@
26void 26void
27g94_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo) 27g94_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
28{ 28{
29 u32 intr0 = nv_rd32(gpio, 0x00e054); 29 struct nvkm_device *device = gpio->subdev.device;
30 u32 intr1 = nv_rd32(gpio, 0x00e074); 30 u32 intr0 = nvkm_rd32(device, 0x00e054);
31 u32 stat0 = nv_rd32(gpio, 0x00e050) & intr0; 31 u32 intr1 = nvkm_rd32(device, 0x00e074);
32 u32 stat1 = nv_rd32(gpio, 0x00e070) & intr1; 32 u32 stat0 = nvkm_rd32(device, 0x00e050) & intr0;
33 u32 stat1 = nvkm_rd32(device, 0x00e070) & intr1;
33 *lo = (stat1 & 0xffff0000) | (stat0 >> 16); 34 *lo = (stat1 & 0xffff0000) | (stat0 >> 16);
34 *hi = (stat1 << 16) | (stat0 & 0x0000ffff); 35 *hi = (stat1 << 16) | (stat0 & 0x0000ffff);
35 nv_wr32(gpio, 0x00e054, intr0); 36 nvkm_wr32(device, 0x00e054, intr0);
36 nv_wr32(gpio, 0x00e074, intr1); 37 nvkm_wr32(device, 0x00e074, intr1);
37} 38}
38 39
39void 40void
40g94_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data) 41g94_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
41{ 42{
42 u32 inte0 = nv_rd32(gpio, 0x00e050); 43 struct nvkm_device *device = gpio->subdev.device;
43 u32 inte1 = nv_rd32(gpio, 0x00e070); 44 u32 inte0 = nvkm_rd32(device, 0x00e050);
45 u32 inte1 = nvkm_rd32(device, 0x00e070);
44 if (type & NVKM_GPIO_LO) 46 if (type & NVKM_GPIO_LO)
45 inte0 = (inte0 & ~(mask << 16)) | (data << 16); 47 inte0 = (inte0 & ~(mask << 16)) | (data << 16);
46 if (type & NVKM_GPIO_HI) 48 if (type & NVKM_GPIO_HI)
@@ -51,23 +53,22 @@ g94_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
51 inte1 = (inte1 & ~(mask << 16)) | (data << 16); 53 inte1 = (inte1 & ~(mask << 16)) | (data << 16);
52 if (type & NVKM_GPIO_HI) 54 if (type & NVKM_GPIO_HI)
53 inte1 = (inte1 & ~mask) | data; 55 inte1 = (inte1 & ~mask) | data;
54 nv_wr32(gpio, 0x00e050, inte0); 56 nvkm_wr32(device, 0x00e050, inte0);
55 nv_wr32(gpio, 0x00e070, inte1); 57 nvkm_wr32(device, 0x00e070, inte1);
56} 58}
57 59
58struct nvkm_oclass * 60static const struct nvkm_gpio_func
59g94_gpio_oclass = &(struct nvkm_gpio_impl) { 61g94_gpio = {
60 .base.handle = NV_SUBDEV(GPIO, 0x94),
61 .base.ofuncs = &(struct nvkm_ofuncs) {
62 .ctor = _nvkm_gpio_ctor,
63 .dtor = _nvkm_gpio_dtor,
64 .init = _nvkm_gpio_init,
65 .fini = _nvkm_gpio_fini,
66 },
67 .lines = 32, 62 .lines = 32,
68 .intr_stat = g94_gpio_intr_stat, 63 .intr_stat = g94_gpio_intr_stat,
69 .intr_mask = g94_gpio_intr_mask, 64 .intr_mask = g94_gpio_intr_mask,
70 .drive = nv50_gpio_drive, 65 .drive = nv50_gpio_drive,
71 .sense = nv50_gpio_sense, 66 .sense = nv50_gpio_sense,
72 .reset = nv50_gpio_reset, 67 .reset = nv50_gpio_reset,
73}.base; 68};
69
70int
71g94_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
72{
73 return nvkm_gpio_new_(&g94_gpio, device, index, pgpio);
74}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
index 2c3bb255d1f8..bb7400dfaef8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
@@ -24,15 +24,16 @@
24#include "priv.h" 24#include "priv.h"
25 25
26void 26void
27gf110_gpio_reset(struct nvkm_gpio *gpio, u8 match) 27gf119_gpio_reset(struct nvkm_gpio *gpio, u8 match)
28{ 28{
29 struct nvkm_bios *bios = nvkm_bios(gpio); 29 struct nvkm_device *device = gpio->subdev.device;
30 struct nvkm_bios *bios = device->bios;
30 u8 ver, len; 31 u8 ver, len;
31 u16 entry; 32 u16 entry;
32 int ent = -1; 33 int ent = -1;
33 34
34 while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) { 35 while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
35 u32 data = nv_ro32(bios, entry); 36 u32 data = nvbios_rd32(bios, entry);
36 u8 line = (data & 0x0000003f); 37 u8 line = (data & 0x0000003f);
37 u8 defs = !!(data & 0x00000080); 38 u8 defs = !!(data & 0x00000080);
38 u8 func = (data & 0x0000ff00) >> 8; 39 u8 func = (data & 0x0000ff00) >> 8;
@@ -43,42 +44,43 @@ gf110_gpio_reset(struct nvkm_gpio *gpio, u8 match)
43 (match != DCB_GPIO_UNUSED && match != func)) 44 (match != DCB_GPIO_UNUSED && match != func))
44 continue; 45 continue;
45 46
46 gpio->set(gpio, 0, func, line, defs); 47 nvkm_gpio_set(gpio, 0, func, line, defs);
47 48
48 nv_mask(gpio, 0x00d610 + (line * 4), 0xff, unk0); 49 nvkm_mask(device, 0x00d610 + (line * 4), 0xff, unk0);
49 if (unk1--) 50 if (unk1--)
50 nv_mask(gpio, 0x00d740 + (unk1 * 4), 0xff, line); 51 nvkm_mask(device, 0x00d740 + (unk1 * 4), 0xff, line);
51 } 52 }
52} 53}
53 54
54int 55int
55gf110_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out) 56gf119_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
56{ 57{
58 struct nvkm_device *device = gpio->subdev.device;
57 u32 data = ((dir ^ 1) << 13) | (out << 12); 59 u32 data = ((dir ^ 1) << 13) | (out << 12);
58 nv_mask(gpio, 0x00d610 + (line * 4), 0x00003000, data); 60 nvkm_mask(device, 0x00d610 + (line * 4), 0x00003000, data);
59 nv_mask(gpio, 0x00d604, 0x00000001, 0x00000001); /* update? */ 61 nvkm_mask(device, 0x00d604, 0x00000001, 0x00000001); /* update? */
60 return 0; 62 return 0;
61} 63}
62 64
63int 65int
64gf110_gpio_sense(struct nvkm_gpio *gpio, int line) 66gf119_gpio_sense(struct nvkm_gpio *gpio, int line)
65{ 67{
66 return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000); 68 struct nvkm_device *device = gpio->subdev.device;
69 return !!(nvkm_rd32(device, 0x00d610 + (line * 4)) & 0x00004000);
67} 70}
68 71
69struct nvkm_oclass * 72static const struct nvkm_gpio_func
70gf110_gpio_oclass = &(struct nvkm_gpio_impl) { 73gf119_gpio = {
71 .base.handle = NV_SUBDEV(GPIO, 0xd0),
72 .base.ofuncs = &(struct nvkm_ofuncs) {
73 .ctor = _nvkm_gpio_ctor,
74 .dtor = _nvkm_gpio_dtor,
75 .init = _nvkm_gpio_init,
76 .fini = _nvkm_gpio_fini,
77 },
78 .lines = 32, 74 .lines = 32,
79 .intr_stat = g94_gpio_intr_stat, 75 .intr_stat = g94_gpio_intr_stat,
80 .intr_mask = g94_gpio_intr_mask, 76 .intr_mask = g94_gpio_intr_mask,
81 .drive = gf110_gpio_drive, 77 .drive = gf119_gpio_drive,
82 .sense = gf110_gpio_sense, 78 .sense = gf119_gpio_sense,
83 .reset = gf110_gpio_reset, 79 .reset = gf119_gpio_reset,
84}.base; 80};
81
82int
83gf119_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
84{
85 return nvkm_gpio_new_(&gf119_gpio, device, index, pgpio);
86}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
index 42fd2faaaa4f..3f45afd17d5a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
@@ -26,21 +26,23 @@
26static void 26static void
27gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo) 27gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
28{ 28{
29 u32 intr0 = nv_rd32(gpio, 0x00dc00); 29 struct nvkm_device *device = gpio->subdev.device;
30 u32 intr1 = nv_rd32(gpio, 0x00dc80); 30 u32 intr0 = nvkm_rd32(device, 0x00dc00);
31 u32 stat0 = nv_rd32(gpio, 0x00dc08) & intr0; 31 u32 intr1 = nvkm_rd32(device, 0x00dc80);
32 u32 stat1 = nv_rd32(gpio, 0x00dc88) & intr1; 32 u32 stat0 = nvkm_rd32(device, 0x00dc08) & intr0;
33 u32 stat1 = nvkm_rd32(device, 0x00dc88) & intr1;
33 *lo = (stat1 & 0xffff0000) | (stat0 >> 16); 34 *lo = (stat1 & 0xffff0000) | (stat0 >> 16);
34 *hi = (stat1 << 16) | (stat0 & 0x0000ffff); 35 *hi = (stat1 << 16) | (stat0 & 0x0000ffff);
35 nv_wr32(gpio, 0x00dc00, intr0); 36 nvkm_wr32(device, 0x00dc00, intr0);
36 nv_wr32(gpio, 0x00dc80, intr1); 37 nvkm_wr32(device, 0x00dc80, intr1);
37} 38}
38 39
39void 40void
40gk104_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data) 41gk104_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
41{ 42{
42 u32 inte0 = nv_rd32(gpio, 0x00dc08); 43 struct nvkm_device *device = gpio->subdev.device;
43 u32 inte1 = nv_rd32(gpio, 0x00dc88); 44 u32 inte0 = nvkm_rd32(device, 0x00dc08);
45 u32 inte1 = nvkm_rd32(device, 0x00dc88);
44 if (type & NVKM_GPIO_LO) 46 if (type & NVKM_GPIO_LO)
45 inte0 = (inte0 & ~(mask << 16)) | (data << 16); 47 inte0 = (inte0 & ~(mask << 16)) | (data << 16);
46 if (type & NVKM_GPIO_HI) 48 if (type & NVKM_GPIO_HI)
@@ -51,23 +53,22 @@ gk104_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
51 inte1 = (inte1 & ~(mask << 16)) | (data << 16); 53 inte1 = (inte1 & ~(mask << 16)) | (data << 16);
52 if (type & NVKM_GPIO_HI) 54 if (type & NVKM_GPIO_HI)
53 inte1 = (inte1 & ~mask) | data; 55 inte1 = (inte1 & ~mask) | data;
54 nv_wr32(gpio, 0x00dc08, inte0); 56 nvkm_wr32(device, 0x00dc08, inte0);
55 nv_wr32(gpio, 0x00dc88, inte1); 57 nvkm_wr32(device, 0x00dc88, inte1);
56} 58}
57 59
58struct nvkm_oclass * 60static const struct nvkm_gpio_func
59gk104_gpio_oclass = &(struct nvkm_gpio_impl) { 61gk104_gpio = {
60 .base.handle = NV_SUBDEV(GPIO, 0xe0),
61 .base.ofuncs = &(struct nvkm_ofuncs) {
62 .ctor = _nvkm_gpio_ctor,
63 .dtor = _nvkm_gpio_dtor,
64 .init = _nvkm_gpio_init,
65 .fini = _nvkm_gpio_fini,
66 },
67 .lines = 32, 62 .lines = 32,
68 .intr_stat = gk104_gpio_intr_stat, 63 .intr_stat = gk104_gpio_intr_stat,
69 .intr_mask = gk104_gpio_intr_mask, 64 .intr_mask = gk104_gpio_intr_mask,
70 .drive = gf110_gpio_drive, 65 .drive = gf119_gpio_drive,
71 .sense = gf110_gpio_sense, 66 .sense = gf119_gpio_sense,
72 .reset = gf110_gpio_reset, 67 .reset = gf119_gpio_reset,
73}.base; 68};
69
70int
71gk104_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
72{
73 return nvkm_gpio_new_(&gk104_gpio, device, index, pgpio);
74}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
index 2b295154247e..ae3499b48330 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
@@ -28,19 +28,20 @@
28static int 28static int
29nv10_gpio_sense(struct nvkm_gpio *gpio, int line) 29nv10_gpio_sense(struct nvkm_gpio *gpio, int line)
30{ 30{
31 struct nvkm_device *device = gpio->subdev.device;
31 if (line < 2) { 32 if (line < 2) {
32 line = line * 16; 33 line = line * 16;
33 line = nv_rd32(gpio, 0x600818) >> line; 34 line = nvkm_rd32(device, 0x600818) >> line;
34 return !!(line & 0x0100); 35 return !!(line & 0x0100);
35 } else 36 } else
36 if (line < 10) { 37 if (line < 10) {
37 line = (line - 2) * 4; 38 line = (line - 2) * 4;
38 line = nv_rd32(gpio, 0x60081c) >> line; 39 line = nvkm_rd32(device, 0x60081c) >> line;
39 return !!(line & 0x04); 40 return !!(line & 0x04);
40 } else 41 } else
41 if (line < 14) { 42 if (line < 14) {
42 line = (line - 10) * 4; 43 line = (line - 10) * 4;
43 line = nv_rd32(gpio, 0x600850) >> line; 44 line = nvkm_rd32(device, 0x600850) >> line;
44 return !!(line & 0x04); 45 return !!(line & 0x04);
45 } 46 }
46 47
@@ -50,6 +51,7 @@ nv10_gpio_sense(struct nvkm_gpio *gpio, int line)
50static int 51static int
51nv10_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out) 52nv10_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
52{ 53{
54 struct nvkm_device *device = gpio->subdev.device;
53 u32 reg, mask, data; 55 u32 reg, mask, data;
54 56
55 if (line < 2) { 57 if (line < 2) {
@@ -73,43 +75,44 @@ nv10_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
73 return -EINVAL; 75 return -EINVAL;
74 } 76 }
75 77
76 nv_mask(gpio, reg, mask << line, data << line); 78 nvkm_mask(device, reg, mask << line, data << line);
77 return 0; 79 return 0;
78} 80}
79 81
80static void 82static void
81nv10_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo) 83nv10_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
82{ 84{
83 u32 intr = nv_rd32(gpio, 0x001104); 85 struct nvkm_device *device = gpio->subdev.device;
84 u32 stat = nv_rd32(gpio, 0x001144) & intr; 86 u32 intr = nvkm_rd32(device, 0x001104);
87 u32 stat = nvkm_rd32(device, 0x001144) & intr;
85 *lo = (stat & 0xffff0000) >> 16; 88 *lo = (stat & 0xffff0000) >> 16;
86 *hi = (stat & 0x0000ffff); 89 *hi = (stat & 0x0000ffff);
87 nv_wr32(gpio, 0x001104, intr); 90 nvkm_wr32(device, 0x001104, intr);
88} 91}
89 92
90static void 93static void
91nv10_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data) 94nv10_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
92{ 95{
93 u32 inte = nv_rd32(gpio, 0x001144); 96 struct nvkm_device *device = gpio->subdev.device;
97 u32 inte = nvkm_rd32(device, 0x001144);
94 if (type & NVKM_GPIO_LO) 98 if (type & NVKM_GPIO_LO)
95 inte = (inte & ~(mask << 16)) | (data << 16); 99 inte = (inte & ~(mask << 16)) | (data << 16);
96 if (type & NVKM_GPIO_HI) 100 if (type & NVKM_GPIO_HI)
97 inte = (inte & ~mask) | data; 101 inte = (inte & ~mask) | data;
98 nv_wr32(gpio, 0x001144, inte); 102 nvkm_wr32(device, 0x001144, inte);
99} 103}
100 104
101struct nvkm_oclass * 105static const struct nvkm_gpio_func
102nv10_gpio_oclass = &(struct nvkm_gpio_impl) { 106nv10_gpio = {
103 .base.handle = NV_SUBDEV(GPIO, 0x10),
104 .base.ofuncs = &(struct nvkm_ofuncs) {
105 .ctor = _nvkm_gpio_ctor,
106 .dtor = _nvkm_gpio_dtor,
107 .init = _nvkm_gpio_init,
108 .fini = _nvkm_gpio_fini,
109 },
110 .lines = 16, 107 .lines = 16,
111 .intr_stat = nv10_gpio_intr_stat, 108 .intr_stat = nv10_gpio_intr_stat,
112 .intr_mask = nv10_gpio_intr_mask, 109 .intr_mask = nv10_gpio_intr_mask,
113 .drive = nv10_gpio_drive, 110 .drive = nv10_gpio_drive,
114 .sense = nv10_gpio_sense, 111 .sense = nv10_gpio_sense,
115}.base; 112};
113
114int
115nv10_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
116{
117 return nvkm_gpio_new_(&nv10_gpio, device, index, pgpio);
118}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
index 6a031035bd27..8996649209ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
@@ -26,14 +26,15 @@
26void 26void
27nv50_gpio_reset(struct nvkm_gpio *gpio, u8 match) 27nv50_gpio_reset(struct nvkm_gpio *gpio, u8 match)
28{ 28{
29 struct nvkm_bios *bios = nvkm_bios(gpio); 29 struct nvkm_device *device = gpio->subdev.device;
30 struct nvkm_bios *bios = device->bios;
30 u8 ver, len; 31 u8 ver, len;
31 u16 entry; 32 u16 entry;
32 int ent = -1; 33 int ent = -1;
33 34
34 while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) { 35 while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
35 static const u32 regs[] = { 0xe100, 0xe28c }; 36 static const u32 regs[] = { 0xe100, 0xe28c };
36 u32 data = nv_ro32(bios, entry); 37 u32 data = nvbios_rd32(bios, entry);
37 u8 line = (data & 0x0000001f); 38 u8 line = (data & 0x0000001f);
38 u8 func = (data & 0x0000ff00) >> 8; 39 u8 func = (data & 0x0000ff00) >> 8;
39 u8 defs = !!(data & 0x01000000); 40 u8 defs = !!(data & 0x01000000);
@@ -47,9 +48,9 @@ nv50_gpio_reset(struct nvkm_gpio *gpio, u8 match)
47 (match != DCB_GPIO_UNUSED && match != func)) 48 (match != DCB_GPIO_UNUSED && match != func))
48 continue; 49 continue;
49 50
50 gpio->set(gpio, 0, func, line, defs); 51 nvkm_gpio_set(gpio, 0, func, line, defs);
51 52
52 nv_mask(gpio, reg, 0x00010001 << lsh, val << lsh); 53 nvkm_mask(device, reg, 0x00010001 << lsh, val << lsh);
53 } 54 }
54} 55}
55 56
@@ -69,60 +70,63 @@ nv50_gpio_location(int line, u32 *reg, u32 *shift)
69int 70int
70nv50_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out) 71nv50_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
71{ 72{
73 struct nvkm_device *device = gpio->subdev.device;
72 u32 reg, shift; 74 u32 reg, shift;
73 75
74 if (nv50_gpio_location(line, &reg, &shift)) 76 if (nv50_gpio_location(line, &reg, &shift))
75 return -EINVAL; 77 return -EINVAL;
76 78
77 nv_mask(gpio, reg, 3 << shift, (((dir ^ 1) << 1) | out) << shift); 79 nvkm_mask(device, reg, 3 << shift, (((dir ^ 1) << 1) | out) << shift);
78 return 0; 80 return 0;
79} 81}
80 82
81int 83int
82nv50_gpio_sense(struct nvkm_gpio *gpio, int line) 84nv50_gpio_sense(struct nvkm_gpio *gpio, int line)
83{ 85{
86 struct nvkm_device *device = gpio->subdev.device;
84 u32 reg, shift; 87 u32 reg, shift;
85 88
86 if (nv50_gpio_location(line, &reg, &shift)) 89 if (nv50_gpio_location(line, &reg, &shift))
87 return -EINVAL; 90 return -EINVAL;
88 91
89 return !!(nv_rd32(gpio, reg) & (4 << shift)); 92 return !!(nvkm_rd32(device, reg) & (4 << shift));
90} 93}
91 94
92static void 95static void
93nv50_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo) 96nv50_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
94{ 97{
95 u32 intr = nv_rd32(gpio, 0x00e054); 98 struct nvkm_device *device = gpio->subdev.device;
96 u32 stat = nv_rd32(gpio, 0x00e050) & intr; 99 u32 intr = nvkm_rd32(device, 0x00e054);
100 u32 stat = nvkm_rd32(device, 0x00e050) & intr;
97 *lo = (stat & 0xffff0000) >> 16; 101 *lo = (stat & 0xffff0000) >> 16;
98 *hi = (stat & 0x0000ffff); 102 *hi = (stat & 0x0000ffff);
99 nv_wr32(gpio, 0x00e054, intr); 103 nvkm_wr32(device, 0x00e054, intr);
100} 104}
101 105
102static void 106static void
103nv50_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data) 107nv50_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
104{ 108{
105 u32 inte = nv_rd32(gpio, 0x00e050); 109 struct nvkm_device *device = gpio->subdev.device;
110 u32 inte = nvkm_rd32(device, 0x00e050);
106 if (type & NVKM_GPIO_LO) 111 if (type & NVKM_GPIO_LO)
107 inte = (inte & ~(mask << 16)) | (data << 16); 112 inte = (inte & ~(mask << 16)) | (data << 16);
108 if (type & NVKM_GPIO_HI) 113 if (type & NVKM_GPIO_HI)
109 inte = (inte & ~mask) | data; 114 inte = (inte & ~mask) | data;
110 nv_wr32(gpio, 0x00e050, inte); 115 nvkm_wr32(device, 0x00e050, inte);
111} 116}
112 117
113struct nvkm_oclass * 118static const struct nvkm_gpio_func
114nv50_gpio_oclass = &(struct nvkm_gpio_impl) { 119nv50_gpio = {
115 .base.handle = NV_SUBDEV(GPIO, 0x50),
116 .base.ofuncs = &(struct nvkm_ofuncs) {
117 .ctor = _nvkm_gpio_ctor,
118 .dtor = _nvkm_gpio_dtor,
119 .init = _nvkm_gpio_init,
120 .fini = _nvkm_gpio_fini,
121 },
122 .lines = 16, 120 .lines = 16,
123 .intr_stat = nv50_gpio_intr_stat, 121 .intr_stat = nv50_gpio_intr_stat,
124 .intr_mask = nv50_gpio_intr_mask, 122 .intr_mask = nv50_gpio_intr_mask,
125 .drive = nv50_gpio_drive, 123 .drive = nv50_gpio_drive,
126 .sense = nv50_gpio_sense, 124 .sense = nv50_gpio_sense,
127 .reset = nv50_gpio_reset, 125 .reset = nv50_gpio_reset,
128}.base; 126};
127
128int
129nv50_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
130{
131 return nvkm_gpio_new_(&nv50_gpio, device, index, pgpio);
132}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
index 382f8d44e140..371bcdbbe0d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
@@ -1,33 +1,9 @@
1#ifndef __NVKM_GPIO_PRIV_H__ 1#ifndef __NVKM_GPIO_PRIV_H__
2#define __NVKM_GPIO_PRIV_H__ 2#define __NVKM_GPIO_PRIV_H__
3#define nvkm_gpio(p) container_of((p), struct nvkm_gpio, subdev)
3#include <subdev/gpio.h> 4#include <subdev/gpio.h>
4 5
5#define nvkm_gpio_create(p,e,o,d) \ 6struct nvkm_gpio_func {
6 nvkm_gpio_create_((p), (e), (o), sizeof(**d), (void **)d)
7#define nvkm_gpio_destroy(p) ({ \
8 struct nvkm_gpio *gpio = (p); \
9 _nvkm_gpio_dtor(nv_object(gpio)); \
10})
11#define nvkm_gpio_init(p) ({ \
12 struct nvkm_gpio *gpio = (p); \
13 _nvkm_gpio_init(nv_object(gpio)); \
14})
15#define nvkm_gpio_fini(p,s) ({ \
16 struct nvkm_gpio *gpio = (p); \
17 _nvkm_gpio_fini(nv_object(gpio), (s)); \
18})
19
20int nvkm_gpio_create_(struct nvkm_object *, struct nvkm_object *,
21 struct nvkm_oclass *, int, void **);
22int _nvkm_gpio_ctor(struct nvkm_object *, struct nvkm_object *,
23 struct nvkm_oclass *, void *, u32,
24 struct nvkm_object **);
25void _nvkm_gpio_dtor(struct nvkm_object *);
26int _nvkm_gpio_init(struct nvkm_object *);
27int _nvkm_gpio_fini(struct nvkm_object *, bool);
28
29struct nvkm_gpio_impl {
30 struct nvkm_oclass base;
31 int lines; 7 int lines;
32 8
33 /* read and ack pending interrupts, returning only data 9 /* read and ack pending interrupts, returning only data
@@ -51,6 +27,9 @@ struct nvkm_gpio_impl {
51 void (*reset)(struct nvkm_gpio *, u8); 27 void (*reset)(struct nvkm_gpio *, u8);
52}; 28};
53 29
30int nvkm_gpio_new_(const struct nvkm_gpio_func *, struct nvkm_device *,
31 int index, struct nvkm_gpio **);
32
54void nv50_gpio_reset(struct nvkm_gpio *, u8); 33void nv50_gpio_reset(struct nvkm_gpio *, u8);
55int nv50_gpio_drive(struct nvkm_gpio *, int, int, int); 34int nv50_gpio_drive(struct nvkm_gpio *, int, int, int);
56int nv50_gpio_sense(struct nvkm_gpio *, int); 35int nv50_gpio_sense(struct nvkm_gpio *, int);
@@ -58,7 +37,7 @@ int nv50_gpio_sense(struct nvkm_gpio *, int);
58void g94_gpio_intr_stat(struct nvkm_gpio *, u32 *, u32 *); 37void g94_gpio_intr_stat(struct nvkm_gpio *, u32 *, u32 *);
59void g94_gpio_intr_mask(struct nvkm_gpio *, u32, u32, u32); 38void g94_gpio_intr_mask(struct nvkm_gpio *, u32, u32, u32);
60 39
61void gf110_gpio_reset(struct nvkm_gpio *, u8); 40void gf119_gpio_reset(struct nvkm_gpio *, u8);
62int gf110_gpio_drive(struct nvkm_gpio *, int, int, int); 41int gf119_gpio_drive(struct nvkm_gpio *, int, int, int);
63int gf110_gpio_sense(struct nvkm_gpio *, int); 42int gf119_gpio_sense(struct nvkm_gpio *, int);
64#endif 43#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index d68307409980..1f730613c237 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -1,16 +1,30 @@
1nvkm-y += nvkm/subdev/i2c/base.o 1nvkm-y += nvkm/subdev/i2c/base.o
2nvkm-y += nvkm/subdev/i2c/anx9805.o
3nvkm-y += nvkm/subdev/i2c/aux.o
4nvkm-y += nvkm/subdev/i2c/bit.o
5nvkm-y += nvkm/subdev/i2c/pad.o
6nvkm-y += nvkm/subdev/i2c/padnv04.o
7nvkm-y += nvkm/subdev/i2c/padg94.o
8nvkm-y += nvkm/subdev/i2c/padgm204.o
9nvkm-y += nvkm/subdev/i2c/nv04.o 2nvkm-y += nvkm/subdev/i2c/nv04.o
10nvkm-y += nvkm/subdev/i2c/nv4e.o 3nvkm-y += nvkm/subdev/i2c/nv4e.o
11nvkm-y += nvkm/subdev/i2c/nv50.o 4nvkm-y += nvkm/subdev/i2c/nv50.o
12nvkm-y += nvkm/subdev/i2c/g94.o 5nvkm-y += nvkm/subdev/i2c/g94.o
13nvkm-y += nvkm/subdev/i2c/gf110.o
14nvkm-y += nvkm/subdev/i2c/gf117.o 6nvkm-y += nvkm/subdev/i2c/gf117.o
7nvkm-y += nvkm/subdev/i2c/gf119.o
15nvkm-y += nvkm/subdev/i2c/gk104.o 8nvkm-y += nvkm/subdev/i2c/gk104.o
16nvkm-y += nvkm/subdev/i2c/gm204.o 9nvkm-y += nvkm/subdev/i2c/gm204.o
10
11nvkm-y += nvkm/subdev/i2c/pad.o
12nvkm-y += nvkm/subdev/i2c/padnv04.o
13nvkm-y += nvkm/subdev/i2c/padnv4e.o
14nvkm-y += nvkm/subdev/i2c/padnv50.o
15nvkm-y += nvkm/subdev/i2c/padg94.o
16nvkm-y += nvkm/subdev/i2c/padgf119.o
17nvkm-y += nvkm/subdev/i2c/padgm204.o
18
19nvkm-y += nvkm/subdev/i2c/bus.o
20nvkm-y += nvkm/subdev/i2c/busnv04.o
21nvkm-y += nvkm/subdev/i2c/busnv4e.o
22nvkm-y += nvkm/subdev/i2c/busnv50.o
23nvkm-y += nvkm/subdev/i2c/busgf119.o
24nvkm-y += nvkm/subdev/i2c/bit.o
25
26nvkm-y += nvkm/subdev/i2c/aux.o
27nvkm-y += nvkm/subdev/i2c/auxg94.o
28nvkm-y += nvkm/subdev/i2c/auxgm204.o
29
30nvkm-y += nvkm/subdev/i2c/anx9805.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
index d17dd1cf3c34..b7b01c3f7037 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
@@ -21,272 +21,258 @@
21 * 21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24#include "port.h" 24#define anx9805_pad(p) container_of((p), struct anx9805_pad, base)
25#define anx9805_bus(p) container_of((p), struct anx9805_bus, base)
26#define anx9805_aux(p) container_of((p), struct anx9805_aux, base)
27#include "aux.h"
28#include "bus.h"
29
30struct anx9805_pad {
31 struct nvkm_i2c_pad base;
32 struct nvkm_i2c_bus *bus;
33 u8 addr;
34};
25 35
26struct anx9805_i2c_port { 36struct anx9805_bus {
27 struct nvkm_i2c_port base; 37 struct nvkm_i2c_bus base;
28 u32 addr; 38 struct anx9805_pad *pad;
29 u32 ctrl; 39 u8 addr;
30}; 40};
31 41
32static int 42static int
33anx9805_train(struct nvkm_i2c_port *port, int link_nr, int link_bw, bool enh) 43anx9805_bus_xfer(struct nvkm_i2c_bus *base, struct i2c_msg *msgs, int num)
34{ 44{
35 struct anx9805_i2c_port *chan = (void *)port; 45 struct anx9805_bus *bus = anx9805_bus(base);
36 struct nvkm_i2c_port *mast = (void *)nv_object(chan)->parent; 46 struct anx9805_pad *pad = bus->pad;
37 u8 tmp, i; 47 struct i2c_adapter *adap = &pad->bus->i2c;
38 48 struct i2c_msg *msg = msgs;
39 DBG("ANX9805 train %d 0x%02x %d\n", link_nr, link_bw, enh); 49 int ret = -ETIMEDOUT;
50 int i, j, cnt = num;
51 u8 seg = 0x00, off = 0x00, tmp;
40 52
41 nv_wri2cr(mast, chan->addr, 0xa0, link_bw); 53 tmp = nvkm_rdi2cr(adap, pad->addr, 0x07) & ~0x10;
42 nv_wri2cr(mast, chan->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00)); 54 nvkm_wri2cr(adap, pad->addr, 0x07, tmp | 0x10);
43 nv_wri2cr(mast, chan->addr, 0xa2, 0x01); 55 nvkm_wri2cr(adap, pad->addr, 0x07, tmp);
44 nv_wri2cr(mast, chan->addr, 0xa8, 0x01); 56 nvkm_wri2cr(adap, bus->addr, 0x43, 0x05);
57 mdelay(5);
45 58
46 i = 0; 59 while (cnt--) {
47 while ((tmp = nv_rdi2cr(mast, chan->addr, 0xa8)) & 0x01) { 60 if ( (msg->flags & I2C_M_RD) && msg->addr == 0x50) {
48 mdelay(5); 61 nvkm_wri2cr(adap, bus->addr, 0x40, msg->addr << 1);
49 if (i++ == 100) { 62 nvkm_wri2cr(adap, bus->addr, 0x41, seg);
50 nv_error(port, "link training timed out\n"); 63 nvkm_wri2cr(adap, bus->addr, 0x42, off);
51 return -ETIMEDOUT; 64 nvkm_wri2cr(adap, bus->addr, 0x44, msg->len);
65 nvkm_wri2cr(adap, bus->addr, 0x45, 0x00);
66 nvkm_wri2cr(adap, bus->addr, 0x43, 0x01);
67 for (i = 0; i < msg->len; i++) {
68 j = 0;
69 while (nvkm_rdi2cr(adap, bus->addr, 0x46) & 0x10) {
70 mdelay(5);
71 if (j++ == 32)
72 goto done;
73 }
74 msg->buf[i] = nvkm_rdi2cr(adap, bus->addr, 0x47);
75 }
76 } else
77 if (!(msg->flags & I2C_M_RD)) {
78 if (msg->addr == 0x50 && msg->len == 0x01) {
79 off = msg->buf[0];
80 } else
81 if (msg->addr == 0x30 && msg->len == 0x01) {
82 seg = msg->buf[0];
83 } else
84 goto done;
85 } else {
86 goto done;
52 } 87 }
88 msg++;
53 } 89 }
54 90
55 if (tmp & 0x70) { 91 ret = num;
56 nv_error(port, "link training failed: 0x%02x\n", tmp); 92done:
57 return -EIO; 93 nvkm_wri2cr(adap, bus->addr, 0x43, 0x00);
94 return ret;
95}
96
97static const struct nvkm_i2c_bus_func
98anx9805_bus_func = {
99 .xfer = anx9805_bus_xfer,
100};
101
102static int
103anx9805_bus_new(struct nvkm_i2c_pad *base, int id, u8 drive,
104 struct nvkm_i2c_bus **pbus)
105{
106 struct anx9805_pad *pad = anx9805_pad(base);
107 struct anx9805_bus *bus;
108 int ret;
109
110 if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
111 return -ENOMEM;
112 *pbus = &bus->base;
113 bus->pad = pad;
114
115 ret = nvkm_i2c_bus_ctor(&anx9805_bus_func, &pad->base, id, &bus->base);
116 if (ret)
117 return ret;
118
119 switch (pad->addr) {
120 case 0x39: bus->addr = 0x3d; break;
121 case 0x3b: bus->addr = 0x3f; break;
122 default:
123 return -ENOSYS;
58 } 124 }
59 125
60 return 1; 126 return 0;
61} 127}
62 128
129struct anx9805_aux {
130 struct nvkm_i2c_aux base;
131 struct anx9805_pad *pad;
132 u8 addr;
133};
134
63static int 135static int
64anx9805_aux(struct nvkm_i2c_port *port, bool retry, 136anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
65 u8 type, u32 addr, u8 *data, u8 size) 137 u8 type, u32 addr, u8 *data, u8 size)
66{ 138{
67 struct anx9805_i2c_port *chan = (void *)port; 139 struct anx9805_aux *aux = anx9805_aux(base);
68 struct nvkm_i2c_port *mast = (void *)nv_object(chan)->parent; 140 struct anx9805_pad *pad = aux->pad;
141 struct i2c_adapter *adap = &pad->bus->i2c;
69 int i, ret = -ETIMEDOUT; 142 int i, ret = -ETIMEDOUT;
70 u8 buf[16] = {}; 143 u8 buf[16] = {};
71 u8 tmp; 144 u8 tmp;
72 145
73 DBG("%02x %05x %d\n", type, addr, size); 146 AUX_DBG(&aux->base, "%02x %05x %d", type, addr, size);
74 147
75 tmp = nv_rdi2cr(mast, chan->ctrl, 0x07) & ~0x04; 148 tmp = nvkm_rdi2cr(adap, pad->addr, 0x07) & ~0x04;
76 nv_wri2cr(mast, chan->ctrl, 0x07, tmp | 0x04); 149 nvkm_wri2cr(adap, pad->addr, 0x07, tmp | 0x04);
77 nv_wri2cr(mast, chan->ctrl, 0x07, tmp); 150 nvkm_wri2cr(adap, pad->addr, 0x07, tmp);
78 nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01); 151 nvkm_wri2cr(adap, pad->addr, 0xf7, 0x01);
79 152
80 nv_wri2cr(mast, chan->addr, 0xe4, 0x80); 153 nvkm_wri2cr(adap, aux->addr, 0xe4, 0x80);
81 if (!(type & 1)) { 154 if (!(type & 1)) {
82 memcpy(buf, data, size); 155 memcpy(buf, data, size);
83 DBG("%16ph", buf); 156 AUX_DBG(&aux->base, "%16ph", buf);
84 for (i = 0; i < size; i++) 157 for (i = 0; i < size; i++)
85 nv_wri2cr(mast, chan->addr, 0xf0 + i, buf[i]); 158 nvkm_wri2cr(adap, aux->addr, 0xf0 + i, buf[i]);
86 } 159 }
87 nv_wri2cr(mast, chan->addr, 0xe5, ((size - 1) << 4) | type); 160 nvkm_wri2cr(adap, aux->addr, 0xe5, ((size - 1) << 4) | type);
88 nv_wri2cr(mast, chan->addr, 0xe6, (addr & 0x000ff) >> 0); 161 nvkm_wri2cr(adap, aux->addr, 0xe6, (addr & 0x000ff) >> 0);
89 nv_wri2cr(mast, chan->addr, 0xe7, (addr & 0x0ff00) >> 8); 162 nvkm_wri2cr(adap, aux->addr, 0xe7, (addr & 0x0ff00) >> 8);
90 nv_wri2cr(mast, chan->addr, 0xe8, (addr & 0xf0000) >> 16); 163 nvkm_wri2cr(adap, aux->addr, 0xe8, (addr & 0xf0000) >> 16);
91 nv_wri2cr(mast, chan->addr, 0xe9, 0x01); 164 nvkm_wri2cr(adap, aux->addr, 0xe9, 0x01);
92 165
93 i = 0; 166 i = 0;
94 while ((tmp = nv_rdi2cr(mast, chan->addr, 0xe9)) & 0x01) { 167 while ((tmp = nvkm_rdi2cr(adap, aux->addr, 0xe9)) & 0x01) {
95 mdelay(5); 168 mdelay(5);
96 if (i++ == 32) 169 if (i++ == 32)
97 goto done; 170 goto done;
98 } 171 }
99 172
100 if ((tmp = nv_rdi2cr(mast, chan->ctrl, 0xf7)) & 0x01) { 173 if ((tmp = nvkm_rdi2cr(adap, pad->addr, 0xf7)) & 0x01) {
101 ret = -EIO; 174 ret = -EIO;
102 goto done; 175 goto done;
103 } 176 }
104 177
105 if (type & 1) { 178 if (type & 1) {
106 for (i = 0; i < size; i++) 179 for (i = 0; i < size; i++)
107 buf[i] = nv_rdi2cr(mast, chan->addr, 0xf0 + i); 180 buf[i] = nvkm_rdi2cr(adap, aux->addr, 0xf0 + i);
108 DBG("%16ph", buf); 181 AUX_DBG(&aux->base, "%16ph", buf);
109 memcpy(data, buf, size); 182 memcpy(data, buf, size);
110 } 183 }
111 184
112 ret = 0; 185 ret = 0;
113done: 186done:
114 nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01); 187 nvkm_wri2cr(adap, pad->addr, 0xf7, 0x01);
115 return ret; 188 return ret;
116} 189}
117 190
118static const struct nvkm_i2c_func
119anx9805_aux_func = {
120 .aux = anx9805_aux,
121 .lnk_ctl = anx9805_train,
122};
123
124static int 191static int
125anx9805_aux_chan_ctor(struct nvkm_object *parent, 192anx9805_aux_lnk_ctl(struct nvkm_i2c_aux *base,
126 struct nvkm_object *engine, 193 int link_nr, int link_bw, bool enh)
127 struct nvkm_oclass *oclass, void *data, u32 index,
128 struct nvkm_object **pobject)
129{ 194{
130 struct nvkm_i2c_port *mast = (void *)parent; 195 struct anx9805_aux *aux = anx9805_aux(base);
131 struct anx9805_i2c_port *chan; 196 struct anx9805_pad *pad = aux->pad;
132 int ret; 197 struct i2c_adapter *adap = &pad->bus->i2c;
133 198 u8 tmp, i;
134 ret = nvkm_i2c_port_create(parent, engine, oclass, index,
135 &nvkm_i2c_aux_algo, &anx9805_aux_func,
136 &chan);
137 *pobject = nv_object(chan);
138 if (ret)
139 return ret;
140
141 switch ((oclass->handle & 0xff00) >> 8) {
142 case 0x0d:
143 chan->addr = 0x38;
144 chan->ctrl = 0x39;
145 break;
146 case 0x0e:
147 chan->addr = 0x3c;
148 chan->ctrl = 0x3b;
149 break;
150 default:
151 BUG_ON(1);
152 }
153
154 if (mast->adapter.algo == &i2c_bit_algo) {
155 struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
156 algo->udelay = max(algo->udelay, 40);
157 }
158
159 return 0;
160}
161
162static struct nvkm_ofuncs
163anx9805_aux_ofuncs = {
164 .ctor = anx9805_aux_chan_ctor,
165 .dtor = _nvkm_i2c_port_dtor,
166 .init = _nvkm_i2c_port_init,
167 .fini = _nvkm_i2c_port_fini,
168};
169 199
170static int 200 AUX_DBG(&aux->base, "ANX9805 train %d %02x %d",
171anx9805_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 201 link_nr, link_bw, enh);
172{
173 struct anx9805_i2c_port *port = adap->algo_data;
174 struct nvkm_i2c_port *mast = (void *)nv_object(port)->parent;
175 struct i2c_msg *msg = msgs;
176 int ret = -ETIMEDOUT;
177 int i, j, cnt = num;
178 u8 seg = 0x00, off = 0x00, tmp;
179 202
180 tmp = nv_rdi2cr(mast, port->ctrl, 0x07) & ~0x10; 203 nvkm_wri2cr(adap, aux->addr, 0xa0, link_bw);
181 nv_wri2cr(mast, port->ctrl, 0x07, tmp | 0x10); 204 nvkm_wri2cr(adap, aux->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
182 nv_wri2cr(mast, port->ctrl, 0x07, tmp); 205 nvkm_wri2cr(adap, aux->addr, 0xa2, 0x01);
183 nv_wri2cr(mast, port->addr, 0x43, 0x05); 206 nvkm_wri2cr(adap, aux->addr, 0xa8, 0x01);
184 mdelay(5);
185 207
186 while (cnt--) { 208 i = 0;
187 if ( (msg->flags & I2C_M_RD) && msg->addr == 0x50) { 209 while ((tmp = nvkm_rdi2cr(adap, aux->addr, 0xa8)) & 0x01) {
188 nv_wri2cr(mast, port->addr, 0x40, msg->addr << 1); 210 mdelay(5);
189 nv_wri2cr(mast, port->addr, 0x41, seg); 211 if (i++ == 100) {
190 nv_wri2cr(mast, port->addr, 0x42, off); 212 AUX_ERR(&aux->base, "link training timeout");
191 nv_wri2cr(mast, port->addr, 0x44, msg->len); 213 return -ETIMEDOUT;
192 nv_wri2cr(mast, port->addr, 0x45, 0x00);
193 nv_wri2cr(mast, port->addr, 0x43, 0x01);
194 for (i = 0; i < msg->len; i++) {
195 j = 0;
196 while (nv_rdi2cr(mast, port->addr, 0x46) & 0x10) {
197 mdelay(5);
198 if (j++ == 32)
199 goto done;
200 }
201 msg->buf[i] = nv_rdi2cr(mast, port->addr, 0x47);
202 }
203 } else
204 if (!(msg->flags & I2C_M_RD)) {
205 if (msg->addr == 0x50 && msg->len == 0x01) {
206 off = msg->buf[0];
207 } else
208 if (msg->addr == 0x30 && msg->len == 0x01) {
209 seg = msg->buf[0];
210 } else
211 goto done;
212 } else {
213 goto done;
214 } 214 }
215 msg++;
216 } 215 }
217 216
218 ret = num; 217 if (tmp & 0x70) {
219done: 218 AUX_ERR(&aux->base, "link training failed");
220 nv_wri2cr(mast, port->addr, 0x43, 0x00); 219 return -EIO;
221 return ret; 220 }
222}
223 221
224static u32 222 return 0;
225anx9805_func(struct i2c_adapter *adap)
226{
227 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
228} 223}
229 224
230static const struct i2c_algorithm 225static const struct nvkm_i2c_aux_func
231anx9805_i2c_algo = { 226anx9805_aux_func = {
232 .master_xfer = anx9805_xfer, 227 .xfer = anx9805_aux_xfer,
233 .functionality = anx9805_func 228 .lnk_ctl = anx9805_aux_lnk_ctl,
234};
235
236static const struct nvkm_i2c_func
237anx9805_i2c_func = {
238}; 229};
239 230
240static int 231static int
241anx9805_ddc_port_ctor(struct nvkm_object *parent, 232anx9805_aux_new(struct nvkm_i2c_pad *base, int id, u8 drive,
242 struct nvkm_object *engine, 233 struct nvkm_i2c_aux **pbus)
243 struct nvkm_oclass *oclass, void *data, u32 index,
244 struct nvkm_object **pobject)
245{ 234{
246 struct nvkm_i2c_port *mast = (void *)parent; 235 struct anx9805_pad *pad = anx9805_pad(base);
247 struct anx9805_i2c_port *port; 236 struct anx9805_aux *aux;
248 int ret; 237 int ret;
249 238
250 ret = nvkm_i2c_port_create(parent, engine, oclass, index, 239 if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
251 &anx9805_i2c_algo, &anx9805_i2c_func, &port); 240 return -ENOMEM;
252 *pobject = nv_object(port); 241 *pbus = &aux->base;
242 aux->pad = pad;
243
244 ret = nvkm_i2c_aux_ctor(&anx9805_aux_func, &pad->base, id, &aux->base);
253 if (ret) 245 if (ret)
254 return ret; 246 return ret;
255 247
256 switch ((oclass->handle & 0xff00) >> 8) { 248 switch (pad->addr) {
257 case 0x0d: 249 case 0x39: aux->addr = 0x38; break;
258 port->addr = 0x3d; 250 case 0x3b: aux->addr = 0x3c; break;
259 port->ctrl = 0x39;
260 break;
261 case 0x0e:
262 port->addr = 0x3f;
263 port->ctrl = 0x3b;
264 break;
265 default: 251 default:
266 BUG_ON(1); 252 return -ENOSYS;
267 }
268
269 if (mast->adapter.algo == &i2c_bit_algo) {
270 struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
271 algo->udelay = max(algo->udelay, 40);
272 } 253 }
273 254
274 return 0; 255 return 0;
275} 256}
276 257
277static struct nvkm_ofuncs 258static const struct nvkm_i2c_pad_func
278anx9805_ddc_ofuncs = { 259anx9805_pad_func = {
279 .ctor = anx9805_ddc_port_ctor, 260 .bus_new_4 = anx9805_bus_new,
280 .dtor = _nvkm_i2c_port_dtor, 261 .aux_new_6 = anx9805_aux_new,
281 .init = _nvkm_i2c_port_init,
282 .fini = _nvkm_i2c_port_fini,
283}; 262};
284 263
285struct nvkm_oclass 264int
286nvkm_anx9805_sclass[] = { 265anx9805_pad_new(struct nvkm_i2c_bus *bus, int id, u8 addr,
287 { .handle = NV_I2C_TYPE_EXTDDC(0x0d), .ofuncs = &anx9805_ddc_ofuncs }, 266 struct nvkm_i2c_pad **ppad)
288 { .handle = NV_I2C_TYPE_EXTAUX(0x0d), .ofuncs = &anx9805_aux_ofuncs }, 267{
289 { .handle = NV_I2C_TYPE_EXTDDC(0x0e), .ofuncs = &anx9805_ddc_ofuncs }, 268 struct anx9805_pad *pad;
290 { .handle = NV_I2C_TYPE_EXTAUX(0x0e), .ofuncs = &anx9805_aux_ofuncs }, 269
291 {} 270 if (!(pad = kzalloc(sizeof(*pad), GFP_KERNEL)))
292}; 271 return -ENOMEM;
272 *ppad = &pad->base;
273
274 nvkm_i2c_pad_ctor(&anx9805_pad_func, bus->pad->i2c, id, &pad->base);
275 pad->bus = bus;
276 pad->addr = addr;
277 return 0;
278}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index 1c18860f80d1..f0851d57df2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -21,50 +21,17 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "priv.h" 24#include "aux.h"
25 25#include "pad.h"
26int
27nv_rdaux(struct nvkm_i2c_port *port, u32 addr, u8 *data, u8 size)
28{
29 struct nvkm_i2c *i2c = nvkm_i2c(port);
30 if (port->func->aux) {
31 int ret = i2c->acquire(port, 0);
32 if (ret == 0) {
33 ret = port->func->aux(port, true, 9, addr, data, size);
34 i2c->release(port);
35 }
36 return ret;
37 }
38 return -ENODEV;
39}
40
41int
42nv_wraux(struct nvkm_i2c_port *port, u32 addr, u8 *data, u8 size)
43{
44 struct nvkm_i2c *i2c = nvkm_i2c(port);
45 if (port->func->aux) {
46 int ret = i2c->acquire(port, 0);
47 if (ret == 0) {
48 ret = port->func->aux(port, true, 8, addr, data, size);
49 i2c->release(port);
50 }
51 return ret;
52 }
53 return -ENODEV;
54}
55 26
56static int 27static int
57aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 28nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
58{ 29{
59 struct nvkm_i2c_port *port = adap->algo_data; 30 struct nvkm_i2c_aux *aux = container_of(adap, typeof(*aux), i2c);
60 struct nvkm_i2c *i2c = nvkm_i2c(port);
61 struct i2c_msg *msg = msgs; 31 struct i2c_msg *msg = msgs;
62 int ret, mcnt = num; 32 int ret, mcnt = num;
63 33
64 if (!port->func->aux) 34 ret = nvkm_i2c_aux_acquire(aux);
65 return -ENODEV;
66
67 ret = i2c->acquire(port, 0);
68 if (ret) 35 if (ret)
69 return ret; 36 return ret;
70 37
@@ -84,9 +51,9 @@ aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
84 if (mcnt || remaining > 16) 51 if (mcnt || remaining > 16)
85 cmd |= 4; /* MOT */ 52 cmd |= 4; /* MOT */
86 53
87 ret = port->func->aux(port, true, cmd, msg->addr, ptr, cnt); 54 ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, cnt);
88 if (ret < 0) { 55 if (ret < 0) {
89 i2c->release(port); 56 nvkm_i2c_aux_release(aux);
90 return ret; 57 return ret;
91 } 58 }
92 59
@@ -97,17 +64,111 @@ aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
97 msg++; 64 msg++;
98 } 65 }
99 66
100 i2c->release(port); 67 nvkm_i2c_aux_release(aux);
101 return num; 68 return num;
102} 69}
103 70
104static u32 71static u32
105aux_func(struct i2c_adapter *adap) 72nvkm_i2c_aux_i2c_func(struct i2c_adapter *adap)
106{ 73{
107 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 74 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
108} 75}
109 76
110const struct i2c_algorithm nvkm_i2c_aux_algo = { 77const struct i2c_algorithm
111 .master_xfer = aux_xfer, 78nvkm_i2c_aux_i2c_algo = {
112 .functionality = aux_func 79 .master_xfer = nvkm_i2c_aux_i2c_xfer,
80 .functionality = nvkm_i2c_aux_i2c_func
113}; 81};
82
83void
84nvkm_i2c_aux_monitor(struct nvkm_i2c_aux *aux, bool monitor)
85{
86 struct nvkm_i2c_pad *pad = aux->pad;
87 AUX_TRACE(aux, "monitor: %s", monitor ? "yes" : "no");
88 if (monitor)
89 nvkm_i2c_pad_mode(pad, NVKM_I2C_PAD_AUX);
90 else
91 nvkm_i2c_pad_mode(pad, NVKM_I2C_PAD_OFF);
92}
93
94void
95nvkm_i2c_aux_release(struct nvkm_i2c_aux *aux)
96{
97 struct nvkm_i2c_pad *pad = aux->pad;
98 AUX_TRACE(aux, "release");
99 nvkm_i2c_pad_release(pad);
100 mutex_unlock(&aux->mutex);
101}
102
103int
104nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *aux)
105{
106 struct nvkm_i2c_pad *pad = aux->pad;
107 int ret;
108 AUX_TRACE(aux, "acquire");
109 mutex_lock(&aux->mutex);
110 ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX);
111 if (ret)
112 mutex_unlock(&aux->mutex);
113 return ret;
114}
115
116int
117nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
118 u32 addr, u8 *data, u8 size)
119{
120 return aux->func->xfer(aux, retry, type, addr, data, size);
121}
122
123int
124nvkm_i2c_aux_lnk_ctl(struct nvkm_i2c_aux *aux, int nr, int bw, bool ef)
125{
126 if (aux->func->lnk_ctl)
127 return aux->func->lnk_ctl(aux, nr, bw, ef);
128 return -ENODEV;
129}
130
131void
132nvkm_i2c_aux_del(struct nvkm_i2c_aux **paux)
133{
134 struct nvkm_i2c_aux *aux = *paux;
135 if (aux && !WARN_ON(!aux->func)) {
136 AUX_TRACE(aux, "dtor");
137 list_del(&aux->head);
138 i2c_del_adapter(&aux->i2c);
139 kfree(*paux);
140 *paux = NULL;
141 }
142}
143
144int
145nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *func,
146 struct nvkm_i2c_pad *pad, int id,
147 struct nvkm_i2c_aux *aux)
148{
149 struct nvkm_device *device = pad->i2c->subdev.device;
150
151 aux->func = func;
152 aux->pad = pad;
153 aux->id = id;
154 mutex_init(&aux->mutex);
155 list_add_tail(&aux->head, &pad->i2c->aux);
156 AUX_TRACE(aux, "ctor");
157
158 snprintf(aux->i2c.name, sizeof(aux->i2c.name), "nvkm-%s-aux-%04x",
159 dev_name(device->dev), id);
160 aux->i2c.owner = THIS_MODULE;
161 aux->i2c.dev.parent = device->dev;
162 aux->i2c.algo = &nvkm_i2c_aux_i2c_algo;
163 return i2c_add_adapter(&aux->i2c);
164}
165
166int
167nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *func,
168 struct nvkm_i2c_pad *pad, int id,
169 struct nvkm_i2c_aux **paux)
170{
171 if (!(*paux = kzalloc(sizeof(**paux), GFP_KERNEL)))
172 return -ENOMEM;
173 return nvkm_i2c_aux_ctor(func, pad, id, *paux);
174}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
new file mode 100644
index 000000000000..35a892e4a4c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -0,0 +1,30 @@
1#ifndef __NVKM_I2C_AUX_H__
2#define __NVKM_I2C_AUX_H__
3#include "pad.h"
4
5struct nvkm_i2c_aux_func {
6 int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
7 u32 addr, u8 *data, u8 size);
8 int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw,
9 bool enhanced_framing);
10};
11
12int nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
13 int id, struct nvkm_i2c_aux *);
14int nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
15 int id, struct nvkm_i2c_aux **);
16void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
17int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
18 u32 addr, u8 *data, u8 size);
19
20int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
21int gm204_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
22
23#define AUX_MSG(b,l,f,a...) do { \
24 struct nvkm_i2c_aux *_aux = (b); \
25 nvkm_##l(&_aux->pad->i2c->subdev, "aux %04x: "f"\n", _aux->id, ##a); \
26} while(0)
27#define AUX_ERR(b,f,a...) AUX_MSG((b), error, f, ##a)
28#define AUX_DBG(b,f,a...) AUX_MSG((b), debug, f, ##a)
29#define AUX_TRACE(b,f,a...) AUX_MSG((b), trace, f, ##a)
30#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
new file mode 100644
index 000000000000..954f5b76bfcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -0,0 +1,181 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial busions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#define g94_i2c_aux(p) container_of((p), struct g94_i2c_aux, base)
25#include "aux.h"
26
27struct g94_i2c_aux {
28 struct nvkm_i2c_aux base;
29 int ch;
30};
31
32static void
33g94_i2c_aux_fini(struct g94_i2c_aux *aux)
34{
35 struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
36 nvkm_mask(device, 0x00e4e4 + (aux->ch * 0x50), 0x00310000, 0x00000000);
37}
38
39static int
40g94_i2c_aux_init(struct g94_i2c_aux *aux)
41{
42 struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
43 const u32 unksel = 1; /* nfi which to use, or if it matters.. */
44 const u32 ureq = unksel ? 0x00100000 : 0x00200000;
45 const u32 urep = unksel ? 0x01000000 : 0x02000000;
46 u32 ctrl, timeout;
47
48 /* wait up to 1ms for any previous transaction to be done... */
49 timeout = 1000;
50 do {
51 ctrl = nvkm_rd32(device, 0x00e4e4 + (aux->ch * 0x50));
52 udelay(1);
53 if (!timeout--) {
54 AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
55 return -EBUSY;
56 }
57 } while (ctrl & 0x03010000);
58
59 /* set some magic, and wait up to 1ms for it to appear */
60 nvkm_mask(device, 0x00e4e4 + (aux->ch * 0x50), 0x00300000, ureq);
61 timeout = 1000;
62 do {
63 ctrl = nvkm_rd32(device, 0x00e4e4 + (aux->ch * 0x50));
64 udelay(1);
65 if (!timeout--) {
66 AUX_ERR(&aux->base, "magic wait %08x", ctrl);
67 g94_i2c_aux_fini(aux);
68 return -EBUSY;
69 }
70 } while ((ctrl & 0x03000000) != urep);
71
72 return 0;
73}
74
75static int
76g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
77 u8 type, u32 addr, u8 *data, u8 size)
78{
79 struct g94_i2c_aux *aux = g94_i2c_aux(obj);
80 struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
81 const u32 base = aux->ch * 0x50;
82 u32 ctrl, stat, timeout, retries;
83 u32 xbuf[4] = {};
84 int ret, i;
85
86 AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
87
88 ret = g94_i2c_aux_init(aux);
89 if (ret < 0)
90 goto out;
91
92 stat = nvkm_rd32(device, 0x00e4e8 + base);
93 if (!(stat & 0x10000000)) {
94 AUX_TRACE(&aux->base, "sink not detected");
95 ret = -ENXIO;
96 goto out;
97 }
98
99 if (!(type & 1)) {
100 memcpy(xbuf, data, size);
101 for (i = 0; i < 16; i += 4) {
102 AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
103 nvkm_wr32(device, 0x00e4c0 + base + i, xbuf[i / 4]);
104 }
105 }
106
107 ctrl = nvkm_rd32(device, 0x00e4e4 + base);
108 ctrl &= ~0x0001f0ff;
109 ctrl |= type << 12;
110 ctrl |= size - 1;
111 nvkm_wr32(device, 0x00e4e0 + base, addr);
112
113 /* (maybe) retry transaction a number of times on failure... */
114 for (retries = 0; !ret && retries < 32; retries++) {
115 /* reset, and delay a while if this is a retry */
116 nvkm_wr32(device, 0x00e4e4 + base, 0x80000000 | ctrl);
117 nvkm_wr32(device, 0x00e4e4 + base, 0x00000000 | ctrl);
118 if (retries)
119 udelay(400);
120
121 /* transaction request, wait up to 1ms for it to complete */
122 nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl);
123
124 timeout = 1000;
125 do {
126 ctrl = nvkm_rd32(device, 0x00e4e4 + base);
127 udelay(1);
128 if (!timeout--) {
129 AUX_ERR(&aux->base, "timeout %08x", ctrl);
130 ret = -EIO;
131 goto out;
132 }
133 } while (ctrl & 0x00010000);
134 ret = 1;
135
136 /* read status, and check if transaction completed ok */
137 stat = nvkm_mask(device, 0x00e4e8 + base, 0, 0);
138 if ((stat & 0x000f0000) == 0x00080000 ||
139 (stat & 0x000f0000) == 0x00020000)
140 ret = retry ? 0 : 1;
141 if ((stat & 0x00000100))
142 ret = -ETIMEDOUT;
143 if ((stat & 0x00000e00))
144 ret = -EIO;
145
146 AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
147 }
148
149 if (type & 1) {
150 for (i = 0; i < 16; i += 4) {
151 xbuf[i / 4] = nvkm_rd32(device, 0x00e4d0 + base + i);
152 AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
153 }
154 memcpy(data, xbuf, size);
155 }
156
157out:
158 g94_i2c_aux_fini(aux);
159 return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
160}
161
162static const struct nvkm_i2c_aux_func
163g94_i2c_aux_func = {
164 .xfer = g94_i2c_aux_xfer,
165};
166
167int
168g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
169 struct nvkm_i2c_aux **paux)
170{
171 struct g94_i2c_aux *aux;
172
173 if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
174 return -ENOMEM;
175 *paux = &aux->base;
176
177 nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base);
178 aux->ch = drive;
179 aux->base.intr = 1 << aux->ch;
180 return 0;
181}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c
new file mode 100644
index 000000000000..bed231b56dbd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c
@@ -0,0 +1,181 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial busions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#define gm204_i2c_aux(p) container_of((p), struct gm204_i2c_aux, base)
25#include "aux.h"
26
27struct gm204_i2c_aux {
28 struct nvkm_i2c_aux base;
29 int ch;
30};
31
32static void
33gm204_i2c_aux_fini(struct gm204_i2c_aux *aux)
34{
35 struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
36 nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
37}
38
39static int
40gm204_i2c_aux_init(struct gm204_i2c_aux *aux)
41{
42 struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
43 const u32 unksel = 1; /* nfi which to use, or if it matters.. */
44 const u32 ureq = unksel ? 0x00100000 : 0x00200000;
45 const u32 urep = unksel ? 0x01000000 : 0x02000000;
46 u32 ctrl, timeout;
47
48 /* wait up to 1ms for any previous transaction to be done... */
49 timeout = 1000;
50 do {
51 ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
52 udelay(1);
53 if (!timeout--) {
54 AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
55 return -EBUSY;
56 }
57 } while (ctrl & 0x03010000);
58
59 /* set some magic, and wait up to 1ms for it to appear */
60 nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
61 timeout = 1000;
62 do {
63 ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
64 udelay(1);
65 if (!timeout--) {
66 AUX_ERR(&aux->base, "magic wait %08x", ctrl);
67 gm204_i2c_aux_fini(aux);
68 return -EBUSY;
69 }
70 } while ((ctrl & 0x03000000) != urep);
71
72 return 0;
73}
74
75static int
76gm204_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
77 u8 type, u32 addr, u8 *data, u8 size)
78{
79 struct gm204_i2c_aux *aux = gm204_i2c_aux(obj);
80 struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
81 const u32 base = aux->ch * 0x50;
82 u32 ctrl, stat, timeout, retries;
83 u32 xbuf[4] = {};
84 int ret, i;
85
86 AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
87
88 ret = gm204_i2c_aux_init(aux);
89 if (ret < 0)
90 goto out;
91
92 stat = nvkm_rd32(device, 0x00d958 + base);
93 if (!(stat & 0x10000000)) {
94 AUX_TRACE(&aux->base, "sink not detected");
95 ret = -ENXIO;
96 goto out;
97 }
98
99 if (!(type & 1)) {
100 memcpy(xbuf, data, size);
101 for (i = 0; i < 16; i += 4) {
102 AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
103 nvkm_wr32(device, 0x00d930 + base + i, xbuf[i / 4]);
104 }
105 }
106
107 ctrl = nvkm_rd32(device, 0x00d954 + base);
108 ctrl &= ~0x0001f0ff;
109 ctrl |= type << 12;
110 ctrl |= size - 1;
111 nvkm_wr32(device, 0x00d950 + base, addr);
112
113 /* (maybe) retry transaction a number of times on failure... */
114 for (retries = 0; !ret && retries < 32; retries++) {
115 /* reset, and delay a while if this is a retry */
116 nvkm_wr32(device, 0x00d954 + base, 0x80000000 | ctrl);
117 nvkm_wr32(device, 0x00d954 + base, 0x00000000 | ctrl);
118 if (retries)
119 udelay(400);
120
121 /* transaction request, wait up to 1ms for it to complete */
122 nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl);
123
124 timeout = 1000;
125 do {
126 ctrl = nvkm_rd32(device, 0x00d954 + base);
127 udelay(1);
128 if (!timeout--) {
129 AUX_ERR(&aux->base, "timeout %08x", ctrl);
130 ret = -EIO;
131 goto out;
132 }
133 } while (ctrl & 0x00010000);
134 ret = 1;
135
136 /* read status, and check if transaction completed ok */
137 stat = nvkm_mask(device, 0x00d958 + base, 0, 0);
138 if ((stat & 0x000f0000) == 0x00080000 ||
139 (stat & 0x000f0000) == 0x00020000)
140 ret = retry ? 0 : 1;
141 if ((stat & 0x00000100))
142 ret = -ETIMEDOUT;
143 if ((stat & 0x00000e00))
144 ret = -EIO;
145
146 AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
147 }
148
149 if (type & 1) {
150 for (i = 0; i < 16; i += 4) {
151 xbuf[i / 4] = nvkm_rd32(device, 0x00d940 + base + i);
152 AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
153 }
154 memcpy(data, xbuf, size);
155 }
156
157out:
158 gm204_i2c_aux_fini(aux);
159 return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
160}
161
162static const struct nvkm_i2c_aux_func
163gm204_i2c_aux_func = {
164 .xfer = gm204_i2c_aux_xfer,
165};
166
167int
168gm204_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
169 struct nvkm_i2c_aux **paux)
170{
171 struct gm204_i2c_aux *aux;
172
173 if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
174 return -ENOMEM;
175 *paux = &aux->base;
176
177 nvkm_i2c_aux_ctor(&gm204_i2c_aux_func, pad, index, &aux->base);
178 aux->ch = drive;
179 aux->base.intr = 1 << aux->ch;
180 return 0;
181}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 9200f122c02c..243a71ff0a0d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -22,328 +22,91 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25#include "aux.h"
26#include "bus.h"
25#include "pad.h" 27#include "pad.h"
26 28
27#include <core/device.h>
28#include <core/notify.h> 29#include <core/notify.h>
29#include <core/option.h> 30#include <core/option.h>
30#include <subdev/bios.h> 31#include <subdev/bios.h>
31#include <subdev/bios/dcb.h> 32#include <subdev/bios/dcb.h>
33#include <subdev/bios/i2c.h>
32 34
33/****************************************************************************** 35static struct nvkm_i2c_pad *
34 * interface to linux i2c bit-banging algorithm 36nvkm_i2c_pad_find(struct nvkm_i2c *i2c, int id)
35 *****************************************************************************/
36
37#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
38#define CSTMSEL true
39#else
40#define CSTMSEL false
41#endif
42
43static int
44nvkm_i2c_pre_xfer(struct i2c_adapter *adap)
45{ 37{
46 struct i2c_algo_bit_data *bit = adap->algo_data; 38 struct nvkm_i2c_pad *pad;
47 struct nvkm_i2c_port *port = bit->data;
48 return nvkm_i2c(port)->acquire(port, bit->timeout);
49}
50 39
51static void 40 list_for_each_entry(pad, &i2c->pad, head) {
52nvkm_i2c_post_xfer(struct i2c_adapter *adap) 41 if (pad->id == id)
53{ 42 return pad;
54 struct i2c_algo_bit_data *bit = adap->algo_data;
55 struct nvkm_i2c_port *port = bit->data;
56 return nvkm_i2c(port)->release(port);
57}
58
59static void
60nvkm_i2c_setscl(void *data, int state)
61{
62 struct nvkm_i2c_port *port = data;
63 port->func->drive_scl(port, state);
64}
65
66static void
67nvkm_i2c_setsda(void *data, int state)
68{
69 struct nvkm_i2c_port *port = data;
70 port->func->drive_sda(port, state);
71}
72
73static int
74nvkm_i2c_getscl(void *data)
75{
76 struct nvkm_i2c_port *port = data;
77 return port->func->sense_scl(port);
78}
79
80static int
81nvkm_i2c_getsda(void *data)
82{
83 struct nvkm_i2c_port *port = data;
84 return port->func->sense_sda(port);
85}
86
87/******************************************************************************
88 * base i2c "port" class implementation
89 *****************************************************************************/
90
91int
92_nvkm_i2c_port_fini(struct nvkm_object *object, bool suspend)
93{
94 struct nvkm_i2c_port *port = (void *)object;
95 struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
96 nv_ofuncs(pad)->fini(nv_object(pad), suspend);
97 return nvkm_object_fini(&port->base, suspend);
98}
99
100void
101_nvkm_i2c_port_dtor(struct nvkm_object *object)
102{
103 struct nvkm_i2c_port *port = (void *)object;
104 i2c_del_adapter(&port->adapter);
105 nvkm_object_destroy(&port->base);
106}
107
108int
109nvkm_i2c_port_create_(struct nvkm_object *parent, struct nvkm_object *engine,
110 struct nvkm_oclass *oclass, u8 index,
111 const struct i2c_algorithm *algo,
112 const struct nvkm_i2c_func *func,
113 int size, void **pobject)
114{
115 struct nvkm_device *device = nv_device(parent);
116 struct nvkm_i2c *i2c = nvkm_i2c(parent);
117 struct nvkm_i2c_port *port;
118 int ret;
119
120 ret = nvkm_object_create_(parent, engine, oclass, 0, size, pobject);
121 port = *pobject;
122 if (ret)
123 return ret;
124
125 snprintf(port->adapter.name, sizeof(port->adapter.name),
126 "nvkm-%s-%d", device->name, index);
127 port->adapter.owner = THIS_MODULE;
128 port->adapter.dev.parent = nv_device_base(device);
129 port->index = index;
130 port->aux = -1;
131 port->func = func;
132 mutex_init(&port->mutex);
133
134 if ( algo == &nvkm_i2c_bit_algo &&
135 !nvkm_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) {
136 struct i2c_algo_bit_data *bit;
137
138 bit = kzalloc(sizeof(*bit), GFP_KERNEL);
139 if (!bit)
140 return -ENOMEM;
141
142 bit->udelay = 10;
143 bit->timeout = usecs_to_jiffies(2200);
144 bit->data = port;
145 bit->pre_xfer = nvkm_i2c_pre_xfer;
146 bit->post_xfer = nvkm_i2c_post_xfer;
147 bit->setsda = nvkm_i2c_setsda;
148 bit->setscl = nvkm_i2c_setscl;
149 bit->getsda = nvkm_i2c_getsda;
150 bit->getscl = nvkm_i2c_getscl;
151
152 port->adapter.algo_data = bit;
153 ret = i2c_bit_add_bus(&port->adapter);
154 } else {
155 port->adapter.algo_data = port;
156 port->adapter.algo = algo;
157 ret = i2c_add_adapter(&port->adapter);
158 } 43 }
159 44
160 if (ret == 0) 45 return NULL;
161 list_add_tail(&port->head, &i2c->ports);
162 return ret;
163} 46}
164 47
165/****************************************************************************** 48struct nvkm_i2c_bus *
166 * base i2c subdev class implementation 49nvkm_i2c_bus_find(struct nvkm_i2c *i2c, int id)
167 *****************************************************************************/
168
169static struct nvkm_i2c_port *
170nvkm_i2c_find(struct nvkm_i2c *i2c, u8 index)
171{ 50{
172 struct nvkm_bios *bios = nvkm_bios(i2c); 51 struct nvkm_bios *bios = i2c->subdev.device->bios;
173 struct nvkm_i2c_port *port; 52 struct nvkm_i2c_bus *bus;
174 53
175 if (index == NV_I2C_DEFAULT(0) || 54 if (id == NVKM_I2C_BUS_PRI || id == NVKM_I2C_BUS_SEC) {
176 index == NV_I2C_DEFAULT(1)) {
177 u8 ver, hdr, cnt, len; 55 u8 ver, hdr, cnt, len;
178 u16 i2c = dcb_i2c_table(bios, &ver, &hdr, &cnt, &len); 56 u16 i2c = dcb_i2c_table(bios, &ver, &hdr, &cnt, &len);
179 if (i2c && ver >= 0x30) { 57 if (i2c && ver >= 0x30) {
180 u8 auxidx = nv_ro08(bios, i2c + 4); 58 u8 auxidx = nvbios_rd08(bios, i2c + 4);
181 if (index == NV_I2C_DEFAULT(0)) 59 if (id == NVKM_I2C_BUS_PRI)
182 index = (auxidx & 0x0f) >> 0; 60 id = NVKM_I2C_BUS_CCB((auxidx & 0x0f) >> 0);
183 else 61 else
184 index = (auxidx & 0xf0) >> 4; 62 id = NVKM_I2C_BUS_CCB((auxidx & 0xf0) >> 4);
185 } else { 63 } else {
186 index = 2; 64 id = NVKM_I2C_BUS_CCB(2);
187 } 65 }
188 } 66 }
189 67
190 list_for_each_entry(port, &i2c->ports, head) { 68 list_for_each_entry(bus, &i2c->bus, head) {
191 if (port->index == index) 69 if (bus->id == id)
192 return port; 70 return bus;
193 } 71 }
194 72
195 return NULL; 73 return NULL;
196} 74}
197 75
198static struct nvkm_i2c_port * 76struct nvkm_i2c_aux *
199nvkm_i2c_find_type(struct nvkm_i2c *i2c, u16 type) 77nvkm_i2c_aux_find(struct nvkm_i2c *i2c, int id)
200{ 78{
201 struct nvkm_i2c_port *port; 79 struct nvkm_i2c_aux *aux;
202 80
203 list_for_each_entry(port, &i2c->ports, head) { 81 list_for_each_entry(aux, &i2c->aux, head) {
204 if (nv_hclass(port) == type) 82 if (aux->id == id)
205 return port; 83 return aux;
206 } 84 }
207 85
208 return NULL; 86 return NULL;
209} 87}
210 88
211static void 89static void
212nvkm_i2c_release_pad(struct nvkm_i2c_port *port) 90nvkm_i2c_intr_fini(struct nvkm_event *event, int type, int id)
213{
214 struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
215 struct nvkm_i2c *i2c = nvkm_i2c(port);
216
217 if (atomic_dec_and_test(&nv_object(pad)->usecount)) {
218 nv_ofuncs(pad)->fini(nv_object(pad), false);
219 wake_up_all(&i2c->wait);
220 }
221}
222
223static int
224nvkm_i2c_try_acquire_pad(struct nvkm_i2c_port *port)
225{
226 struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
227
228 if (atomic_add_return(1, &nv_object(pad)->usecount) != 1) {
229 struct nvkm_object *owner = (void *)pad->port;
230 do {
231 if (owner == (void *)port)
232 return 0;
233 owner = owner->parent;
234 } while(owner);
235 nvkm_i2c_release_pad(port);
236 return -EBUSY;
237 }
238
239 pad->next = port;
240 nv_ofuncs(pad)->init(nv_object(pad));
241 return 0;
242}
243
244static int
245nvkm_i2c_acquire_pad(struct nvkm_i2c_port *port, unsigned long timeout)
246{
247 struct nvkm_i2c *i2c = nvkm_i2c(port);
248
249 if (timeout) {
250 if (wait_event_timeout(i2c->wait,
251 nvkm_i2c_try_acquire_pad(port) == 0,
252 timeout) == 0)
253 return -EBUSY;
254 } else {
255 wait_event(i2c->wait, nvkm_i2c_try_acquire_pad(port) == 0);
256 }
257
258 return 0;
259}
260
261static void
262nvkm_i2c_release(struct nvkm_i2c_port *port)
263__releases(pad->mutex)
264{
265 nvkm_i2c(port)->release_pad(port);
266 mutex_unlock(&port->mutex);
267}
268
269static int
270nvkm_i2c_acquire(struct nvkm_i2c_port *port, unsigned long timeout)
271__acquires(pad->mutex)
272{
273 int ret;
274 mutex_lock(&port->mutex);
275 if ((ret = nvkm_i2c(port)->acquire_pad(port, timeout)))
276 mutex_unlock(&port->mutex);
277 return ret;
278}
279
280static int
281nvkm_i2c_identify(struct nvkm_i2c *i2c, int index, const char *what,
282 struct nvkm_i2c_board_info *info,
283 bool (*match)(struct nvkm_i2c_port *,
284 struct i2c_board_info *, void *), void *data)
285{
286 struct nvkm_i2c_port *port = nvkm_i2c_find(i2c, index);
287 int i;
288
289 if (!port) {
290 nv_debug(i2c, "no bus when probing %s on %d\n", what, index);
291 return -ENODEV;
292 }
293
294 nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index);
295 for (i = 0; info[i].dev.addr; i++) {
296 u8 orig_udelay = 0;
297
298 if ((port->adapter.algo == &i2c_bit_algo) &&
299 (info[i].udelay != 0)) {
300 struct i2c_algo_bit_data *algo = port->adapter.algo_data;
301 nv_debug(i2c, "using custom udelay %d instead of %d\n",
302 info[i].udelay, algo->udelay);
303 orig_udelay = algo->udelay;
304 algo->udelay = info[i].udelay;
305 }
306
307 if (nv_probe_i2c(port, info[i].dev.addr) &&
308 (!match || match(port, &info[i].dev, data))) {
309 nv_info(i2c, "detected %s: %s\n", what,
310 info[i].dev.type);
311 return i;
312 }
313
314 if (orig_udelay) {
315 struct i2c_algo_bit_data *algo = port->adapter.algo_data;
316 algo->udelay = orig_udelay;
317 }
318 }
319
320 nv_debug(i2c, "no devices found.\n");
321 return -ENODEV;
322}
323
324static void
325nvkm_i2c_intr_fini(struct nvkm_event *event, int type, int index)
326{ 91{
327 struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event); 92 struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event);
328 struct nvkm_i2c_port *port = i2c->find(i2c, index); 93 struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, id);
329 const struct nvkm_i2c_impl *impl = (void *)nv_object(i2c)->oclass; 94 if (aux)
330 if (port && port->aux >= 0) 95 i2c->func->aux_mask(i2c, type, aux->intr, 0);
331 impl->aux_mask(i2c, type, 1 << port->aux, 0);
332} 96}
333 97
334static void 98static void
335nvkm_i2c_intr_init(struct nvkm_event *event, int type, int index) 99nvkm_i2c_intr_init(struct nvkm_event *event, int type, int id)
336{ 100{
337 struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event); 101 struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event);
338 struct nvkm_i2c_port *port = i2c->find(i2c, index); 102 struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, id);
339 const struct nvkm_i2c_impl *impl = (void *)nv_object(i2c)->oclass; 103 if (aux)
340 if (port && port->aux >= 0) 104 i2c->func->aux_mask(i2c, type, aux->intr, aux->intr);
341 impl->aux_mask(i2c, type, 1 << port->aux, 1 << port->aux);
342} 105}
343 106
344static int 107static int
345nvkm_i2c_intr_ctor(struct nvkm_object *object, void *data, u32 size, 108nvkm_i2c_intr_ctor(struct nvkm_object *object, void *data, u32 size,
346 struct nvkm_notify *notify) 109 struct nvkm_notify *notify)
347{ 110{
348 struct nvkm_i2c_ntfy_req *req = data; 111 struct nvkm_i2c_ntfy_req *req = data;
349 if (!WARN_ON(size != sizeof(*req))) { 112 if (!WARN_ON(size != sizeof(*req))) {
@@ -355,38 +118,6 @@ nvkm_i2c_intr_ctor(struct nvkm_object *object, void *data, u32 size,
355 return -EINVAL; 118 return -EINVAL;
356} 119}
357 120
358static void
359nvkm_i2c_intr(struct nvkm_subdev *subdev)
360{
361 struct nvkm_i2c_impl *impl = (void *)nv_oclass(subdev);
362 struct nvkm_i2c *i2c = nvkm_i2c(subdev);
363 struct nvkm_i2c_port *port;
364 u32 hi, lo, rq, tx, e;
365
366 if (impl->aux_stat) {
367 impl->aux_stat(i2c, &hi, &lo, &rq, &tx);
368 if (hi || lo || rq || tx) {
369 list_for_each_entry(port, &i2c->ports, head) {
370 if (e = 0, port->aux < 0)
371 continue;
372
373 if (hi & (1 << port->aux)) e |= NVKM_I2C_PLUG;
374 if (lo & (1 << port->aux)) e |= NVKM_I2C_UNPLUG;
375 if (rq & (1 << port->aux)) e |= NVKM_I2C_IRQ;
376 if (tx & (1 << port->aux)) e |= NVKM_I2C_DONE;
377 if (e) {
378 struct nvkm_i2c_ntfy_rep rep = {
379 .mask = e,
380 };
381 nvkm_event_send(&i2c->event, rep.mask,
382 port->index, &rep,
383 sizeof(rep));
384 }
385 }
386 }
387 }
388}
389
390static const struct nvkm_event_func 121static const struct nvkm_event_func
391nvkm_i2c_intr_func = { 122nvkm_i2c_intr_func = {
392 .ctor = nvkm_i2c_intr_ctor, 123 .ctor = nvkm_i2c_intr_ctor,
@@ -394,229 +125,272 @@ nvkm_i2c_intr_func = {
394 .fini = nvkm_i2c_intr_fini, 125 .fini = nvkm_i2c_intr_fini,
395}; 126};
396 127
397int 128static void
398_nvkm_i2c_fini(struct nvkm_object *object, bool suspend) 129nvkm_i2c_intr(struct nvkm_subdev *subdev)
399{ 130{
400 struct nvkm_i2c_impl *impl = (void *)nv_oclass(object); 131 struct nvkm_i2c *i2c = nvkm_i2c(subdev);
401 struct nvkm_i2c *i2c = (void *)object; 132 struct nvkm_i2c_aux *aux;
402 struct nvkm_i2c_port *port; 133 u32 hi, lo, rq, tx;
403 u32 mask; 134
404 int ret; 135 if (!i2c->func->aux_stat)
136 return;
137
138 i2c->func->aux_stat(i2c, &hi, &lo, &rq, &tx);
139 if (!hi && !lo && !rq && !tx)
140 return;
405 141
406 list_for_each_entry(port, &i2c->ports, head) { 142 list_for_each_entry(aux, &i2c->aux, head) {
407 ret = nv_ofuncs(port)->fini(nv_object(port), suspend); 143 u32 mask = 0;
408 if (ret && suspend) 144 if (hi & aux->intr) mask |= NVKM_I2C_PLUG;
409 goto fail; 145 if (lo & aux->intr) mask |= NVKM_I2C_UNPLUG;
146 if (rq & aux->intr) mask |= NVKM_I2C_IRQ;
147 if (tx & aux->intr) mask |= NVKM_I2C_DONE;
148 if (mask) {
149 struct nvkm_i2c_ntfy_rep rep = {
150 .mask = mask,
151 };
152 nvkm_event_send(&i2c->event, rep.mask, aux->id,
153 &rep, sizeof(rep));
154 }
410 } 155 }
156}
157
158static int
159nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
160{
161 struct nvkm_i2c *i2c = nvkm_i2c(subdev);
162 struct nvkm_i2c_pad *pad;
163 u32 mask;
411 164
412 if ((mask = (1 << impl->aux) - 1), impl->aux_stat) { 165 if ((mask = (1 << i2c->func->aux) - 1), i2c->func->aux_stat) {
413 impl->aux_mask(i2c, NVKM_I2C_ANY, mask, 0); 166 i2c->func->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
414 impl->aux_stat(i2c, &mask, &mask, &mask, &mask); 167 i2c->func->aux_stat(i2c, &mask, &mask, &mask, &mask);
415 } 168 }
416 169
417 return nvkm_subdev_fini(&i2c->base, suspend); 170 list_for_each_entry(pad, &i2c->pad, head) {
418fail: 171 nvkm_i2c_pad_fini(pad);
419 list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
420 nv_ofuncs(port)->init(nv_object(port));
421 } 172 }
422 173
423 return ret; 174 return 0;
424} 175}
425 176
426int 177static int
427_nvkm_i2c_init(struct nvkm_object *object) 178nvkm_i2c_init(struct nvkm_subdev *subdev)
428{ 179{
429 struct nvkm_i2c *i2c = (void *)object; 180 struct nvkm_i2c *i2c = nvkm_i2c(subdev);
430 struct nvkm_i2c_port *port; 181 struct nvkm_i2c_bus *bus;
431 int ret; 182 struct nvkm_i2c_pad *pad;
432 183
433 ret = nvkm_subdev_init(&i2c->base); 184 list_for_each_entry(pad, &i2c->pad, head) {
434 if (ret == 0) { 185 nvkm_i2c_pad_init(pad);
435 list_for_each_entry(port, &i2c->ports, head) {
436 ret = nv_ofuncs(port)->init(nv_object(port));
437 if (ret)
438 goto fail;
439 }
440 } 186 }
441 187
442 return ret; 188 list_for_each_entry(bus, &i2c->bus, head) {
443fail: 189 nvkm_i2c_bus_init(bus);
444 list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
445 nv_ofuncs(port)->fini(nv_object(port), false);
446 } 190 }
447 191
448 return ret; 192 return 0;
449} 193}
450 194
451void 195static void *
452_nvkm_i2c_dtor(struct nvkm_object *object) 196nvkm_i2c_dtor(struct nvkm_subdev *subdev)
453{ 197{
454 struct nvkm_i2c *i2c = (void *)object; 198 struct nvkm_i2c *i2c = nvkm_i2c(subdev);
455 struct nvkm_i2c_port *port, *temp;
456 199
457 nvkm_event_fini(&i2c->event); 200 nvkm_event_fini(&i2c->event);
458 201
459 list_for_each_entry_safe(port, temp, &i2c->ports, head) { 202 while (!list_empty(&i2c->aux)) {
460 nvkm_object_ref(NULL, (struct nvkm_object **)&port); 203 struct nvkm_i2c_aux *aux =
204 list_first_entry(&i2c->aux, typeof(*aux), head);
205 nvkm_i2c_aux_del(&aux);
461 } 206 }
462 207
463 nvkm_subdev_destroy(&i2c->base); 208 while (!list_empty(&i2c->bus)) {
464} 209 struct nvkm_i2c_bus *bus =
465 210 list_first_entry(&i2c->bus, typeof(*bus), head);
466static struct nvkm_oclass * 211 nvkm_i2c_bus_del(&bus);
467nvkm_i2c_extdev_sclass[] = { 212 }
468 nvkm_anx9805_sclass,
469};
470 213
471static void 214 while (!list_empty(&i2c->pad)) {
472nvkm_i2c_create_port(struct nvkm_i2c *i2c, int index, u8 type, 215 struct nvkm_i2c_pad *pad =
473 struct dcb_i2c_entry *info) 216 list_first_entry(&i2c->pad, typeof(*pad), head);
474{ 217 nvkm_i2c_pad_del(&pad);
475 const struct nvkm_i2c_impl *impl = (void *)nv_oclass(i2c);
476 struct nvkm_oclass *oclass;
477 struct nvkm_object *parent;
478 struct nvkm_object *object;
479 int ret, pad;
480
481 if (info->share != DCB_I2C_UNUSED) {
482 pad = info->share;
483 oclass = impl->pad_s;
484 } else {
485 if (type != DCB_I2C_NVIO_AUX)
486 pad = 0x100 + info->drive;
487 else
488 pad = 0x100 + info->auxch;
489 oclass = impl->pad_x;
490 } 218 }
491 219
492 ret = nvkm_object_ctor(nv_object(i2c), NULL, oclass, 220 return i2c;
493 NULL, pad, &parent); 221}
494 if (ret < 0)
495 return;
496 222
497 oclass = impl->sclass; 223static const struct nvkm_subdev_func
498 do { 224nvkm_i2c = {
499 ret = -EINVAL; 225 .dtor = nvkm_i2c_dtor,
500 if (oclass->handle == type) { 226 .init = nvkm_i2c_init,
501 ret = nvkm_object_ctor(parent, NULL, oclass, 227 .fini = nvkm_i2c_fini,
502 info, index, &object); 228 .intr = nvkm_i2c_intr,
503 } 229};
504 } while (ret && (++oclass)->handle);
505 230
506 nvkm_object_ref(NULL, &parent); 231static const struct nvkm_i2c_drv {
232 u8 bios;
233 u8 addr;
234 int (*pad_new)(struct nvkm_i2c_bus *, int id, u8 addr,
235 struct nvkm_i2c_pad **);
507} 236}
237nvkm_i2c_drv[] = {
238 { 0x0d, 0x39, anx9805_pad_new },
239 { 0x0e, 0x3b, anx9805_pad_new },
240 {}
241};
508 242
509int 243int
510nvkm_i2c_create_(struct nvkm_object *parent, struct nvkm_object *engine, 244nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
511 struct nvkm_oclass *oclass, int length, void **pobject) 245 int index, struct nvkm_i2c **pi2c)
512{ 246{
513 struct nvkm_bios *bios = nvkm_bios(parent); 247 struct nvkm_bios *bios = device->bios;
514 struct nvkm_i2c *i2c; 248 struct nvkm_i2c *i2c;
515 struct nvkm_object *object; 249 struct dcb_i2c_entry ccbE;
516 struct dcb_i2c_entry info; 250 struct dcb_output dcbE;
517 int ret, i, j, index = -1; 251 u8 ver, hdr;
518 struct dcb_output outp; 252 int ret, i;
519 u8 ver, hdr; 253
520 u32 data; 254 if (!(i2c = *pi2c = kzalloc(sizeof(*i2c), GFP_KERNEL)))
521 255 return -ENOMEM;
522 ret = nvkm_subdev_create(parent, engine, oclass, 0, "I2C", "i2c", &i2c); 256
523 *pobject = nv_object(i2c); 257 nvkm_subdev_ctor(&nvkm_i2c, device, index, 0, &i2c->subdev);
524 if (ret) 258 i2c->func = func;
525 return ret; 259 INIT_LIST_HEAD(&i2c->pad);
526 260 INIT_LIST_HEAD(&i2c->bus);
527 nv_subdev(i2c)->intr = nvkm_i2c_intr; 261 INIT_LIST_HEAD(&i2c->aux);
528 i2c->find = nvkm_i2c_find; 262
529 i2c->find_type = nvkm_i2c_find_type; 263 i = -1;
530 i2c->acquire_pad = nvkm_i2c_acquire_pad; 264 while (!dcb_i2c_parse(bios, ++i, &ccbE)) {
531 i2c->release_pad = nvkm_i2c_release_pad; 265 struct nvkm_i2c_pad *pad = NULL;
532 i2c->acquire = nvkm_i2c_acquire; 266 struct nvkm_i2c_bus *bus = NULL;
533 i2c->release = nvkm_i2c_release; 267 struct nvkm_i2c_aux *aux = NULL;
534 i2c->identify = nvkm_i2c_identify; 268
535 init_waitqueue_head(&i2c->wait); 269 nvkm_debug(&i2c->subdev, "ccb %02x: type %02x drive %02x "
536 INIT_LIST_HEAD(&i2c->ports); 270 "sense %02x share %02x auxch %02x\n", i, ccbE.type,
537 271 ccbE.drive, ccbE.sense, ccbE.share, ccbE.auxch);
538 while (!dcb_i2c_parse(bios, ++index, &info)) { 272
539 switch (info.type) { 273 if (ccbE.share != DCB_I2C_UNUSED) {
540 case DCB_I2C_NV04_BIT: 274 const int id = NVKM_I2C_PAD_HYBRID(ccbE.share);
541 case DCB_I2C_NV4E_BIT: 275 if (!(pad = nvkm_i2c_pad_find(i2c, id)))
542 case DCB_I2C_NVIO_BIT: 276 ret = func->pad_s_new(i2c, id, &pad);
543 nvkm_i2c_create_port(i2c, NV_I2C_PORT(index), 277 else
544 info.type, &info); 278 ret = 0;
545 break; 279 } else {
546 case DCB_I2C_NVIO_AUX: 280 ret = func->pad_x_new(i2c, NVKM_I2C_PAD_CCB(i), &pad);
547 nvkm_i2c_create_port(i2c, NV_I2C_AUX(index), 281 }
548 info.type, &info); 282
549 break; 283 if (ret) {
550 case DCB_I2C_PMGR: 284 nvkm_error(&i2c->subdev, "ccb %02x pad, %d\n", i, ret);
551 if (info.drive != DCB_I2C_UNUSED) { 285 nvkm_i2c_pad_del(&pad);
552 nvkm_i2c_create_port(i2c, NV_I2C_PORT(index), 286 continue;
553 DCB_I2C_NVIO_BIT, &info); 287 }
554 } 288
555 if (info.auxch != DCB_I2C_UNUSED) { 289 if (pad->func->bus_new_0 && ccbE.type == DCB_I2C_NV04_BIT) {
556 nvkm_i2c_create_port(i2c, NV_I2C_AUX(index), 290 ret = pad->func->bus_new_0(pad, NVKM_I2C_BUS_CCB(i),
557 DCB_I2C_NVIO_AUX, &info); 291 ccbE.drive,
558 } 292 ccbE.sense, &bus);
559 break; 293 } else
560 case DCB_I2C_UNUSED: 294 if (pad->func->bus_new_4 &&
561 default: 295 ( ccbE.type == DCB_I2C_NV4E_BIT ||
296 ccbE.type == DCB_I2C_NVIO_BIT ||
297 (ccbE.type == DCB_I2C_PMGR &&
298 ccbE.drive != DCB_I2C_UNUSED))) {
299 ret = pad->func->bus_new_4(pad, NVKM_I2C_BUS_CCB(i),
300 ccbE.drive, &bus);
301 }
302
303 if (ret) {
304 nvkm_error(&i2c->subdev, "ccb %02x bus, %d\n", i, ret);
305 nvkm_i2c_bus_del(&bus);
306 }
307
308 if (pad->func->aux_new_6 &&
309 ( ccbE.type == DCB_I2C_NVIO_AUX ||
310 (ccbE.type == DCB_I2C_PMGR &&
311 ccbE.auxch != DCB_I2C_UNUSED))) {
312 ret = pad->func->aux_new_6(pad, NVKM_I2C_BUS_CCB(i),
313 ccbE.auxch, &aux);
314 } else {
315 ret = 0;
316 }
317
318 if (ret) {
319 nvkm_error(&i2c->subdev, "ccb %02x aux, %d\n", i, ret);
320 nvkm_i2c_aux_del(&aux);
321 }
322
323 if (ccbE.type != DCB_I2C_UNUSED && !bus && !aux) {
324 nvkm_warn(&i2c->subdev, "ccb %02x was ignored\n", i);
562 continue; 325 continue;
563 } 326 }
564 } 327 }
565 328
566 /* in addition to the busses specified in the i2c table, there
567 * may be ddc/aux channels hiding behind external tmds/dp/etc
568 * transmitters.
569 */
570 index = NV_I2C_EXT(0);
571 i = -1; 329 i = -1;
572 while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) { 330 while (dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE)) {
573 if (!outp.location || !outp.extdev) 331 const struct nvkm_i2c_drv *drv = nvkm_i2c_drv;
332 struct nvkm_i2c_bus *bus;
333 struct nvkm_i2c_pad *pad;
334
335 /* internal outputs handled by native i2c busses (above) */
336 if (!dcbE.location)
574 continue; 337 continue;
575 338
576 switch (outp.type) { 339 /* we need an i2c bus to talk to the external encoder */
577 case DCB_OUTPUT_TMDS: 340 bus = nvkm_i2c_bus_find(i2c, dcbE.i2c_index);
578 info.type = NV_I2C_TYPE_EXTDDC(outp.extdev); 341 if (!bus) {
579 break; 342 nvkm_debug(&i2c->subdev, "dcb %02x no bus\n", i);
580 case DCB_OUTPUT_DP:
581 info.type = NV_I2C_TYPE_EXTAUX(outp.extdev);
582 break;
583 default:
584 continue; 343 continue;
585 } 344 }
586 345
587 ret = -ENODEV; 346 /* ... and a driver for it */
588 j = -1; 347 while (drv->pad_new) {
589 while (ret && ++j < ARRAY_SIZE(nvkm_i2c_extdev_sclass)) { 348 if (drv->bios == dcbE.extdev)
590 parent = nv_object(i2c->find(i2c, outp.i2c_index)); 349 break;
591 oclass = nvkm_i2c_extdev_sclass[j]; 350 drv++;
592 do {
593 if (oclass->handle != info.type)
594 continue;
595 ret = nvkm_object_ctor(parent, NULL, oclass,
596 NULL, index++, &object);
597 } while (ret && (++oclass)->handle);
598 } 351 }
599 }
600 352
601 ret = nvkm_event_init(&nvkm_i2c_intr_func, 4, index, &i2c->event); 353 if (!drv->pad_new) {
602 if (ret) 354 nvkm_debug(&i2c->subdev, "dcb %02x drv %02x unknown\n",
603 return ret; 355 i, dcbE.extdev);
604 356 continue;
605 return 0; 357 }
606}
607 358
608int 359 /* find/create an instance of the driver */
609_nvkm_i2c_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 360 pad = nvkm_i2c_pad_find(i2c, NVKM_I2C_PAD_EXT(dcbE.extdev));
610 struct nvkm_oclass *oclass, void *data, u32 size, 361 if (!pad) {
611 struct nvkm_object **pobject) 362 const int id = NVKM_I2C_PAD_EXT(dcbE.extdev);
612{ 363 ret = drv->pad_new(bus, id, drv->addr, &pad);
613 struct nvkm_i2c *i2c; 364 if (ret) {
614 int ret; 365 nvkm_error(&i2c->subdev, "dcb %02x pad, %d\n",
366 i, ret);
367 nvkm_i2c_pad_del(&pad);
368 continue;
369 }
370 }
615 371
616 ret = nvkm_i2c_create(parent, engine, oclass, &i2c); 372 /* create any i2c bus / aux channel required by the output */
617 *pobject = nv_object(i2c); 373 if (pad->func->aux_new_6 && dcbE.type == DCB_OUTPUT_DP) {
618 if (ret) 374 const int id = NVKM_I2C_AUX_EXT(dcbE.extdev);
619 return ret; 375 struct nvkm_i2c_aux *aux = NULL;
376 ret = pad->func->aux_new_6(pad, id, 0, &aux);
377 if (ret) {
378 nvkm_error(&i2c->subdev, "dcb %02x aux, %d\n",
379 i, ret);
380 nvkm_i2c_aux_del(&aux);
381 }
382 } else
383 if (pad->func->bus_new_4) {
384 const int id = NVKM_I2C_BUS_EXT(dcbE.extdev);
385 struct nvkm_i2c_bus *bus = NULL;
386 ret = pad->func->bus_new_4(pad, id, 0, &bus);
387 if (ret) {
388 nvkm_error(&i2c->subdev, "dcb %02x bus, %d\n",
389 i, ret);
390 nvkm_i2c_bus_del(&bus);
391 }
392 }
393 }
620 394
621 return 0; 395 return nvkm_event_init(&nvkm_i2c_intr_func, 4, i, &i2c->event);
622} 396}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
index 861a453d2a67..cdce11bbabe5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
@@ -9,7 +9,7 @@
9 * Software is furnished to do so, subject to the following conditions: 9 * Software is furnished to do so, subject to the following conditions:
10 * 10 *
11 * The above copyright notice and this permission notice shall be included in 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software. 12 * all copies or substantial portions of the Software.
13 * 13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
@@ -21,7 +21,7 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "priv.h" 24#include "bus.h"
25 25
26#ifdef CONFIG_NOUVEAU_I2C_INTERNAL 26#ifdef CONFIG_NOUVEAU_I2C_INTERNAL
27#define T_TIMEOUT 2200000 27#define T_TIMEOUT 2200000
@@ -29,205 +29,188 @@
29#define T_HOLD 5000 29#define T_HOLD 5000
30 30
31static inline void 31static inline void
32i2c_drive_scl(struct nvkm_i2c_port *port, int state) 32nvkm_i2c_drive_scl(struct nvkm_i2c_bus *bus, int state)
33{ 33{
34 port->func->drive_scl(port, state); 34 bus->func->drive_scl(bus, state);
35} 35}
36 36
37static inline void 37static inline void
38i2c_drive_sda(struct nvkm_i2c_port *port, int state) 38nvkm_i2c_drive_sda(struct nvkm_i2c_bus *bus, int state)
39{ 39{
40 port->func->drive_sda(port, state); 40 bus->func->drive_sda(bus, state);
41} 41}
42 42
43static inline int 43static inline int
44i2c_sense_scl(struct nvkm_i2c_port *port) 44nvkm_i2c_sense_scl(struct nvkm_i2c_bus *bus)
45{ 45{
46 return port->func->sense_scl(port); 46 return bus->func->sense_scl(bus);
47} 47}
48 48
49static inline int 49static inline int
50i2c_sense_sda(struct nvkm_i2c_port *port) 50nvkm_i2c_sense_sda(struct nvkm_i2c_bus *bus)
51{ 51{
52 return port->func->sense_sda(port); 52 return bus->func->sense_sda(bus);
53} 53}
54 54
55static void 55static void
56i2c_delay(struct nvkm_i2c_port *port, u32 nsec) 56nvkm_i2c_delay(struct nvkm_i2c_bus *bus, u32 nsec)
57{ 57{
58 udelay((nsec + 500) / 1000); 58 udelay((nsec + 500) / 1000);
59} 59}
60 60
61static bool 61static bool
62i2c_raise_scl(struct nvkm_i2c_port *port) 62nvkm_i2c_raise_scl(struct nvkm_i2c_bus *bus)
63{ 63{
64 u32 timeout = T_TIMEOUT / T_RISEFALL; 64 u32 timeout = T_TIMEOUT / T_RISEFALL;
65 65
66 i2c_drive_scl(port, 1); 66 nvkm_i2c_drive_scl(bus, 1);
67 do { 67 do {
68 i2c_delay(port, T_RISEFALL); 68 nvkm_i2c_delay(bus, T_RISEFALL);
69 } while (!i2c_sense_scl(port) && --timeout); 69 } while (!nvkm_i2c_sense_scl(bus) && --timeout);
70 70
71 return timeout != 0; 71 return timeout != 0;
72} 72}
73 73
74static int 74static int
75i2c_start(struct nvkm_i2c_port *port) 75i2c_start(struct nvkm_i2c_bus *bus)
76{ 76{
77 int ret = 0; 77 int ret = 0;
78 78
79 if (!i2c_sense_scl(port) || 79 if (!nvkm_i2c_sense_scl(bus) ||
80 !i2c_sense_sda(port)) { 80 !nvkm_i2c_sense_sda(bus)) {
81 i2c_drive_scl(port, 0); 81 nvkm_i2c_drive_scl(bus, 0);
82 i2c_drive_sda(port, 1); 82 nvkm_i2c_drive_sda(bus, 1);
83 if (!i2c_raise_scl(port)) 83 if (!nvkm_i2c_raise_scl(bus))
84 ret = -EBUSY; 84 ret = -EBUSY;
85 } 85 }
86 86
87 i2c_drive_sda(port, 0); 87 nvkm_i2c_drive_sda(bus, 0);
88 i2c_delay(port, T_HOLD); 88 nvkm_i2c_delay(bus, T_HOLD);
89 i2c_drive_scl(port, 0); 89 nvkm_i2c_drive_scl(bus, 0);
90 i2c_delay(port, T_HOLD); 90 nvkm_i2c_delay(bus, T_HOLD);
91 return ret; 91 return ret;
92} 92}
93 93
94static void 94static void
95i2c_stop(struct nvkm_i2c_port *port) 95i2c_stop(struct nvkm_i2c_bus *bus)
96{ 96{
97 i2c_drive_scl(port, 0); 97 nvkm_i2c_drive_scl(bus, 0);
98 i2c_drive_sda(port, 0); 98 nvkm_i2c_drive_sda(bus, 0);
99 i2c_delay(port, T_RISEFALL); 99 nvkm_i2c_delay(bus, T_RISEFALL);
100 100
101 i2c_drive_scl(port, 1); 101 nvkm_i2c_drive_scl(bus, 1);
102 i2c_delay(port, T_HOLD); 102 nvkm_i2c_delay(bus, T_HOLD);
103 i2c_drive_sda(port, 1); 103 nvkm_i2c_drive_sda(bus, 1);
104 i2c_delay(port, T_HOLD); 104 nvkm_i2c_delay(bus, T_HOLD);
105} 105}
106 106
107static int 107static int
108i2c_bitw(struct nvkm_i2c_port *port, int sda) 108i2c_bitw(struct nvkm_i2c_bus *bus, int sda)
109{ 109{
110 i2c_drive_sda(port, sda); 110 nvkm_i2c_drive_sda(bus, sda);
111 i2c_delay(port, T_RISEFALL); 111 nvkm_i2c_delay(bus, T_RISEFALL);
112 112
113 if (!i2c_raise_scl(port)) 113 if (!nvkm_i2c_raise_scl(bus))
114 return -ETIMEDOUT; 114 return -ETIMEDOUT;
115 i2c_delay(port, T_HOLD); 115 nvkm_i2c_delay(bus, T_HOLD);
116 116
117 i2c_drive_scl(port, 0); 117 nvkm_i2c_drive_scl(bus, 0);
118 i2c_delay(port, T_HOLD); 118 nvkm_i2c_delay(bus, T_HOLD);
119 return 0; 119 return 0;
120} 120}
121 121
122static int 122static int
123i2c_bitr(struct nvkm_i2c_port *port) 123i2c_bitr(struct nvkm_i2c_bus *bus)
124{ 124{
125 int sda; 125 int sda;
126 126
127 i2c_drive_sda(port, 1); 127 nvkm_i2c_drive_sda(bus, 1);
128 i2c_delay(port, T_RISEFALL); 128 nvkm_i2c_delay(bus, T_RISEFALL);
129 129
130 if (!i2c_raise_scl(port)) 130 if (!nvkm_i2c_raise_scl(bus))
131 return -ETIMEDOUT; 131 return -ETIMEDOUT;
132 i2c_delay(port, T_HOLD); 132 nvkm_i2c_delay(bus, T_HOLD);
133 133
134 sda = i2c_sense_sda(port); 134 sda = nvkm_i2c_sense_sda(bus);
135 135
136 i2c_drive_scl(port, 0); 136 nvkm_i2c_drive_scl(bus, 0);
137 i2c_delay(port, T_HOLD); 137 nvkm_i2c_delay(bus, T_HOLD);
138 return sda; 138 return sda;
139} 139}
140 140
141static int 141static int
142i2c_get_byte(struct nvkm_i2c_port *port, u8 *byte, bool last) 142nvkm_i2c_get_byte(struct nvkm_i2c_bus *bus, u8 *byte, bool last)
143{ 143{
144 int i, bit; 144 int i, bit;
145 145
146 *byte = 0; 146 *byte = 0;
147 for (i = 7; i >= 0; i--) { 147 for (i = 7; i >= 0; i--) {
148 bit = i2c_bitr(port); 148 bit = i2c_bitr(bus);
149 if (bit < 0) 149 if (bit < 0)
150 return bit; 150 return bit;
151 *byte |= bit << i; 151 *byte |= bit << i;
152 } 152 }
153 153
154 return i2c_bitw(port, last ? 1 : 0); 154 return i2c_bitw(bus, last ? 1 : 0);
155} 155}
156 156
157static int 157static int
158i2c_put_byte(struct nvkm_i2c_port *port, u8 byte) 158nvkm_i2c_put_byte(struct nvkm_i2c_bus *bus, u8 byte)
159{ 159{
160 int i, ret; 160 int i, ret;
161 for (i = 7; i >= 0; i--) { 161 for (i = 7; i >= 0; i--) {
162 ret = i2c_bitw(port, !!(byte & (1 << i))); 162 ret = i2c_bitw(bus, !!(byte & (1 << i)));
163 if (ret < 0) 163 if (ret < 0)
164 return ret; 164 return ret;
165 } 165 }
166 166
167 ret = i2c_bitr(port); 167 ret = i2c_bitr(bus);
168 if (ret == 1) /* nack */ 168 if (ret == 1) /* nack */
169 ret = -EIO; 169 ret = -EIO;
170 return ret; 170 return ret;
171} 171}
172 172
173static int 173static int
174i2c_addr(struct nvkm_i2c_port *port, struct i2c_msg *msg) 174i2c_addr(struct nvkm_i2c_bus *bus, struct i2c_msg *msg)
175{ 175{
176 u32 addr = msg->addr << 1; 176 u32 addr = msg->addr << 1;
177 if (msg->flags & I2C_M_RD) 177 if (msg->flags & I2C_M_RD)
178 addr |= 1; 178 addr |= 1;
179 return i2c_put_byte(port, addr); 179 return nvkm_i2c_put_byte(bus, addr);
180} 180}
181 181
182static int 182int
183i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 183nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *bus, struct i2c_msg *msgs, int num)
184{ 184{
185 struct nvkm_i2c_port *port = adap->algo_data;
186 struct i2c_msg *msg = msgs; 185 struct i2c_msg *msg = msgs;
187 int ret = 0, mcnt = num; 186 int ret = 0, mcnt = num;
188 187
189 ret = nvkm_i2c(port)->acquire(port, nsecs_to_jiffies(T_TIMEOUT));
190 if (ret)
191 return ret;
192
193 while (!ret && mcnt--) { 188 while (!ret && mcnt--) {
194 u8 remaining = msg->len; 189 u8 remaining = msg->len;
195 u8 *ptr = msg->buf; 190 u8 *ptr = msg->buf;
196 191
197 ret = i2c_start(port); 192 ret = i2c_start(bus);
198 if (ret == 0) 193 if (ret == 0)
199 ret = i2c_addr(port, msg); 194 ret = i2c_addr(bus, msg);
200 195
201 if (msg->flags & I2C_M_RD) { 196 if (msg->flags & I2C_M_RD) {
202 while (!ret && remaining--) 197 while (!ret && remaining--)
203 ret = i2c_get_byte(port, ptr++, !remaining); 198 ret = nvkm_i2c_get_byte(bus, ptr++, !remaining);
204 } else { 199 } else {
205 while (!ret && remaining--) 200 while (!ret && remaining--)
206 ret = i2c_put_byte(port, *ptr++); 201 ret = nvkm_i2c_put_byte(bus, *ptr++);
207 } 202 }
208 203
209 msg++; 204 msg++;
210 } 205 }
211 206
212 i2c_stop(port); 207 i2c_stop(bus);
213 nvkm_i2c(port)->release(port);
214 return (ret < 0) ? ret : num; 208 return (ret < 0) ? ret : num;
215} 209}
216#else 210#else
217static int 211int
218i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 212nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *bus, struct i2c_msg *msgs, int num)
219{ 213{
220 return -ENODEV; 214 return -ENODEV;
221} 215}
222#endif 216#endif
223
224static u32
225i2c_bit_func(struct i2c_adapter *adap)
226{
227 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
228}
229
230const struct i2c_algorithm nvkm_i2c_bit_algo = {
231 .master_xfer = i2c_bit_xfer,
232 .functionality = i2c_bit_func
233};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
new file mode 100644
index 000000000000..807a2b67bd64
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
@@ -0,0 +1,245 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "bus.h"
25#include "pad.h"
26
27#include <core/option.h>
28
29/*******************************************************************************
30 * i2c-algo-bit
31 ******************************************************************************/
32static int
33nvkm_i2c_bus_pre_xfer(struct i2c_adapter *adap)
34{
35 struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
36 return nvkm_i2c_bus_acquire(bus);
37}
38
39static void
40nvkm_i2c_bus_post_xfer(struct i2c_adapter *adap)
41{
42 struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
43 return nvkm_i2c_bus_release(bus);
44}
45
46static void
47nvkm_i2c_bus_setscl(void *data, int state)
48{
49 struct nvkm_i2c_bus *bus = data;
50 bus->func->drive_scl(bus, state);
51}
52
53static void
54nvkm_i2c_bus_setsda(void *data, int state)
55{
56 struct nvkm_i2c_bus *bus = data;
57 bus->func->drive_sda(bus, state);
58}
59
60static int
61nvkm_i2c_bus_getscl(void *data)
62{
63 struct nvkm_i2c_bus *bus = data;
64 return bus->func->sense_scl(bus);
65}
66
67static int
68nvkm_i2c_bus_getsda(void *data)
69{
70 struct nvkm_i2c_bus *bus = data;
71 return bus->func->sense_sda(bus);
72}
73
74/*******************************************************************************
75 * !i2c-algo-bit (off-chip i2c bus / hw i2c / internal bit-banging algo)
76 ******************************************************************************/
77static int
78nvkm_i2c_bus_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
79{
80 struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
81 int ret;
82
83 ret = nvkm_i2c_bus_acquire(bus);
84 if (ret)
85 return ret;
86
87 ret = bus->func->xfer(bus, msgs, num);
88 nvkm_i2c_bus_release(bus);
89 return ret;
90}
91
92static u32
93nvkm_i2c_bus_func(struct i2c_adapter *adap)
94{
95 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
96}
97
98static const struct i2c_algorithm
99nvkm_i2c_bus_algo = {
100 .master_xfer = nvkm_i2c_bus_xfer,
101 .functionality = nvkm_i2c_bus_func,
102};
103
104/*******************************************************************************
105 * nvkm_i2c_bus base
106 ******************************************************************************/
107void
108nvkm_i2c_bus_init(struct nvkm_i2c_bus *bus)
109{
110 BUS_TRACE(bus, "init");
111 if (bus->func->init)
112 bus->func->init(bus);
113}
114
115void
116nvkm_i2c_bus_release(struct nvkm_i2c_bus *bus)
117{
118 struct nvkm_i2c_pad *pad = bus->pad;
119 BUS_TRACE(bus, "release");
120 nvkm_i2c_pad_release(pad);
121 mutex_unlock(&bus->mutex);
122}
123
124int
125nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *bus)
126{
127 struct nvkm_i2c_pad *pad = bus->pad;
128 int ret;
129 BUS_TRACE(bus, "acquire");
130 mutex_lock(&bus->mutex);
131 ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C);
132 if (ret)
133 mutex_unlock(&bus->mutex);
134 return ret;
135}
136
137int
138nvkm_i2c_bus_probe(struct nvkm_i2c_bus *bus, const char *what,
139 struct nvkm_i2c_bus_probe *info,
140 bool (*match)(struct nvkm_i2c_bus *,
141 struct i2c_board_info *, void *), void *data)
142{
143 int i;
144
145 BUS_DBG(bus, "probing %ss", what);
146 for (i = 0; info[i].dev.addr; i++) {
147 u8 orig_udelay = 0;
148
149 if ((bus->i2c.algo == &i2c_bit_algo) && (info[i].udelay != 0)) {
150 struct i2c_algo_bit_data *algo = bus->i2c.algo_data;
151 BUS_DBG(bus, "%dms delay instead of %dms",
152 info[i].udelay, algo->udelay);
153 orig_udelay = algo->udelay;
154 algo->udelay = info[i].udelay;
155 }
156
157 if (nvkm_probe_i2c(&bus->i2c, info[i].dev.addr) &&
158 (!match || match(bus, &info[i].dev, data))) {
159 BUS_DBG(bus, "detected %s: %s",
160 what, info[i].dev.type);
161 return i;
162 }
163
164 if (orig_udelay) {
165 struct i2c_algo_bit_data *algo = bus->i2c.algo_data;
166 algo->udelay = orig_udelay;
167 }
168 }
169
170 BUS_DBG(bus, "no devices found.");
171 return -ENODEV;
172}
173
174void
175nvkm_i2c_bus_del(struct nvkm_i2c_bus **pbus)
176{
177 struct nvkm_i2c_bus *bus = *pbus;
178 if (bus && !WARN_ON(!bus->func)) {
179 BUS_TRACE(bus, "dtor");
180 list_del(&bus->head);
181 i2c_del_adapter(&bus->i2c);
182 kfree(bus->i2c.algo_data);
183 kfree(*pbus);
184 *pbus = NULL;
185 }
186}
187
188int
189nvkm_i2c_bus_ctor(const struct nvkm_i2c_bus_func *func,
190 struct nvkm_i2c_pad *pad, int id,
191 struct nvkm_i2c_bus *bus)
192{
193 struct nvkm_device *device = pad->i2c->subdev.device;
194 struct i2c_algo_bit_data *bit;
195#ifndef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
196 const bool internal = false;
197#else
198 const bool internal = true;
199#endif
200 int ret;
201
202 bus->func = func;
203 bus->pad = pad;
204 bus->id = id;
205 mutex_init(&bus->mutex);
206 list_add_tail(&bus->head, &pad->i2c->bus);
207 BUS_TRACE(bus, "ctor");
208
209 snprintf(bus->i2c.name, sizeof(bus->i2c.name), "nvkm-%s-bus-%04x",
210 dev_name(device->dev), id);
211 bus->i2c.owner = THIS_MODULE;
212 bus->i2c.dev.parent = device->dev;
213
214 if ( bus->func->drive_scl &&
215 !nvkm_boolopt(device->cfgopt, "NvI2C", internal)) {
216 if (!(bit = kzalloc(sizeof(*bit), GFP_KERNEL)))
217 return -ENOMEM;
218 bit->udelay = 10;
219 bit->timeout = usecs_to_jiffies(2200);
220 bit->data = bus;
221 bit->pre_xfer = nvkm_i2c_bus_pre_xfer;
222 bit->post_xfer = nvkm_i2c_bus_post_xfer;
223 bit->setscl = nvkm_i2c_bus_setscl;
224 bit->setsda = nvkm_i2c_bus_setsda;
225 bit->getscl = nvkm_i2c_bus_getscl;
226 bit->getsda = nvkm_i2c_bus_getsda;
227 bus->i2c.algo_data = bit;
228 ret = i2c_bit_add_bus(&bus->i2c);
229 } else {
230 bus->i2c.algo = &nvkm_i2c_bus_algo;
231 ret = i2c_add_adapter(&bus->i2c);
232 }
233
234 return ret;
235}
236
237int
238nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *func,
239 struct nvkm_i2c_pad *pad, int id,
240 struct nvkm_i2c_bus **pbus)
241{
242 if (!(*pbus = kzalloc(sizeof(**pbus), GFP_KERNEL)))
243 return -ENOMEM;
244 return nvkm_i2c_bus_ctor(func, pad, id, *pbus);
245}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
new file mode 100644
index 000000000000..e1be14c23e54
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
@@ -0,0 +1,37 @@
1#ifndef __NVKM_I2C_BUS_H__
2#define __NVKM_I2C_BUS_H__
3#include "pad.h"
4
5struct nvkm_i2c_bus_func {
6 void (*init)(struct nvkm_i2c_bus *);
7 void (*drive_scl)(struct nvkm_i2c_bus *, int state);
8 void (*drive_sda)(struct nvkm_i2c_bus *, int state);
9 int (*sense_scl)(struct nvkm_i2c_bus *);
10 int (*sense_sda)(struct nvkm_i2c_bus *);
11 int (*xfer)(struct nvkm_i2c_bus *, struct i2c_msg *, int num);
12};
13
14int nvkm_i2c_bus_ctor(const struct nvkm_i2c_bus_func *, struct nvkm_i2c_pad *,
15 int id, struct nvkm_i2c_bus *);
16int nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *, struct nvkm_i2c_pad *,
17 int id, struct nvkm_i2c_bus **);
18void nvkm_i2c_bus_del(struct nvkm_i2c_bus **);
19void nvkm_i2c_bus_init(struct nvkm_i2c_bus *);
20
21int nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *, struct i2c_msg *, int);
22
23int nv04_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, u8,
24 struct nvkm_i2c_bus **);
25
26int nv4e_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_bus **);
27int nv50_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_bus **);
28int gf119_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_bus **);
29
30#define BUS_MSG(b,l,f,a...) do { \
31 struct nvkm_i2c_bus *_bus = (b); \
32 nvkm_##l(&_bus->pad->i2c->subdev, "bus %04x: "f"\n", _bus->id, ##a); \
33} while(0)
34#define BUS_ERR(b,f,a...) BUS_MSG((b), error, f, ##a)
35#define BUS_DBG(b,f,a...) BUS_MSG((b), debug, f, ##a)
36#define BUS_TRACE(b,f,a...) BUS_MSG((b), trace, f, ##a)
37#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c
new file mode 100644
index 000000000000..96bbdda0f439
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c
@@ -0,0 +1,95 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial busions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#define gf119_i2c_bus(p) container_of((p), struct gf119_i2c_bus, base)
25#include "bus.h"
26
27struct gf119_i2c_bus {
28 struct nvkm_i2c_bus base;
29 u32 addr;
30};
31
32static void
33gf119_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
34{
35 struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
36 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
37 nvkm_mask(device, bus->addr, 0x00000001, state ? 0x00000001 : 0);
38}
39
40static void
41gf119_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
42{
43 struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
44 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
45 nvkm_mask(device, bus->addr, 0x00000002, state ? 0x00000002 : 0);
46}
47
48static int
49gf119_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
50{
51 struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
52 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
53 return !!(nvkm_rd32(device, bus->addr) & 0x00000010);
54}
55
56static int
57gf119_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
58{
59 struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
60 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
61 return !!(nvkm_rd32(device, bus->addr) & 0x00000020);
62}
63
64static void
65gf119_i2c_bus_init(struct nvkm_i2c_bus *base)
66{
67 struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
68 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
69 nvkm_wr32(device, bus->addr, 0x00000007);
70}
71
72static const struct nvkm_i2c_bus_func
73gf119_i2c_bus_func = {
74 .init = gf119_i2c_bus_init,
75 .drive_scl = gf119_i2c_bus_drive_scl,
76 .drive_sda = gf119_i2c_bus_drive_sda,
77 .sense_scl = gf119_i2c_bus_sense_scl,
78 .sense_sda = gf119_i2c_bus_sense_sda,
79 .xfer = nvkm_i2c_bit_xfer,
80};
81
82int
83gf119_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
84 struct nvkm_i2c_bus **pbus)
85{
86 struct gf119_i2c_bus *bus;
87
88 if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
89 return -ENOMEM;
90 *pbus = &bus->base;
91
92 nvkm_i2c_bus_ctor(&gf119_i2c_bus_func, pad, id, &bus->base);
93 bus->addr = 0x00d014 + (drive * 0x20);
94 return 0;
95}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c
new file mode 100644
index 000000000000..a58db159231f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c
@@ -0,0 +1,96 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial busions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#define nv04_i2c_bus(p) container_of((p), struct nv04_i2c_bus, base)
25#include "bus.h"
26
27#include <subdev/vga.h>
28
29struct nv04_i2c_bus {
30 struct nvkm_i2c_bus base;
31 u8 drive;
32 u8 sense;
33};
34
35static void
36nv04_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
37{
38 struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
39 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
40 u8 val = nvkm_rdvgac(device, 0, bus->drive);
41 if (state) val |= 0x20;
42 else val &= 0xdf;
43 nvkm_wrvgac(device, 0, bus->drive, val | 0x01);
44}
45
46static void
47nv04_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
48{
49 struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
50 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
51 u8 val = nvkm_rdvgac(device, 0, bus->drive);
52 if (state) val |= 0x10;
53 else val &= 0xef;
54 nvkm_wrvgac(device, 0, bus->drive, val | 0x01);
55}
56
57static int
58nv04_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
59{
60 struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
61 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
62 return !!(nvkm_rdvgac(device, 0, bus->sense) & 0x04);
63}
64
65static int
66nv04_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
67{
68 struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
69 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
70 return !!(nvkm_rdvgac(device, 0, bus->sense) & 0x08);
71}
72
73static const struct nvkm_i2c_bus_func
74nv04_i2c_bus_func = {
75 .drive_scl = nv04_i2c_bus_drive_scl,
76 .drive_sda = nv04_i2c_bus_drive_sda,
77 .sense_scl = nv04_i2c_bus_sense_scl,
78 .sense_sda = nv04_i2c_bus_sense_sda,
79 .xfer = nvkm_i2c_bit_xfer,
80};
81
82int
83nv04_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive, u8 sense,
84 struct nvkm_i2c_bus **pbus)
85{
86 struct nv04_i2c_bus *bus;
87
88 if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
89 return -ENOMEM;
90 *pbus = &bus->base;
91
92 nvkm_i2c_bus_ctor(&nv04_i2c_bus_func, pad, id, &bus->base);
93 bus->drive = drive;
94 bus->sense = sense;
95 return 0;
96}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c
new file mode 100644
index 000000000000..cdd73dcb1197
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c
@@ -0,0 +1,86 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial busions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#define nv4e_i2c_bus(p) container_of((p), struct nv4e_i2c_bus, base)
25#include "bus.h"
26
27struct nv4e_i2c_bus {
28 struct nvkm_i2c_bus base;
29 u32 addr;
30};
31
32static void
33nv4e_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
34{
35 struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
36 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
37 nvkm_mask(device, bus->addr, 0x2f, state ? 0x21 : 0x01);
38}
39
40static void
41nv4e_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
42{
43 struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
44 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
45 nvkm_mask(device, bus->addr, 0x1f, state ? 0x11 : 0x01);
46}
47
48static int
49nv4e_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
50{
51 struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
52 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
53 return !!(nvkm_rd32(device, bus->addr) & 0x00040000);
54}
55
56static int
57nv4e_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
58{
59 struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
60 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
61 return !!(nvkm_rd32(device, bus->addr) & 0x00080000);
62}
63
64static const struct nvkm_i2c_bus_func
65nv4e_i2c_bus_func = {
66 .drive_scl = nv4e_i2c_bus_drive_scl,
67 .drive_sda = nv4e_i2c_bus_drive_sda,
68 .sense_scl = nv4e_i2c_bus_sense_scl,
69 .sense_sda = nv4e_i2c_bus_sense_sda,
70 .xfer = nvkm_i2c_bit_xfer,
71};
72
73int
74nv4e_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
75 struct nvkm_i2c_bus **pbus)
76{
77 struct nv4e_i2c_bus *bus;
78
79 if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
80 return -ENOMEM;
81 *pbus = &bus->base;
82
83 nvkm_i2c_bus_ctor(&nv4e_i2c_bus_func, pad, id, &bus->base);
84 bus->addr = 0x600800 + drive;
85 return 0;
86}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c
new file mode 100644
index 000000000000..8db8399381ca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c
@@ -0,0 +1,113 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial busions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#define nv50_i2c_bus(p) container_of((p), struct nv50_i2c_bus, base)
25#include "bus.h"
26
27#include <subdev/vga.h>
28
29struct nv50_i2c_bus {
30 struct nvkm_i2c_bus base;
31 u32 addr;
32 u32 data;
33};
34
35static void
36nv50_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
37{
38 struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
39 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
40 if (state) bus->data |= 0x01;
41 else bus->data &= 0xfe;
42 nvkm_wr32(device, bus->addr, bus->data);
43}
44
45static void
46nv50_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
47{
48 struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
49 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
50 if (state) bus->data |= 0x02;
51 else bus->data &= 0xfd;
52 nvkm_wr32(device, bus->addr, bus->data);
53}
54
55static int
56nv50_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
57{
58 struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
59 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
60 return !!(nvkm_rd32(device, bus->addr) & 0x00000001);
61}
62
63static int
64nv50_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
65{
66 struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
67 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
68 return !!(nvkm_rd32(device, bus->addr) & 0x00000002);
69}
70
71static void
72nv50_i2c_bus_init(struct nvkm_i2c_bus *base)
73{
74 struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
75 struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
76 nvkm_wr32(device, bus->addr, (bus->data = 0x00000007));
77}
78
79static const struct nvkm_i2c_bus_func
80nv50_i2c_bus_func = {
81 .init = nv50_i2c_bus_init,
82 .drive_scl = nv50_i2c_bus_drive_scl,
83 .drive_sda = nv50_i2c_bus_drive_sda,
84 .sense_scl = nv50_i2c_bus_sense_scl,
85 .sense_sda = nv50_i2c_bus_sense_sda,
86 .xfer = nvkm_i2c_bit_xfer,
87};
88
89int
90nv50_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
91 struct nvkm_i2c_bus **pbus)
92{
93 static const u32 addr[] = {
94 0x00e138, 0x00e150, 0x00e168, 0x00e180,
95 0x00e254, 0x00e274, 0x00e764, 0x00e780,
96 0x00e79c, 0x00e7b8
97 };
98 struct nv50_i2c_bus *bus;
99
100 if (drive >= ARRAY_SIZE(addr)) {
101 nvkm_warn(&pad->i2c->subdev, "bus %d unknown\n", drive);
102 return -ENODEV;
103 }
104
105 if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
106 return -ENOMEM;
107 *pbus = &bus->base;
108
109 nvkm_i2c_bus_ctor(&nv50_i2c_bus_func, pad, id, &bus->base);
110 bus->addr = addr[drive];
111 bus->data = 0x00000007;
112 return 0;
113}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
index 2a2dd47b9835..bb2a31d88161 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
@@ -21,26 +21,29 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "priv.h"
25#include "pad.h"
25 26
26void 27void
27g94_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx) 28g94_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
28{ 29{
29 u32 intr = nv_rd32(i2c, 0x00e06c); 30 struct nvkm_device *device = i2c->subdev.device;
30 u32 stat = nv_rd32(i2c, 0x00e068) & intr, i; 31 u32 intr = nvkm_rd32(device, 0x00e06c);
32 u32 stat = nvkm_rd32(device, 0x00e068) & intr, i;
31 for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) { 33 for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
32 if ((stat & (1 << (i * 4)))) *hi |= 1 << i; 34 if ((stat & (1 << (i * 4)))) *hi |= 1 << i;
33 if ((stat & (2 << (i * 4)))) *lo |= 1 << i; 35 if ((stat & (2 << (i * 4)))) *lo |= 1 << i;
34 if ((stat & (4 << (i * 4)))) *rq |= 1 << i; 36 if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
35 if ((stat & (8 << (i * 4)))) *tx |= 1 << i; 37 if ((stat & (8 << (i * 4)))) *tx |= 1 << i;
36 } 38 }
37 nv_wr32(i2c, 0x00e06c, intr); 39 nvkm_wr32(device, 0x00e06c, intr);
38} 40}
39 41
40void 42void
41g94_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data) 43g94_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
42{ 44{
43 u32 temp = nv_rd32(i2c, 0x00e068), i; 45 struct nvkm_device *device = i2c->subdev.device;
46 u32 temp = nvkm_rd32(device, 0x00e068), i;
44 for (i = 0; i < 8; i++) { 47 for (i = 0; i < 8; i++) {
45 if (mask & (1 << i)) { 48 if (mask & (1 << i)) {
46 if (!(data & (1 << i))) { 49 if (!(data & (1 << i))) {
@@ -50,230 +53,20 @@ g94_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
50 temp |= type << (i * 4); 53 temp |= type << (i * 4);
51 } 54 }
52 } 55 }
53 nv_wr32(i2c, 0x00e068, temp); 56 nvkm_wr32(device, 0x00e068, temp);
54}
55
56#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
57#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
58
59static void
60auxch_fini(struct nvkm_i2c *aux, int ch)
61{
62 nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
63}
64
65static int
66auxch_init(struct nvkm_i2c *aux, int ch)
67{
68 const u32 unksel = 1; /* nfi which to use, or if it matters.. */
69 const u32 ureq = unksel ? 0x00100000 : 0x00200000;
70 const u32 urep = unksel ? 0x01000000 : 0x02000000;
71 u32 ctrl, timeout;
72
73 /* wait up to 1ms for any previous transaction to be done... */
74 timeout = 1000;
75 do {
76 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
77 udelay(1);
78 if (!timeout--) {
79 AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
80 return -EBUSY;
81 }
82 } while (ctrl & 0x03010000);
83
84 /* set some magic, and wait up to 1ms for it to appear */
85 nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
86 timeout = 1000;
87 do {
88 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
89 udelay(1);
90 if (!timeout--) {
91 AUX_ERR("magic wait 0x%08x\n", ctrl);
92 auxch_fini(aux, ch);
93 return -EBUSY;
94 }
95 } while ((ctrl & 0x03000000) != urep);
96
97 return 0;
98}
99
100int
101g94_aux(struct nvkm_i2c_port *base, bool retry,
102 u8 type, u32 addr, u8 *data, u8 size)
103{
104 struct nvkm_i2c *aux = nvkm_i2c(base);
105 struct nv50_i2c_port *port = (void *)base;
106 u32 ctrl, stat, timeout, retries;
107 u32 xbuf[4] = {};
108 int ch = port->addr;
109 int ret, i;
110
111 AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
112
113 ret = auxch_init(aux, ch);
114 if (ret)
115 goto out;
116
117 stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
118 if (!(stat & 0x10000000)) {
119 AUX_DBG("sink not detected\n");
120 ret = -ENXIO;
121 goto out;
122 }
123
124 if (!(type & 1)) {
125 memcpy(xbuf, data, size);
126 for (i = 0; i < 16; i += 4) {
127 AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
128 nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
129 }
130 }
131
132 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
133 ctrl &= ~0x0001f0ff;
134 ctrl |= type << 12;
135 ctrl |= size - 1;
136 nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
137
138 /* (maybe) retry transaction a number of times on failure... */
139 for (retries = 0; !ret && retries < 32; retries++) {
140 /* reset, and delay a while if this is a retry */
141 nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
142 nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
143 if (retries)
144 udelay(400);
145
146 /* transaction request, wait up to 1ms for it to complete */
147 nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
148
149 timeout = 1000;
150 do {
151 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
152 udelay(1);
153 if (!timeout--) {
154 AUX_ERR("tx req timeout 0x%08x\n", ctrl);
155 ret = -EIO;
156 goto out;
157 }
158 } while (ctrl & 0x00010000);
159 ret = 1;
160
161 /* read status, and check if transaction completed ok */
162 stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
163 if ((stat & 0x000f0000) == 0x00080000 ||
164 (stat & 0x000f0000) == 0x00020000)
165 ret = retry ? 0 : 1;
166 if ((stat & 0x00000100))
167 ret = -ETIMEDOUT;
168 if ((stat & 0x00000e00))
169 ret = -EIO;
170
171 AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
172 }
173
174 if (type & 1) {
175 for (i = 0; i < 16; i += 4) {
176 xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
177 AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
178 }
179 memcpy(data, xbuf, size);
180 }
181
182out:
183 auxch_fini(aux, ch);
184 return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
185} 57}
186 58
187static const struct nvkm_i2c_func 59static const struct nvkm_i2c_func
188g94_i2c_func = { 60g94_i2c = {
189 .drive_scl = nv50_i2c_drive_scl, 61 .pad_x_new = g94_i2c_pad_x_new,
190 .drive_sda = nv50_i2c_drive_sda, 62 .pad_s_new = g94_i2c_pad_s_new,
191 .sense_scl = nv50_i2c_sense_scl, 63 .aux = 4,
192 .sense_sda = nv50_i2c_sense_sda, 64 .aux_stat = g94_aux_stat,
193}; 65 .aux_mask = g94_aux_mask,
194
195static int
196g94_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
197 struct nvkm_oclass *oclass, void *data, u32 index,
198 struct nvkm_object **pobject)
199{
200 struct dcb_i2c_entry *info = data;
201 struct nv50_i2c_port *port;
202 int ret;
203
204 ret = nvkm_i2c_port_create(parent, engine, oclass, index,
205 &nvkm_i2c_bit_algo, &g94_i2c_func, &port);
206 *pobject = nv_object(port);
207 if (ret)
208 return ret;
209
210 if (info->drive >= nv50_i2c_addr_nr)
211 return -EINVAL;
212
213 port->state = 7;
214 port->addr = nv50_i2c_addr[info->drive];
215 return 0;
216}
217
218static const struct nvkm_i2c_func
219g94_aux_func = {
220 .aux = g94_aux,
221}; 66};
222 67
223int 68int
224g94_aux_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 69g94_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
225 struct nvkm_oclass *oclass, void *data, u32 index,
226 struct nvkm_object **pobject)
227{ 70{
228 struct dcb_i2c_entry *info = data; 71 return nvkm_i2c_new_(&g94_i2c, device, index, pi2c);
229 struct nv50_i2c_port *port;
230 int ret;
231
232 ret = nvkm_i2c_port_create(parent, engine, oclass, index,
233 &nvkm_i2c_aux_algo, &g94_aux_func, &port);
234 *pobject = nv_object(port);
235 if (ret)
236 return ret;
237
238 port->base.aux = info->auxch;
239 port->addr = info->auxch;
240 return 0;
241} 72}
242
243static struct nvkm_oclass
244g94_i2c_sclass[] = {
245 { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
246 .ofuncs = &(struct nvkm_ofuncs) {
247 .ctor = g94_i2c_port_ctor,
248 .dtor = _nvkm_i2c_port_dtor,
249 .init = nv50_i2c_port_init,
250 .fini = _nvkm_i2c_port_fini,
251 },
252 },
253 { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
254 .ofuncs = &(struct nvkm_ofuncs) {
255 .ctor = g94_aux_port_ctor,
256 .dtor = _nvkm_i2c_port_dtor,
257 .init = _nvkm_i2c_port_init,
258 .fini = _nvkm_i2c_port_fini,
259 },
260 },
261 {}
262};
263
264struct nvkm_oclass *
265g94_i2c_oclass = &(struct nvkm_i2c_impl) {
266 .base.handle = NV_SUBDEV(I2C, 0x94),
267 .base.ofuncs = &(struct nvkm_ofuncs) {
268 .ctor = _nvkm_i2c_ctor,
269 .dtor = _nvkm_i2c_dtor,
270 .init = _nvkm_i2c_init,
271 .fini = _nvkm_i2c_fini,
272 },
273 .sclass = g94_i2c_sclass,
274 .pad_x = &nv04_i2c_pad_oclass,
275 .pad_s = &g94_i2c_pad_oclass,
276 .aux = 4,
277 .aux_stat = g94_aux_stat,
278 .aux_mask = g94_aux_mask,
279}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c
deleted file mode 100644
index 4d4ac6638140..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c
+++ /dev/null
@@ -1,106 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "nv50.h"
25
26static int
27gf110_i2c_sense_scl(struct nvkm_i2c_port *base)
28{
29 struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
30 struct nv50_i2c_port *port = (void *)base;
31 return !!(nv_rd32(priv, port->addr) & 0x00000010);
32}
33
34static int
35gf110_i2c_sense_sda(struct nvkm_i2c_port *base)
36{
37 struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
38 struct nv50_i2c_port *port = (void *)base;
39 return !!(nv_rd32(priv, port->addr) & 0x00000020);
40}
41
42static const struct nvkm_i2c_func
43gf110_i2c_func = {
44 .drive_scl = nv50_i2c_drive_scl,
45 .drive_sda = nv50_i2c_drive_sda,
46 .sense_scl = gf110_i2c_sense_scl,
47 .sense_sda = gf110_i2c_sense_sda,
48};
49
50int
51gf110_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
52 struct nvkm_oclass *oclass, void *data, u32 index,
53 struct nvkm_object **pobject)
54{
55 struct dcb_i2c_entry *info = data;
56 struct nv50_i2c_port *port;
57 int ret;
58
59 ret = nvkm_i2c_port_create(parent, engine, oclass, index,
60 &nvkm_i2c_bit_algo, &gf110_i2c_func, &port);
61 *pobject = nv_object(port);
62 if (ret)
63 return ret;
64
65 port->state = 0x00000007;
66 port->addr = 0x00d014 + (info->drive * 0x20);
67 return 0;
68}
69
70struct nvkm_oclass
71gf110_i2c_sclass[] = {
72 { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
73 .ofuncs = &(struct nvkm_ofuncs) {
74 .ctor = gf110_i2c_port_ctor,
75 .dtor = _nvkm_i2c_port_dtor,
76 .init = nv50_i2c_port_init,
77 .fini = _nvkm_i2c_port_fini,
78 },
79 },
80 { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
81 .ofuncs = &(struct nvkm_ofuncs) {
82 .ctor = g94_aux_port_ctor,
83 .dtor = _nvkm_i2c_port_dtor,
84 .init = _nvkm_i2c_port_init,
85 .fini = _nvkm_i2c_port_fini,
86 },
87 },
88 {}
89};
90
91struct nvkm_oclass *
92gf110_i2c_oclass = &(struct nvkm_i2c_impl) {
93 .base.handle = NV_SUBDEV(I2C, 0xd0),
94 .base.ofuncs = &(struct nvkm_ofuncs) {
95 .ctor = _nvkm_i2c_ctor,
96 .dtor = _nvkm_i2c_dtor,
97 .init = _nvkm_i2c_init,
98 .fini = _nvkm_i2c_fini,
99 },
100 .sclass = gf110_i2c_sclass,
101 .pad_x = &nv04_i2c_pad_oclass,
102 .pad_s = &g94_i2c_pad_oclass,
103 .aux = 4,
104 .aux_stat = g94_aux_stat,
105 .aux_mask = g94_aux_mask,
106}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
index e290b40f2d13..ae4aad3fcd2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
@@ -21,18 +21,16 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "priv.h"
25#include "pad.h"
25 26
26struct nvkm_oclass * 27static const struct nvkm_i2c_func
27gf117_i2c_oclass = &(struct nvkm_i2c_impl) { 28gf117_i2c = {
28 .base.handle = NV_SUBDEV(I2C, 0xd7), 29 .pad_x_new = gf119_i2c_pad_x_new,
29 .base.ofuncs = &(struct nvkm_ofuncs) { 30};
30 .ctor = _nvkm_i2c_ctor, 31
31 .dtor = _nvkm_i2c_dtor, 32int
32 .init = _nvkm_i2c_init, 33gf117_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
33 .fini = _nvkm_i2c_fini, 34{
34 }, 35 return nvkm_i2c_new_(&gf117_i2c, device, index, pi2c);
35 .sclass = gf110_i2c_sclass, 36}
36 .pad_x = &nv04_i2c_pad_oclass,
37 .pad_s = &nv04_i2c_pad_oclass,
38}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
new file mode 100644
index 000000000000..6f2b02af42c8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
@@ -0,0 +1,40 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25#include "pad.h"
26
27static const struct nvkm_i2c_func
28gf119_i2c = {
29 .pad_x_new = gf119_i2c_pad_x_new,
30 .pad_s_new = gf119_i2c_pad_s_new,
31 .aux = 4,
32 .aux_stat = g94_aux_stat,
33 .aux_mask = g94_aux_mask,
34};
35
36int
37gf119_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
38{
39 return nvkm_i2c_new_(&gf119_i2c, device, index, pi2c);
40}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
index 1a464903a992..f9f6bf4b66c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
@@ -21,26 +21,29 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "priv.h"
25#include "pad.h"
25 26
26void 27void
27gk104_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx) 28gk104_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
28{ 29{
29 u32 intr = nv_rd32(i2c, 0x00dc60); 30 struct nvkm_device *device = i2c->subdev.device;
30 u32 stat = nv_rd32(i2c, 0x00dc68) & intr, i; 31 u32 intr = nvkm_rd32(device, 0x00dc60);
32 u32 stat = nvkm_rd32(device, 0x00dc68) & intr, i;
31 for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) { 33 for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
32 if ((stat & (1 << (i * 4)))) *hi |= 1 << i; 34 if ((stat & (1 << (i * 4)))) *hi |= 1 << i;
33 if ((stat & (2 << (i * 4)))) *lo |= 1 << i; 35 if ((stat & (2 << (i * 4)))) *lo |= 1 << i;
34 if ((stat & (4 << (i * 4)))) *rq |= 1 << i; 36 if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
35 if ((stat & (8 << (i * 4)))) *tx |= 1 << i; 37 if ((stat & (8 << (i * 4)))) *tx |= 1 << i;
36 } 38 }
37 nv_wr32(i2c, 0x00dc60, intr); 39 nvkm_wr32(device, 0x00dc60, intr);
38} 40}
39 41
40void 42void
41gk104_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data) 43gk104_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
42{ 44{
43 u32 temp = nv_rd32(i2c, 0x00dc68), i; 45 struct nvkm_device *device = i2c->subdev.device;
46 u32 temp = nvkm_rd32(device, 0x00dc68), i;
44 for (i = 0; i < 8; i++) { 47 for (i = 0; i < 8; i++) {
45 if (mask & (1 << i)) { 48 if (mask & (1 << i)) {
46 if (!(data & (1 << i))) { 49 if (!(data & (1 << i))) {
@@ -50,22 +53,20 @@ gk104_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
50 temp |= type << (i * 4); 53 temp |= type << (i * 4);
51 } 54 }
52 } 55 }
53 nv_wr32(i2c, 0x00dc68, temp); 56 nvkm_wr32(device, 0x00dc68, temp);
54} 57}
55 58
56struct nvkm_oclass * 59static const struct nvkm_i2c_func
57gk104_i2c_oclass = &(struct nvkm_i2c_impl) { 60gk104_i2c = {
58 .base.handle = NV_SUBDEV(I2C, 0xe0), 61 .pad_x_new = gf119_i2c_pad_x_new,
59 .base.ofuncs = &(struct nvkm_ofuncs) { 62 .pad_s_new = gf119_i2c_pad_s_new,
60 .ctor = _nvkm_i2c_ctor,
61 .dtor = _nvkm_i2c_dtor,
62 .init = _nvkm_i2c_init,
63 .fini = _nvkm_i2c_fini,
64 },
65 .sclass = gf110_i2c_sclass,
66 .pad_x = &nv04_i2c_pad_oclass,
67 .pad_s = &g94_i2c_pad_oclass,
68 .aux = 4, 63 .aux = 4,
69 .aux_stat = gk104_aux_stat, 64 .aux_stat = gk104_aux_stat,
70 .aux_mask = gk104_aux_mask, 65 .aux_mask = gk104_aux_mask,
71}.base; 66};
67
68int
69gk104_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
70{
71 return nvkm_i2c_new_(&gk104_i2c, device, index, pi2c);
72}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
index ab64237b3842..ff9f7d62f6be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
@@ -21,199 +21,20 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "priv.h"
25 25#include "pad.h"
26#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
27#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
28
29static void
30auxch_fini(struct nvkm_i2c *aux, int ch)
31{
32 nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00310000, 0x00000000);
33}
34
35static int
36auxch_init(struct nvkm_i2c *aux, int ch)
37{
38 const u32 unksel = 1; /* nfi which to use, or if it matters.. */
39 const u32 ureq = unksel ? 0x00100000 : 0x00200000;
40 const u32 urep = unksel ? 0x01000000 : 0x02000000;
41 u32 ctrl, timeout;
42
43 /* wait up to 1ms for any previous transaction to be done... */
44 timeout = 1000;
45 do {
46 ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
47 udelay(1);
48 if (!timeout--) {
49 AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
50 return -EBUSY;
51 }
52 } while (ctrl & 0x03010000);
53
54 /* set some magic, and wait up to 1ms for it to appear */
55 nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00300000, ureq);
56 timeout = 1000;
57 do {
58 ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
59 udelay(1);
60 if (!timeout--) {
61 AUX_ERR("magic wait 0x%08x\n", ctrl);
62 auxch_fini(aux, ch);
63 return -EBUSY;
64 }
65 } while ((ctrl & 0x03000000) != urep);
66
67 return 0;
68}
69
70int
71gm204_aux(struct nvkm_i2c_port *base, bool retry,
72 u8 type, u32 addr, u8 *data, u8 size)
73{
74 struct nvkm_i2c *aux = nvkm_i2c(base);
75 struct nv50_i2c_port *port = (void *)base;
76 u32 ctrl, stat, timeout, retries;
77 u32 xbuf[4] = {};
78 int ch = port->addr;
79 int ret, i;
80
81 AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
82
83 ret = auxch_init(aux, ch);
84 if (ret)
85 goto out;
86
87 stat = nv_rd32(aux, 0x00d958 + (ch * 0x50));
88 if (!(stat & 0x10000000)) {
89 AUX_DBG("sink not detected\n");
90 ret = -ENXIO;
91 goto out;
92 }
93
94 if (!(type & 1)) {
95 memcpy(xbuf, data, size);
96 for (i = 0; i < 16; i += 4) {
97 AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
98 nv_wr32(aux, 0x00d930 + (ch * 0x50) + i, xbuf[i / 4]);
99 }
100 }
101
102 ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
103 ctrl &= ~0x0001f0ff;
104 ctrl |= type << 12;
105 ctrl |= size - 1;
106 nv_wr32(aux, 0x00d950 + (ch * 0x50), addr);
107
108 /* (maybe) retry transaction a number of times on failure... */
109 for (retries = 0; !ret && retries < 32; retries++) {
110 /* reset, and delay a while if this is a retry */
111 nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x80000000 | ctrl);
112 nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00000000 | ctrl);
113 if (retries)
114 udelay(400);
115
116 /* transaction request, wait up to 1ms for it to complete */
117 nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00010000 | ctrl);
118
119 timeout = 1000;
120 do {
121 ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
122 udelay(1);
123 if (!timeout--) {
124 AUX_ERR("tx req timeout 0x%08x\n", ctrl);
125 ret = -EIO;
126 goto out;
127 }
128 } while (ctrl & 0x00010000);
129 ret = 1;
130
131 /* read status, and check if transaction completed ok */
132 stat = nv_mask(aux, 0x00d958 + (ch * 0x50), 0, 0);
133 if ((stat & 0x000f0000) == 0x00080000 ||
134 (stat & 0x000f0000) == 0x00020000)
135 ret = retry ? 0 : 1;
136 if ((stat & 0x00000100))
137 ret = -ETIMEDOUT;
138 if ((stat & 0x00000e00))
139 ret = -EIO;
140
141 AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
142 }
143
144 if (type & 1) {
145 for (i = 0; i < 16; i += 4) {
146 xbuf[i / 4] = nv_rd32(aux, 0x00d940 + (ch * 0x50) + i);
147 AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
148 }
149 memcpy(data, xbuf, size);
150 }
151
152out:
153 auxch_fini(aux, ch);
154 return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
155}
156 26
157static const struct nvkm_i2c_func 27static const struct nvkm_i2c_func
158gm204_aux_func = { 28gm204_i2c = {
159 .aux = gm204_aux, 29 .pad_x_new = gf119_i2c_pad_x_new,
30 .pad_s_new = gm204_i2c_pad_s_new,
31 .aux = 8,
32 .aux_stat = gk104_aux_stat,
33 .aux_mask = gk104_aux_mask,
160}; 34};
161 35
162int 36int
163gm204_aux_port_ctor(struct nvkm_object *parent, 37gm204_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
164 struct nvkm_object *engine,
165 struct nvkm_oclass *oclass, void *data, u32 index,
166 struct nvkm_object **pobject)
167{ 38{
168 struct dcb_i2c_entry *info = data; 39 return nvkm_i2c_new_(&gm204_i2c, device, index, pi2c);
169 struct nv50_i2c_port *port;
170 int ret;
171
172 ret = nvkm_i2c_port_create(parent, engine, oclass, index,
173 &nvkm_i2c_aux_algo, &gm204_aux_func, &port);
174 *pobject = nv_object(port);
175 if (ret)
176 return ret;
177
178 port->base.aux = info->auxch;
179 port->addr = info->auxch;
180 return 0;
181} 40}
182
183struct nvkm_oclass
184gm204_i2c_sclass[] = {
185 { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
186 .ofuncs = &(struct nvkm_ofuncs) {
187 .ctor = gf110_i2c_port_ctor,
188 .dtor = _nvkm_i2c_port_dtor,
189 .init = nv50_i2c_port_init,
190 .fini = _nvkm_i2c_port_fini,
191 },
192 },
193 { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
194 .ofuncs = &(struct nvkm_ofuncs) {
195 .ctor = gm204_aux_port_ctor,
196 .dtor = _nvkm_i2c_port_dtor,
197 .init = _nvkm_i2c_port_init,
198 .fini = _nvkm_i2c_port_fini,
199 },
200 },
201 {}
202};
203
204struct nvkm_oclass *
205gm204_i2c_oclass = &(struct nvkm_i2c_impl) {
206 .base.handle = NV_SUBDEV(I2C, 0x24),
207 .base.ofuncs = &(struct nvkm_ofuncs) {
208 .ctor = _nvkm_i2c_ctor,
209 .dtor = _nvkm_i2c_dtor,
210 .init = _nvkm_i2c_init,
211 .fini = _nvkm_i2c_fini,
212 },
213 .sclass = gm204_i2c_sclass,
214 .pad_x = &nv04_i2c_pad_oclass,
215 .pad_s = &gm204_i2c_pad_oclass,
216 .aux = 8,
217 .aux_stat = gk104_aux_stat,
218 .aux_mask = gk104_aux_mask,
219}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
index 4cdf1c489353..18776f49355c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
@@ -22,107 +22,15 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25#include "pad.h"
26#include <subdev/vga.h>
27
28struct nv04_i2c_priv {
29 struct nvkm_i2c base;
30};
31
32struct nv04_i2c_port {
33 struct nvkm_i2c_port base;
34 u8 drive;
35 u8 sense;
36};
37
38static void
39nv04_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
40{
41 struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
42 struct nv04_i2c_port *port = (void *)base;
43 u8 val = nv_rdvgac(priv, 0, port->drive);
44 if (state) val |= 0x20;
45 else val &= 0xdf;
46 nv_wrvgac(priv, 0, port->drive, val | 0x01);
47}
48
49static void
50nv04_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
51{
52 struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
53 struct nv04_i2c_port *port = (void *)base;
54 u8 val = nv_rdvgac(priv, 0, port->drive);
55 if (state) val |= 0x10;
56 else val &= 0xef;
57 nv_wrvgac(priv, 0, port->drive, val | 0x01);
58}
59
60static int
61nv04_i2c_sense_scl(struct nvkm_i2c_port *base)
62{
63 struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
64 struct nv04_i2c_port *port = (void *)base;
65 return !!(nv_rdvgac(priv, 0, port->sense) & 0x04);
66}
67
68static int
69nv04_i2c_sense_sda(struct nvkm_i2c_port *base)
70{
71 struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
72 struct nv04_i2c_port *port = (void *)base;
73 return !!(nv_rdvgac(priv, 0, port->sense) & 0x08);
74}
75 26
76static const struct nvkm_i2c_func 27static const struct nvkm_i2c_func
77nv04_i2c_func = { 28nv04_i2c = {
78 .drive_scl = nv04_i2c_drive_scl, 29 .pad_x_new = nv04_i2c_pad_new,
79 .drive_sda = nv04_i2c_drive_sda,
80 .sense_scl = nv04_i2c_sense_scl,
81 .sense_sda = nv04_i2c_sense_sda,
82}; 30};
83 31
84static int 32int
85nv04_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 33nv04_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
86 struct nvkm_oclass *oclass, void *data, u32 index,
87 struct nvkm_object **pobject)
88{ 34{
89 struct dcb_i2c_entry *info = data; 35 return nvkm_i2c_new_(&nv04_i2c, device, index, pi2c);
90 struct nv04_i2c_port *port;
91 int ret;
92
93 ret = nvkm_i2c_port_create(parent, engine, oclass, index,
94 &nvkm_i2c_bit_algo, &nv04_i2c_func, &port);
95 *pobject = nv_object(port);
96 if (ret)
97 return ret;
98
99 port->drive = info->drive;
100 port->sense = info->sense;
101 return 0;
102} 36}
103
104static struct nvkm_oclass
105nv04_i2c_sclass[] = {
106 { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV04_BIT),
107 .ofuncs = &(struct nvkm_ofuncs) {
108 .ctor = nv04_i2c_port_ctor,
109 .dtor = _nvkm_i2c_port_dtor,
110 .init = _nvkm_i2c_port_init,
111 .fini = _nvkm_i2c_port_fini,
112 },
113 },
114 {}
115};
116
117struct nvkm_oclass *
118nv04_i2c_oclass = &(struct nvkm_i2c_impl) {
119 .base.handle = NV_SUBDEV(I2C, 0x04),
120 .base.ofuncs = &(struct nvkm_ofuncs) {
121 .ctor = _nvkm_i2c_ctor,
122 .dtor = _nvkm_i2c_dtor,
123 .init = _nvkm_i2c_init,
124 .fini = _nvkm_i2c_fini,
125 },
126 .sclass = nv04_i2c_sclass,
127 .pad_x = &nv04_i2c_pad_oclass,
128}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
index 046fe5e2ea19..6b762f7cee9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
@@ -22,99 +22,15 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25#include "pad.h"
26#include <subdev/vga.h>
27
28struct nv4e_i2c_priv {
29 struct nvkm_i2c base;
30};
31
32struct nv4e_i2c_port {
33 struct nvkm_i2c_port base;
34 u32 addr;
35};
36
37static void
38nv4e_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
39{
40 struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
41 struct nv4e_i2c_port *port = (void *)base;
42 nv_mask(priv, port->addr, 0x2f, state ? 0x21 : 0x01);
43}
44
45static void
46nv4e_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
47{
48 struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
49 struct nv4e_i2c_port *port = (void *)base;
50 nv_mask(priv, port->addr, 0x1f, state ? 0x11 : 0x01);
51}
52
53static int
54nv4e_i2c_sense_scl(struct nvkm_i2c_port *base)
55{
56 struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
57 struct nv4e_i2c_port *port = (void *)base;
58 return !!(nv_rd32(priv, port->addr) & 0x00040000);
59}
60
61static int
62nv4e_i2c_sense_sda(struct nvkm_i2c_port *base)
63{
64 struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
65 struct nv4e_i2c_port *port = (void *)base;
66 return !!(nv_rd32(priv, port->addr) & 0x00080000);
67}
68 26
69static const struct nvkm_i2c_func 27static const struct nvkm_i2c_func
70nv4e_i2c_func = { 28nv4e_i2c = {
71 .drive_scl = nv4e_i2c_drive_scl, 29 .pad_x_new = nv4e_i2c_pad_new,
72 .drive_sda = nv4e_i2c_drive_sda,
73 .sense_scl = nv4e_i2c_sense_scl,
74 .sense_sda = nv4e_i2c_sense_sda,
75}; 30};
76 31
77static int 32int
78nv4e_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 33nv4e_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
79 struct nvkm_oclass *oclass, void *data, u32 index,
80 struct nvkm_object **pobject)
81{ 34{
82 struct dcb_i2c_entry *info = data; 35 return nvkm_i2c_new_(&nv4e_i2c, device, index, pi2c);
83 struct nv4e_i2c_port *port;
84 int ret;
85
86 ret = nvkm_i2c_port_create(parent, engine, oclass, index,
87 &nvkm_i2c_bit_algo, &nv4e_i2c_func, &port);
88 *pobject = nv_object(port);
89 if (ret)
90 return ret;
91
92 port->addr = 0x600800 + info->drive;
93 return 0;
94} 36}
95
96static struct nvkm_oclass
97nv4e_i2c_sclass[] = {
98 { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV4E_BIT),
99 .ofuncs = &(struct nvkm_ofuncs) {
100 .ctor = nv4e_i2c_port_ctor,
101 .dtor = _nvkm_i2c_port_dtor,
102 .init = _nvkm_i2c_port_init,
103 .fini = _nvkm_i2c_port_fini,
104 },
105 },
106 {}
107};
108
109struct nvkm_oclass *
110nv4e_i2c_oclass = &(struct nvkm_i2c_impl) {
111 .base.handle = NV_SUBDEV(I2C, 0x4e),
112 .base.ofuncs = &(struct nvkm_ofuncs) {
113 .ctor = _nvkm_i2c_ctor,
114 .dtor = _nvkm_i2c_dtor,
115 .init = _nvkm_i2c_init,
116 .fini = _nvkm_i2c_fini,
117 },
118 .sclass = nv4e_i2c_sclass,
119 .pad_x = &nv04_i2c_pad_oclass,
120}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
index fba5b26a5682..75640ab97d6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
@@ -21,113 +21,16 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv50.h" 24#include "priv.h"
25 25#include "pad.h"
26void
27nv50_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
28{
29 struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
30 struct nv50_i2c_port *port = (void *)base;
31 if (state) port->state |= 0x01;
32 else port->state &= 0xfe;
33 nv_wr32(priv, port->addr, port->state);
34}
35
36void
37nv50_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
38{
39 struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
40 struct nv50_i2c_port *port = (void *)base;
41 if (state) port->state |= 0x02;
42 else port->state &= 0xfd;
43 nv_wr32(priv, port->addr, port->state);
44}
45
46int
47nv50_i2c_sense_scl(struct nvkm_i2c_port *base)
48{
49 struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
50 struct nv50_i2c_port *port = (void *)base;
51 return !!(nv_rd32(priv, port->addr) & 0x00000001);
52}
53
54int
55nv50_i2c_sense_sda(struct nvkm_i2c_port *base)
56{
57 struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
58 struct nv50_i2c_port *port = (void *)base;
59 return !!(nv_rd32(priv, port->addr) & 0x00000002);
60}
61 26
62static const struct nvkm_i2c_func 27static const struct nvkm_i2c_func
63nv50_i2c_func = { 28nv50_i2c = {
64 .drive_scl = nv50_i2c_drive_scl, 29 .pad_x_new = nv50_i2c_pad_new,
65 .drive_sda = nv50_i2c_drive_sda,
66 .sense_scl = nv50_i2c_sense_scl,
67 .sense_sda = nv50_i2c_sense_sda,
68};
69
70const u32 nv50_i2c_addr[] = {
71 0x00e138, 0x00e150, 0x00e168, 0x00e180,
72 0x00e254, 0x00e274, 0x00e764, 0x00e780,
73 0x00e79c, 0x00e7b8
74}; 30};
75const int nv50_i2c_addr_nr = ARRAY_SIZE(nv50_i2c_addr);
76
77static int
78nv50_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
79 struct nvkm_oclass *oclass, void *data, u32 index,
80 struct nvkm_object **pobject)
81{
82 struct dcb_i2c_entry *info = data;
83 struct nv50_i2c_port *port;
84 int ret;
85
86 ret = nvkm_i2c_port_create(parent, engine, oclass, index,
87 &nvkm_i2c_bit_algo, &nv50_i2c_func, &port);
88 *pobject = nv_object(port);
89 if (ret)
90 return ret;
91
92 if (info->drive >= nv50_i2c_addr_nr)
93 return -EINVAL;
94
95 port->state = 0x00000007;
96 port->addr = nv50_i2c_addr[info->drive];
97 return 0;
98}
99 31
100int 32int
101nv50_i2c_port_init(struct nvkm_object *object) 33nv50_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
102{ 34{
103 struct nv50_i2c_priv *priv = (void *)nvkm_i2c(object); 35 return nvkm_i2c_new_(&nv50_i2c, device, index, pi2c);
104 struct nv50_i2c_port *port = (void *)object;
105 nv_wr32(priv, port->addr, port->state);
106 return nvkm_i2c_port_init(&port->base);
107} 36}
108
109static struct nvkm_oclass
110nv50_i2c_sclass[] = {
111 { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
112 .ofuncs = &(struct nvkm_ofuncs) {
113 .ctor = nv50_i2c_port_ctor,
114 .dtor = _nvkm_i2c_port_dtor,
115 .init = nv50_i2c_port_init,
116 .fini = _nvkm_i2c_port_fini,
117 },
118 },
119 {}
120};
121
122struct nvkm_oclass *
123nv50_i2c_oclass = &(struct nvkm_i2c_impl) {
124 .base.handle = NV_SUBDEV(I2C, 0x50),
125 .base.ofuncs = &(struct nvkm_ofuncs) {
126 .ctor = _nvkm_i2c_ctor,
127 .dtor = _nvkm_i2c_dtor,
128 .init = _nvkm_i2c_init,
129 .fini = _nvkm_i2c_fini,
130 },
131 .sclass = nv50_i2c_sclass,
132 .pad_x = &nv04_i2c_pad_oclass,
133}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h
deleted file mode 100644
index b3139e721b02..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h
+++ /dev/null
@@ -1,32 +0,0 @@
1#ifndef __NV50_I2C_H__
2#define __NV50_I2C_H__
3#include "priv.h"
4
5struct nv50_i2c_priv {
6 struct nvkm_i2c base;
7};
8
9struct nv50_i2c_port {
10 struct nvkm_i2c_port base;
11 u32 addr;
12 u32 state;
13};
14
15extern const u32 nv50_i2c_addr[];
16extern const int nv50_i2c_addr_nr;
17int nv50_i2c_port_init(struct nvkm_object *);
18int nv50_i2c_sense_scl(struct nvkm_i2c_port *);
19int nv50_i2c_sense_sda(struct nvkm_i2c_port *);
20void nv50_i2c_drive_scl(struct nvkm_i2c_port *, int state);
21void nv50_i2c_drive_sda(struct nvkm_i2c_port *, int state);
22
23int g94_aux_port_ctor(struct nvkm_object *, struct nvkm_object *,
24 struct nvkm_oclass *, void *, u32,
25 struct nvkm_object **);
26void g94_i2c_acquire(struct nvkm_i2c_port *);
27void g94_i2c_release(struct nvkm_i2c_port *);
28
29int gf110_i2c_port_ctor(struct nvkm_object *, struct nvkm_object *,
30 struct nvkm_oclass *, void *, u32,
31 struct nvkm_object **);
32#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
index a242eeb67829..2c5fcb9c504b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2014 Red Hat Inc. 2 * Copyright 2015 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -19,65 +19,98 @@
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24#include "pad.h" 24#include "pad.h"
25 25
26int 26static void
27_nvkm_i2c_pad_fini(struct nvkm_object *object, bool suspend) 27nvkm_i2c_pad_mode_locked(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
28{ 28{
29 struct nvkm_i2c_pad *pad = (void *)object; 29 PAD_TRACE(pad, "-> %s", (mode == NVKM_I2C_PAD_AUX) ? "aux" :
30 DBG("-> NULL\n"); 30 (mode == NVKM_I2C_PAD_I2C) ? "i2c" : "off");
31 pad->port = NULL; 31 if (pad->func->mode)
32 return nvkm_object_fini(&pad->base, suspend); 32 pad->func->mode(pad, mode);
33} 33}
34 34
35int 35void
36_nvkm_i2c_pad_init(struct nvkm_object *object) 36nvkm_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
37{ 37{
38 struct nvkm_i2c_pad *pad = (void *)object; 38 PAD_TRACE(pad, "mode %d", mode);
39 DBG("-> PORT:%02x\n", pad->next->index); 39 mutex_lock(&pad->mutex);
40 pad->port = pad->next; 40 nvkm_i2c_pad_mode_locked(pad, mode);
41 return nvkm_object_init(&pad->base); 41 pad->mode = mode;
42 mutex_unlock(&pad->mutex);
42} 43}
43 44
44int 45void
45nvkm_i2c_pad_create_(struct nvkm_object *parent, 46nvkm_i2c_pad_release(struct nvkm_i2c_pad *pad)
46 struct nvkm_object *engine,
47 struct nvkm_oclass *oclass, int index,
48 int size, void **pobject)
49{ 47{
50 struct nvkm_i2c *i2c = nvkm_i2c(parent); 48 PAD_TRACE(pad, "release");
51 struct nvkm_i2c_port *port; 49 if (pad->mode == NVKM_I2C_PAD_OFF)
52 struct nvkm_i2c_pad *pad; 50 nvkm_i2c_pad_mode_locked(pad, pad->mode);
53 int ret; 51 mutex_unlock(&pad->mutex);
52}
54 53
55 list_for_each_entry(port, &i2c->ports, head) { 54int
56 pad = nvkm_i2c_pad(port); 55nvkm_i2c_pad_acquire(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
57 if (pad->index == index) { 56{
58 atomic_inc(&nv_object(pad)->refcount); 57 PAD_TRACE(pad, "acquire");
59 *pobject = pad; 58 mutex_lock(&pad->mutex);
60 return 1; 59 if (pad->mode != mode) {
60 if (pad->mode != NVKM_I2C_PAD_OFF) {
61 mutex_unlock(&pad->mutex);
62 return -EBUSY;
61 } 63 }
64 nvkm_i2c_pad_mode_locked(pad, mode);
62 } 65 }
66 return 0;
67}
68
69void
70nvkm_i2c_pad_fini(struct nvkm_i2c_pad *pad)
71{
72 PAD_TRACE(pad, "fini");
73 nvkm_i2c_pad_mode_locked(pad, NVKM_I2C_PAD_OFF);
74}
63 75
64 ret = nvkm_object_create_(parent, engine, oclass, 0, size, pobject); 76void
65 pad = *pobject; 77nvkm_i2c_pad_init(struct nvkm_i2c_pad *pad)
66 if (ret) 78{
67 return ret; 79 PAD_TRACE(pad, "init");
80 nvkm_i2c_pad_mode_locked(pad, pad->mode);
81}
68 82
69 pad->index = index; 83void
70 return 0; 84nvkm_i2c_pad_del(struct nvkm_i2c_pad **ppad)
85{
86 struct nvkm_i2c_pad *pad = *ppad;
87 if (pad) {
88 PAD_TRACE(pad, "dtor");
89 list_del(&pad->head);
90 kfree(pad);
91 pad = NULL;
92 }
93}
94
95void
96nvkm_i2c_pad_ctor(const struct nvkm_i2c_pad_func *func, struct nvkm_i2c *i2c,
97 int id, struct nvkm_i2c_pad *pad)
98{
99 pad->func = func;
100 pad->i2c = i2c;
101 pad->id = id;
102 pad->mode = NVKM_I2C_PAD_OFF;
103 mutex_init(&pad->mutex);
104 list_add_tail(&pad->head, &i2c->pad);
105 PAD_TRACE(pad, "ctor");
71} 106}
72 107
73int 108int
74_nvkm_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 109nvkm_i2c_pad_new_(const struct nvkm_i2c_pad_func *func, struct nvkm_i2c *i2c,
75 struct nvkm_oclass *oclass, void *data, u32 index, 110 int id, struct nvkm_i2c_pad **ppad)
76 struct nvkm_object **pobject)
77{ 111{
78 struct nvkm_i2c_pad *pad; 112 if (!(*ppad = kzalloc(sizeof(**ppad), GFP_KERNEL)))
79 int ret; 113 return -ENOMEM;
80 ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad); 114 nvkm_i2c_pad_ctor(func, i2c, id, *ppad);
81 *pobject = nv_object(pad); 115 return 0;
82 return ret;
83} 116}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
index f3422cc6f8db..9eeb992944c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
@@ -1,56 +1,67 @@
1#ifndef __NVKM_I2C_PAD_H__ 1#ifndef __NVKM_I2C_PAD_H__
2#define __NVKM_I2C_PAD_H__ 2#define __NVKM_I2C_PAD_H__
3#include "priv.h" 3#include <subdev/i2c.h>
4 4
5struct nvkm_i2c_pad { 5struct nvkm_i2c_pad {
6 struct nvkm_object base; 6 const struct nvkm_i2c_pad_func *func;
7 int index; 7 struct nvkm_i2c *i2c;
8 struct nvkm_i2c_port *port; 8#define NVKM_I2C_PAD_HYBRID(n) /* 'n' is hw pad index */ (n)
9 struct nvkm_i2c_port *next; 9#define NVKM_I2C_PAD_CCB(n) /* 'n' is ccb index */ ((n) + 0x100)
10#define NVKM_I2C_PAD_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x200)
11 int id;
12
13 enum nvkm_i2c_pad_mode {
14 NVKM_I2C_PAD_OFF,
15 NVKM_I2C_PAD_I2C,
16 NVKM_I2C_PAD_AUX,
17 } mode;
18 struct mutex mutex;
19 struct list_head head;
20};
21
22struct nvkm_i2c_pad_func {
23 int (*bus_new_0)(struct nvkm_i2c_pad *, int id, u8 drive, u8 sense,
24 struct nvkm_i2c_bus **);
25 int (*bus_new_4)(struct nvkm_i2c_pad *, int id, u8 drive,
26 struct nvkm_i2c_bus **);
27
28 int (*aux_new_6)(struct nvkm_i2c_pad *, int id, u8 drive,
29 struct nvkm_i2c_aux **);
30
31 void (*mode)(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
10}; 32};
11 33
12static inline struct nvkm_i2c_pad * 34void nvkm_i2c_pad_ctor(const struct nvkm_i2c_pad_func *, struct nvkm_i2c *,
13nvkm_i2c_pad(struct nvkm_i2c_port *port) 35 int id, struct nvkm_i2c_pad *);
14{ 36int nvkm_i2c_pad_new_(const struct nvkm_i2c_pad_func *, struct nvkm_i2c *,
15 struct nvkm_object *pad = nv_object(port); 37 int id, struct nvkm_i2c_pad **);
16 while (!nv_iclass(pad->parent, NV_SUBDEV_CLASS)) 38void nvkm_i2c_pad_del(struct nvkm_i2c_pad **);
17 pad = pad->parent; 39void nvkm_i2c_pad_init(struct nvkm_i2c_pad *);
18 return (void *)pad; 40void nvkm_i2c_pad_fini(struct nvkm_i2c_pad *);
19} 41void nvkm_i2c_pad_mode(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
20 42int nvkm_i2c_pad_acquire(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
21#define nvkm_i2c_pad_create(p,e,o,i,d) \ 43void nvkm_i2c_pad_release(struct nvkm_i2c_pad *);
22 nvkm_i2c_pad_create_((p), (e), (o), (i), sizeof(**d), (void **)d) 44
23#define nvkm_i2c_pad_destroy(p) ({ \ 45void g94_i2c_pad_mode(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
24 struct nvkm_i2c_pad *_p = (p); \ 46
25 _nvkm_i2c_pad_dtor(nv_object(_p)); \ 47int nv04_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
26}) 48int nv4e_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
27#define nvkm_i2c_pad_init(p) ({ \ 49int nv50_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
28 struct nvkm_i2c_pad *_p = (p); \ 50int g94_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
29 _nvkm_i2c_pad_init(nv_object(_p)); \ 51int gf119_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
30}) 52int gm204_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
31#define nvkm_i2c_pad_fini(p,s) ({ \ 53
32 struct nvkm_i2c_pad *_p = (p); \ 54int g94_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
33 _nvkm_i2c_pad_fini(nv_object(_p), (s)); \ 55int gf119_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
34}) 56int gm204_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
35 57
36int nvkm_i2c_pad_create_(struct nvkm_object *, struct nvkm_object *, 58int anx9805_pad_new(struct nvkm_i2c_bus *, int, u8, struct nvkm_i2c_pad **);
37 struct nvkm_oclass *, int index, int, void **); 59
38 60#define PAD_MSG(p,l,f,a...) do { \
39int _nvkm_i2c_pad_ctor(struct nvkm_object *, struct nvkm_object *, 61 struct nvkm_i2c_pad *_pad = (p); \
40 struct nvkm_oclass *, void *, u32, 62 nvkm_##l(&_pad->i2c->subdev, "pad %04x: "f"\n", _pad->id, ##a); \
41 struct nvkm_object **);
42#define _nvkm_i2c_pad_dtor nvkm_object_destroy
43int _nvkm_i2c_pad_init(struct nvkm_object *);
44int _nvkm_i2c_pad_fini(struct nvkm_object *, bool);
45
46#ifndef MSG
47#define MSG(l,f,a...) do { \
48 struct nvkm_i2c_pad *_pad = (void *)pad; \
49 nv_##l(_pad, "PAD:%c:%02x: "f, \
50 _pad->index >= 0x100 ? 'X' : 'S', \
51 _pad->index >= 0x100 ? _pad->index - 0x100 : _pad->index, ##a); \
52} while(0) 63} while(0)
53#define DBG(f,a...) MSG(debug, f, ##a) 64#define PAD_ERR(p,f,a...) PAD_MSG((p), error, f, ##a)
54#define ERR(f,a...) MSG(error, f, ##a) 65#define PAD_DBG(p,f,a...) PAD_MSG((p), debug, f, ##a)
55#endif 66#define PAD_TRACE(p,f,a...) PAD_MSG((p), trace, f, ##a)
56#endif 67#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
index e9832f7a7e38..5904bc5f2d2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
@@ -22,64 +22,55 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "pad.h" 24#include "pad.h"
25#include "aux.h"
26#include "bus.h"
25 27
26struct g94_i2c_pad { 28void
27 struct nvkm_i2c_pad base; 29g94_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
28 int addr;
29};
30
31static int
32g94_i2c_pad_fini(struct nvkm_object *object, bool suspend)
33{
34 struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
35 struct g94_i2c_pad *pad = (void *)object;
36 nv_mask(i2c, 0x00e50c + pad->addr, 0x00000001, 0x00000001);
37 return nvkm_i2c_pad_fini(&pad->base, suspend);
38}
39
40static int
41g94_i2c_pad_init(struct nvkm_object *object)
42{ 30{
43 struct nvkm_i2c *i2c = (void *)nvkm_i2c(object); 31 struct nvkm_subdev *subdev = &pad->i2c->subdev;
44 struct g94_i2c_pad *pad = (void *)object; 32 struct nvkm_device *device = subdev->device;
33 const u32 base = (pad->id - NVKM_I2C_PAD_HYBRID(0)) * 0x50;
45 34
46 switch (nv_oclass(pad->base.next)->handle) { 35 switch (mode) {
47 case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX): 36 case NVKM_I2C_PAD_OFF:
48 nv_mask(i2c, 0x00e500 + pad->addr, 0x0000c003, 0x00000002); 37 nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000001);
38 break;
39 case NVKM_I2C_PAD_I2C:
40 nvkm_mask(device, 0x00e500 + base, 0x0000c003, 0x0000c001);
41 nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000000);
42 break;
43 case NVKM_I2C_PAD_AUX:
44 nvkm_mask(device, 0x00e500 + base, 0x0000c003, 0x00000002);
45 nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000000);
49 break; 46 break;
50 case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT):
51 default: 47 default:
52 nv_mask(i2c, 0x00e500 + pad->addr, 0x0000c003, 0x0000c001); 48 WARN_ON(1);
53 break; 49 break;
54 } 50 }
55
56 nv_mask(i2c, 0x00e50c + pad->addr, 0x00000001, 0x00000000);
57 return nvkm_i2c_pad_init(&pad->base);
58} 51}
59 52
60static int 53static const struct nvkm_i2c_pad_func
61g94_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 54g94_i2c_pad_s_func = {
62 struct nvkm_oclass *oclass, void *data, u32 index, 55 .bus_new_4 = nv50_i2c_bus_new,
63 struct nvkm_object **pobject) 56 .aux_new_6 = g94_i2c_aux_new,
64{ 57 .mode = g94_i2c_pad_mode,
65 struct g94_i2c_pad *pad; 58};
66 int ret;
67
68 ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
69 *pobject = nv_object(pad);
70 if (ret)
71 return ret;
72 59
73 pad->addr = index * 0x50;; 60int
74 return 0; 61g94_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
62{
63 return nvkm_i2c_pad_new_(&g94_i2c_pad_s_func, i2c, id, ppad);
75} 64}
76 65
77struct nvkm_oclass 66static const struct nvkm_i2c_pad_func
78g94_i2c_pad_oclass = { 67g94_i2c_pad_x_func = {
79 .ofuncs = &(struct nvkm_ofuncs) { 68 .bus_new_4 = nv50_i2c_bus_new,
80 .ctor = g94_i2c_pad_ctor, 69 .aux_new_6 = g94_i2c_aux_new,
81 .dtor = _nvkm_i2c_pad_dtor,
82 .init = g94_i2c_pad_init,
83 .fini = g94_i2c_pad_fini,
84 },
85}; 70};
71
72int
73g94_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
74{
75 return nvkm_i2c_pad_new_(&g94_i2c_pad_x_func, i2c, id, ppad);
76}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
new file mode 100644
index 000000000000..d53212f1aa52
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
@@ -0,0 +1,51 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "pad.h"
25#include "aux.h"
26#include "bus.h"
27
28static const struct nvkm_i2c_pad_func
29gf119_i2c_pad_s_func = {
30 .bus_new_4 = gf119_i2c_bus_new,
31 .aux_new_6 = g94_i2c_aux_new,
32 .mode = g94_i2c_pad_mode,
33};
34
35int
36gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
37{
38 return nvkm_i2c_pad_new_(&gf119_i2c_pad_s_func, i2c, id, ppad);
39}
40
41static const struct nvkm_i2c_pad_func
42gf119_i2c_pad_x_func = {
43 .bus_new_4 = gf119_i2c_bus_new,
44 .aux_new_6 = g94_i2c_aux_new,
45};
46
47int
48gf119_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
49{
50 return nvkm_i2c_pad_new_(&gf119_i2c_pad_x_func, i2c, id, ppad);
51}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
index be590405444d..24a4d760c67b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
@@ -22,64 +22,55 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "pad.h" 24#include "pad.h"
25#include "aux.h"
26#include "bus.h"
25 27
26struct gm204_i2c_pad { 28static void
27 struct nvkm_i2c_pad base; 29gm204_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
28 int addr;
29};
30
31static int
32gm204_i2c_pad_fini(struct nvkm_object *object, bool suspend)
33{
34 struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
35 struct gm204_i2c_pad *pad = (void *)object;
36 nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000001);
37 return nvkm_i2c_pad_fini(&pad->base, suspend);
38}
39
40static int
41gm204_i2c_pad_init(struct nvkm_object *object)
42{ 30{
43 struct nvkm_i2c *i2c = (void *)nvkm_i2c(object); 31 struct nvkm_subdev *subdev = &pad->i2c->subdev;
44 struct gm204_i2c_pad *pad = (void *)object; 32 struct nvkm_device *device = subdev->device;
33 const u32 base = (pad->id - NVKM_I2C_PAD_HYBRID(0)) * 0x50;
45 34
46 switch (nv_oclass(pad->base.next)->handle) { 35 switch (mode) {
47 case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX): 36 case NVKM_I2C_PAD_OFF:
48 nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x00000002); 37 nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000001);
38 break;
39 case NVKM_I2C_PAD_I2C:
40 nvkm_mask(device, 0x00d970 + base, 0x0000c003, 0x0000c001);
41 nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000000);
42 break;
43 case NVKM_I2C_PAD_AUX:
44 nvkm_mask(device, 0x00d970 + base, 0x0000c003, 0x00000002);
45 nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000000);
49 break; 46 break;
50 case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT):
51 default: 47 default:
52 nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x0000c001); 48 WARN_ON(1);
53 break; 49 break;
54 } 50 }
55
56 nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000000);
57 return nvkm_i2c_pad_init(&pad->base);
58} 51}
59 52
60static int 53static const struct nvkm_i2c_pad_func
61gm204_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 54gm204_i2c_pad_s_func = {
62 struct nvkm_oclass *oclass, void *data, u32 index, 55 .bus_new_4 = gf119_i2c_bus_new,
63 struct nvkm_object **pobject) 56 .aux_new_6 = gm204_i2c_aux_new,
64{ 57 .mode = gm204_i2c_pad_mode,
65 struct gm204_i2c_pad *pad; 58};
66 int ret;
67
68 ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
69 *pobject = nv_object(pad);
70 if (ret)
71 return ret;
72 59
73 pad->addr = index * 0x50;; 60int
74 return 0; 61gm204_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
62{
63 return nvkm_i2c_pad_new_(&gm204_i2c_pad_s_func, i2c, id, ppad);
75} 64}
76 65
77struct nvkm_oclass 66static const struct nvkm_i2c_pad_func
78gm204_i2c_pad_oclass = { 67gm204_i2c_pad_x_func = {
79 .ofuncs = &(struct nvkm_ofuncs) { 68 .bus_new_4 = gf119_i2c_bus_new,
80 .ctor = gm204_i2c_pad_ctor, 69 .aux_new_6 = gm204_i2c_aux_new,
81 .dtor = _nvkm_i2c_pad_dtor,
82 .init = gm204_i2c_pad_init,
83 .fini = gm204_i2c_pad_fini,
84 },
85}; 70};
71
72int
73gm204_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
74{
75 return nvkm_i2c_pad_new_(&gm204_i2c_pad_x_func, i2c, id, ppad);
76}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
index 22c7daaad3a0..310046ad9c61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
@@ -22,13 +22,15 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "pad.h" 24#include "pad.h"
25#include "bus.h"
25 26
26struct nvkm_oclass 27static const struct nvkm_i2c_pad_func
27nv04_i2c_pad_oclass = { 28nv04_i2c_pad_func = {
28 .ofuncs = &(struct nvkm_ofuncs) { 29 .bus_new_0 = nv04_i2c_bus_new,
29 .ctor = _nvkm_i2c_pad_ctor,
30 .dtor = _nvkm_i2c_pad_dtor,
31 .init = _nvkm_i2c_pad_init,
32 .fini = _nvkm_i2c_pad_fini,
33 },
34}; 30};
31
32int
33nv04_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
34{
35 return nvkm_i2c_pad_new_(&nv04_i2c_pad_func, i2c, id, ppad);
36}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c
new file mode 100644
index 000000000000..dda6fc0b089d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c
@@ -0,0 +1,36 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "pad.h"
25#include "bus.h"
26
27static const struct nvkm_i2c_pad_func
28nv4e_i2c_pad_func = {
29 .bus_new_4 = nv4e_i2c_bus_new,
30};
31
32int
33nv4e_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
34{
35 return nvkm_i2c_pad_new_(&nv4e_i2c_pad_func, i2c, id, ppad);
36}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c
new file mode 100644
index 000000000000..a03f25b1914f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c
@@ -0,0 +1,36 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "pad.h"
25#include "bus.h"
26
27static const struct nvkm_i2c_pad_func
28nv50_i2c_pad_func = {
29 .bus_new_4 = nv50_i2c_bus_new,
30};
31
32int
33nv50_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
34{
35 return nvkm_i2c_pad_new_(&nv50_i2c_pad_func, i2c, id, ppad);
36}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h
deleted file mode 100644
index 586f53dad813..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __NVKM_I2C_PORT_H__
2#define __NVKM_I2C_PORT_H__
3#include "priv.h"
4
5#ifndef MSG
6#define MSG(l,f,a...) do { \
7 struct nvkm_i2c_port *_port = (void *)port; \
8 nv_##l(_port, "PORT:%02x: "f, _port->index, ##a); \
9} while(0)
10#define DBG(f,a...) MSG(debug, f, ##a)
11#define ERR(f,a...) MSG(error, f, ##a)
12#endif
13#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
index 6586e1567fcf..bf655a66ef40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
@@ -1,69 +1,14 @@
1#ifndef __NVKM_I2C_PRIV_H__ 1#ifndef __NVKM_I2C_PRIV_H__
2#define __NVKM_I2C_PRIV_H__ 2#define __NVKM_I2C_PRIV_H__
3#define nvkm_i2c(p) container_of((p), struct nvkm_i2c, subdev)
3#include <subdev/i2c.h> 4#include <subdev/i2c.h>
4 5
5extern struct nvkm_oclass nv04_i2c_pad_oclass; 6int nvkm_i2c_new_(const struct nvkm_i2c_func *, struct nvkm_device *,
6extern struct nvkm_oclass g94_i2c_pad_oclass; 7 int index, struct nvkm_i2c **);
7extern struct nvkm_oclass gm204_i2c_pad_oclass;
8 8
9#define nvkm_i2c_port_create(p,e,o,i,a,f,d) \ 9struct nvkm_i2c_func {
10 nvkm_i2c_port_create_((p), (e), (o), (i), (a), (f), \ 10 int (*pad_x_new)(struct nvkm_i2c *, int id, struct nvkm_i2c_pad **);
11 sizeof(**d), (void **)d) 11 int (*pad_s_new)(struct nvkm_i2c *, int id, struct nvkm_i2c_pad **);
12#define nvkm_i2c_port_destroy(p) ({ \
13 struct nvkm_i2c_port *port = (p); \
14 _nvkm_i2c_port_dtor(nv_object(i2c)); \
15})
16#define nvkm_i2c_port_init(p) \
17 nvkm_object_init(&(p)->base)
18#define nvkm_i2c_port_fini(p,s) \
19 nvkm_object_fini(&(p)->base, (s))
20
21int nvkm_i2c_port_create_(struct nvkm_object *, struct nvkm_object *,
22 struct nvkm_oclass *, u8,
23 const struct i2c_algorithm *,
24 const struct nvkm_i2c_func *,
25 int, void **);
26void _nvkm_i2c_port_dtor(struct nvkm_object *);
27#define _nvkm_i2c_port_init nvkm_object_init
28int _nvkm_i2c_port_fini(struct nvkm_object *, bool);
29
30#define nvkm_i2c_create(p,e,o,d) \
31 nvkm_i2c_create_((p), (e), (o), sizeof(**d), (void **)d)
32#define nvkm_i2c_destroy(p) ({ \
33 struct nvkm_i2c *i2c = (p); \
34 _nvkm_i2c_dtor(nv_object(i2c)); \
35})
36#define nvkm_i2c_init(p) ({ \
37 struct nvkm_i2c *i2c = (p); \
38 _nvkm_i2c_init(nv_object(i2c)); \
39})
40#define nvkm_i2c_fini(p,s) ({ \
41 struct nvkm_i2c *i2c = (p); \
42 _nvkm_i2c_fini(nv_object(i2c), (s)); \
43})
44
45int nvkm_i2c_create_(struct nvkm_object *, struct nvkm_object *,
46 struct nvkm_oclass *, int, void **);
47int _nvkm_i2c_ctor(struct nvkm_object *, struct nvkm_object *,
48 struct nvkm_oclass *, void *, u32,
49 struct nvkm_object **);
50void _nvkm_i2c_dtor(struct nvkm_object *);
51int _nvkm_i2c_init(struct nvkm_object *);
52int _nvkm_i2c_fini(struct nvkm_object *, bool);
53
54extern struct nvkm_oclass nvkm_anx9805_sclass[];
55extern struct nvkm_oclass gf110_i2c_sclass[];
56
57extern const struct i2c_algorithm nvkm_i2c_bit_algo;
58extern const struct i2c_algorithm nvkm_i2c_aux_algo;
59
60struct nvkm_i2c_impl {
61 struct nvkm_oclass base;
62
63 /* supported i2c port classes */
64 struct nvkm_oclass *sclass;
65 struct nvkm_oclass *pad_x;
66 struct nvkm_oclass *pad_s;
67 12
68 /* number of native dp aux channels present */ 13 /* number of native dp aux channels present */
69 int aux; 14 int aux;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index 8e578f802f66..37a0496f7ed1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -23,55 +23,54 @@
23 */ 23 */
24#include <subdev/ibus.h> 24#include <subdev/ibus.h>
25 25
26struct gf100_ibus_priv {
27 struct nvkm_ibus base;
28};
29
30static void 26static void
31gf100_ibus_intr_hub(struct gf100_ibus_priv *priv, int i) 27gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
32{ 28{
33 u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0400)); 29 struct nvkm_device *device = ibus->device;
34 u32 data = nv_rd32(priv, 0x122124 + (i * 0x0400)); 30 u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0400));
35 u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0400)); 31 u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
36 nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat); 32 u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
37 nv_mask(priv, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000); 33 nvkm_error(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
34 nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
38} 35}
39 36
40static void 37static void
41gf100_ibus_intr_rop(struct gf100_ibus_priv *priv, int i) 38gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
42{ 39{
43 u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0400)); 40 struct nvkm_device *device = ibus->device;
44 u32 data = nv_rd32(priv, 0x124124 + (i * 0x0400)); 41 u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0400));
45 u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0400)); 42 u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
46 nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat); 43 u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
47 nv_mask(priv, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000); 44 nvkm_error(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
45 nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
48} 46}
49 47
50static void 48static void
51gf100_ibus_intr_gpc(struct gf100_ibus_priv *priv, int i) 49gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
52{ 50{
53 u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0400)); 51 struct nvkm_device *device = ibus->device;
54 u32 data = nv_rd32(priv, 0x128124 + (i * 0x0400)); 52 u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0400));
55 u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0400)); 53 u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
56 nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat); 54 u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
57 nv_mask(priv, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000); 55 nvkm_error(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
56 nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
58} 57}
59 58
60static void 59static void
61gf100_ibus_intr(struct nvkm_subdev *subdev) 60gf100_ibus_intr(struct nvkm_subdev *ibus)
62{ 61{
63 struct gf100_ibus_priv *priv = (void *)subdev; 62 struct nvkm_device *device = ibus->device;
64 u32 intr0 = nv_rd32(priv, 0x121c58); 63 u32 intr0 = nvkm_rd32(device, 0x121c58);
65 u32 intr1 = nv_rd32(priv, 0x121c5c); 64 u32 intr1 = nvkm_rd32(device, 0x121c5c);
66 u32 hubnr = nv_rd32(priv, 0x121c70); 65 u32 hubnr = nvkm_rd32(device, 0x121c70);
67 u32 ropnr = nv_rd32(priv, 0x121c74); 66 u32 ropnr = nvkm_rd32(device, 0x121c74);
68 u32 gpcnr = nv_rd32(priv, 0x121c78); 67 u32 gpcnr = nvkm_rd32(device, 0x121c78);
69 u32 i; 68 u32 i;
70 69
71 for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) { 70 for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
72 u32 stat = 0x00000100 << i; 71 u32 stat = 0x00000100 << i;
73 if (intr0 & stat) { 72 if (intr0 & stat) {
74 gf100_ibus_intr_hub(priv, i); 73 gf100_ibus_intr_hub(ibus, i);
75 intr0 &= ~stat; 74 intr0 &= ~stat;
76 } 75 }
77 } 76 }
@@ -79,7 +78,7 @@ gf100_ibus_intr(struct nvkm_subdev *subdev)
79 for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) { 78 for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
80 u32 stat = 0x00010000 << i; 79 u32 stat = 0x00010000 << i;
81 if (intr0 & stat) { 80 if (intr0 & stat) {
82 gf100_ibus_intr_rop(priv, i); 81 gf100_ibus_intr_rop(ibus, i);
83 intr0 &= ~stat; 82 intr0 &= ~stat;
84 } 83 }
85 } 84 }
@@ -87,36 +86,24 @@ gf100_ibus_intr(struct nvkm_subdev *subdev)
87 for (i = 0; intr1 && i < gpcnr; i++) { 86 for (i = 0; intr1 && i < gpcnr; i++) {
88 u32 stat = 0x00000001 << i; 87 u32 stat = 0x00000001 << i;
89 if (intr1 & stat) { 88 if (intr1 & stat) {
90 gf100_ibus_intr_gpc(priv, i); 89 gf100_ibus_intr_gpc(ibus, i);
91 intr1 &= ~stat; 90 intr1 &= ~stat;
92 } 91 }
93 } 92 }
94} 93}
95 94
96static int 95static const struct nvkm_subdev_func
97gf100_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 96gf100_ibus = {
98 struct nvkm_oclass *oclass, void *data, u32 size, 97 .intr = gf100_ibus_intr,
99 struct nvkm_object **pobject) 98};
100{
101 struct gf100_ibus_priv *priv;
102 int ret;
103
104 ret = nvkm_ibus_create(parent, engine, oclass, &priv);
105 *pobject = nv_object(priv);
106 if (ret)
107 return ret;
108 99
109 nv_subdev(priv)->intr = gf100_ibus_intr; 100int
101gf100_ibus_new(struct nvkm_device *device, int index,
102 struct nvkm_subdev **pibus)
103{
104 struct nvkm_subdev *ibus;
105 if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
106 return -ENOMEM;
107 nvkm_subdev_ctor(&gf100_ibus, device, index, 0, ibus);
110 return 0; 108 return 0;
111} 109}
112
113struct nvkm_oclass
114gf100_ibus_oclass = {
115 .handle = NV_SUBDEV(IBUS, 0xc0),
116 .ofuncs = &(struct nvkm_ofuncs) {
117 .ctor = gf100_ibus_ctor,
118 .dtor = _nvkm_ibus_dtor,
119 .init = _nvkm_ibus_init,
120 .fini = _nvkm_ibus_fini,
121 },
122};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index 7b6e9a6cd7b2..ba33609f643c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -23,55 +23,54 @@
23 */ 23 */
24#include <subdev/ibus.h> 24#include <subdev/ibus.h>
25 25
26struct gk104_ibus_priv {
27 struct nvkm_ibus base;
28};
29
30static void 26static void
31gk104_ibus_intr_hub(struct gk104_ibus_priv *priv, int i) 27gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
32{ 28{
33 u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0800)); 29 struct nvkm_device *device = ibus->device;
34 u32 data = nv_rd32(priv, 0x122124 + (i * 0x0800)); 30 u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0800));
35 u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0800)); 31 u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
36 nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat); 32 u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
37 nv_mask(priv, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000); 33 nvkm_error(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
34 nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
38} 35}
39 36
40static void 37static void
41gk104_ibus_intr_rop(struct gk104_ibus_priv *priv, int i) 38gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
42{ 39{
43 u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0800)); 40 struct nvkm_device *device = ibus->device;
44 u32 data = nv_rd32(priv, 0x124124 + (i * 0x0800)); 41 u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0800));
45 u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0800)); 42 u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
46 nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat); 43 u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
47 nv_mask(priv, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000); 44 nvkm_error(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
45 nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
48} 46}
49 47
50static void 48static void
51gk104_ibus_intr_gpc(struct gk104_ibus_priv *priv, int i) 49gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
52{ 50{
53 u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0800)); 51 struct nvkm_device *device = ibus->device;
54 u32 data = nv_rd32(priv, 0x128124 + (i * 0x0800)); 52 u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0800));
55 u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0800)); 53 u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
56 nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat); 54 u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
57 nv_mask(priv, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000); 55 nvkm_error(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
56 nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
58} 57}
59 58
60static void 59static void
61gk104_ibus_intr(struct nvkm_subdev *subdev) 60gk104_ibus_intr(struct nvkm_subdev *ibus)
62{ 61{
63 struct gk104_ibus_priv *priv = (void *)subdev; 62 struct nvkm_device *device = ibus->device;
64 u32 intr0 = nv_rd32(priv, 0x120058); 63 u32 intr0 = nvkm_rd32(device, 0x120058);
65 u32 intr1 = nv_rd32(priv, 0x12005c); 64 u32 intr1 = nvkm_rd32(device, 0x12005c);
66 u32 hubnr = nv_rd32(priv, 0x120070); 65 u32 hubnr = nvkm_rd32(device, 0x120070);
67 u32 ropnr = nv_rd32(priv, 0x120074); 66 u32 ropnr = nvkm_rd32(device, 0x120074);
68 u32 gpcnr = nv_rd32(priv, 0x120078); 67 u32 gpcnr = nvkm_rd32(device, 0x120078);
69 u32 i; 68 u32 i;
70 69
71 for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) { 70 for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
72 u32 stat = 0x00000100 << i; 71 u32 stat = 0x00000100 << i;
73 if (intr0 & stat) { 72 if (intr0 & stat) {
74 gk104_ibus_intr_hub(priv, i); 73 gk104_ibus_intr_hub(ibus, i);
75 intr0 &= ~stat; 74 intr0 &= ~stat;
76 } 75 }
77 } 76 }
@@ -79,7 +78,7 @@ gk104_ibus_intr(struct nvkm_subdev *subdev)
79 for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) { 78 for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
80 u32 stat = 0x00010000 << i; 79 u32 stat = 0x00010000 << i;
81 if (intr0 & stat) { 80 if (intr0 & stat) {
82 gk104_ibus_intr_rop(priv, i); 81 gk104_ibus_intr_rop(ibus, i);
83 intr0 &= ~stat; 82 intr0 &= ~stat;
84 } 83 }
85 } 84 }
@@ -87,53 +86,40 @@ gk104_ibus_intr(struct nvkm_subdev *subdev)
87 for (i = 0; intr1 && i < gpcnr; i++) { 86 for (i = 0; intr1 && i < gpcnr; i++) {
88 u32 stat = 0x00000001 << i; 87 u32 stat = 0x00000001 << i;
89 if (intr1 & stat) { 88 if (intr1 & stat) {
90 gk104_ibus_intr_gpc(priv, i); 89 gk104_ibus_intr_gpc(ibus, i);
91 intr1 &= ~stat; 90 intr1 &= ~stat;
92 } 91 }
93 } 92 }
94} 93}
95 94
96static int 95static int
97gk104_ibus_init(struct nvkm_object *object) 96gk104_ibus_init(struct nvkm_subdev *ibus)
98{ 97{
99 struct gk104_ibus_priv *priv = (void *)object; 98 struct nvkm_device *device = ibus->device;
100 int ret = nvkm_ibus_init(&priv->base); 99 nvkm_mask(device, 0x122318, 0x0003ffff, 0x00001000);
101 if (ret == 0) { 100 nvkm_mask(device, 0x12231c, 0x0003ffff, 0x00000200);
102 nv_mask(priv, 0x122318, 0x0003ffff, 0x00001000); 101 nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
103 nv_mask(priv, 0x12231c, 0x0003ffff, 0x00000200); 102 nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100);
104 nv_mask(priv, 0x122310, 0x0003ffff, 0x00000800); 103 nvkm_mask(device, 0x1223b0, 0x0003ffff, 0x00000fff);
105 nv_mask(priv, 0x122348, 0x0003ffff, 0x00000100); 104 nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000200);
106 nv_mask(priv, 0x1223b0, 0x0003ffff, 0x00000fff); 105 nvkm_mask(device, 0x122358, 0x0003ffff, 0x00002880);
107 nv_mask(priv, 0x122348, 0x0003ffff, 0x00000200); 106 return 0;
108 nv_mask(priv, 0x122358, 0x0003ffff, 0x00002880);
109 }
110 return ret;
111} 107}
112 108
113static int 109static const struct nvkm_subdev_func
114gk104_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 110gk104_ibus = {
115 struct nvkm_oclass *oclass, void *data, u32 size, 111 .preinit = gk104_ibus_init,
116 struct nvkm_object **pobject) 112 .init = gk104_ibus_init,
117{ 113 .intr = gk104_ibus_intr,
118 struct gk104_ibus_priv *priv; 114};
119 int ret;
120
121 ret = nvkm_ibus_create(parent, engine, oclass, &priv);
122 *pobject = nv_object(priv);
123 if (ret)
124 return ret;
125 115
126 nv_subdev(priv)->intr = gk104_ibus_intr; 116int
117gk104_ibus_new(struct nvkm_device *device, int index,
118 struct nvkm_subdev **pibus)
119{
120 struct nvkm_subdev *ibus;
121 if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
122 return -ENOMEM;
123 nvkm_subdev_ctor(&gk104_ibus, device, index, 0, ibus);
127 return 0; 124 return 0;
128} 125}
129
130struct nvkm_oclass
131gk104_ibus_oclass = {
132 .handle = NV_SUBDEV(IBUS, 0xe0),
133 .ofuncs = &(struct nvkm_ofuncs) {
134 .ctor = gk104_ibus_ctor,
135 .dtor = _nvkm_ibus_dtor,
136 .init = gk104_ibus_init,
137 .fini = _nvkm_ibus_fini,
138 },
139};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index 24dcdfb58a8d..3484079e885a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -22,89 +22,68 @@
22#include <subdev/ibus.h> 22#include <subdev/ibus.h>
23#include <subdev/timer.h> 23#include <subdev/timer.h>
24 24
25struct gk20a_ibus_priv {
26 struct nvkm_ibus base;
27};
28
29static void 25static void
30gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv) 26gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
31{ 27{
32 nv_mask(priv, 0x137250, 0x3f, 0); 28 struct nvkm_device *device = ibus->device;
29 nvkm_mask(device, 0x137250, 0x3f, 0);
33 30
34 nv_mask(priv, 0x000200, 0x20, 0); 31 nvkm_mask(device, 0x000200, 0x20, 0);
35 usleep_range(20, 30); 32 usleep_range(20, 30);
36 nv_mask(priv, 0x000200, 0x20, 0x20); 33 nvkm_mask(device, 0x000200, 0x20, 0x20);
37 34
38 nv_wr32(priv, 0x12004c, 0x4); 35 nvkm_wr32(device, 0x12004c, 0x4);
39 nv_wr32(priv, 0x122204, 0x2); 36 nvkm_wr32(device, 0x122204, 0x2);
40 nv_rd32(priv, 0x122204); 37 nvkm_rd32(device, 0x122204);
41 38
42 /* 39 /*
43 * Bug: increase clock timeout to avoid operation failure at high 40 * Bug: increase clock timeout to avoid operation failure at high
44 * gpcclk rate. 41 * gpcclk rate.
45 */ 42 */
46 nv_wr32(priv, 0x122354, 0x800); 43 nvkm_wr32(device, 0x122354, 0x800);
47 nv_wr32(priv, 0x128328, 0x800); 44 nvkm_wr32(device, 0x128328, 0x800);
48 nv_wr32(priv, 0x124320, 0x800); 45 nvkm_wr32(device, 0x124320, 0x800);
49} 46}
50 47
51static void 48static void
52gk20a_ibus_intr(struct nvkm_subdev *subdev) 49gk20a_ibus_intr(struct nvkm_subdev *ibus)
53{ 50{
54 struct gk20a_ibus_priv *priv = (void *)subdev; 51 struct nvkm_device *device = ibus->device;
55 u32 status0 = nv_rd32(priv, 0x120058); 52 u32 status0 = nvkm_rd32(device, 0x120058);
56 53
57 if (status0 & 0x7) { 54 if (status0 & 0x7) {
58 nv_debug(priv, "resetting priv ring\n"); 55 nvkm_debug(ibus, "resetting ibus ring\n");
59 gk20a_ibus_init_priv_ring(priv); 56 gk20a_ibus_init_ibus_ring(ibus);
60 } 57 }
61 58
62 /* Acknowledge interrupt */ 59 /* Acknowledge interrupt */
63 nv_mask(priv, 0x12004c, 0x2, 0x2); 60 nvkm_mask(device, 0x12004c, 0x2, 0x2);
64 61 nvkm_msec(device, 2000,
65 if (!nv_wait(subdev, 0x12004c, 0x3f, 0x00)) 62 if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
66 nv_warn(priv, "timeout waiting for ringmaster ack\n"); 63 break;
64 );
67} 65}
68 66
69static int 67static int
70gk20a_ibus_init(struct nvkm_object *object) 68gk20a_ibus_init(struct nvkm_subdev *ibus)
71{ 69{
72 struct gk20a_ibus_priv *priv = (void *)object; 70 gk20a_ibus_init_ibus_ring(ibus);
73 int ret;
74
75 ret = _nvkm_ibus_init(object);
76 if (ret)
77 return ret;
78
79 gk20a_ibus_init_priv_ring(priv);
80
81 return 0; 71 return 0;
82} 72}
83 73
84static int 74static const struct nvkm_subdev_func
85gk20a_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 75gk20a_ibus = {
86 struct nvkm_oclass *oclass, void *data, u32 size, 76 .init = gk20a_ibus_init,
87 struct nvkm_object **pobject) 77 .intr = gk20a_ibus_intr,
88{ 78};
89 struct gk20a_ibus_priv *priv;
90 int ret;
91
92 ret = nvkm_ibus_create(parent, engine, oclass, &priv);
93 *pobject = nv_object(priv);
94 if (ret)
95 return ret;
96 79
97 nv_subdev(priv)->intr = gk20a_ibus_intr; 80int
81gk20a_ibus_new(struct nvkm_device *device, int index,
82 struct nvkm_subdev **pibus)
83{
84 struct nvkm_subdev *ibus;
85 if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
86 return -ENOMEM;
87 nvkm_subdev_ctor(&gk20a_ibus, device, index, 0, ibus);
98 return 0; 88 return 0;
99} 89}
100
101struct nvkm_oclass
102gk20a_ibus_oclass = {
103 .handle = NV_SUBDEV(IBUS, 0xea),
104 .ofuncs = &(struct nvkm_ofuncs) {
105 .ctor = gk20a_ibus_ctor,
106 .dtor = _nvkm_ibus_dtor,
107 .init = gk20a_ibus_init,
108 .fini = _nvkm_ibus_fini,
109 },
110};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index d16358cc6cbb..895ba74057d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -23,124 +23,291 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26#include <core/engine.h> 26#include <core/memory.h>
27#include <subdev/bar.h>
27 28
28/****************************************************************************** 29/******************************************************************************
29 * instmem object base implementation 30 * instmem object base implementation
30 *****************************************************************************/ 31 *****************************************************************************/
32#define nvkm_instobj(p) container_of((p), struct nvkm_instobj, memory)
31 33
32void 34struct nvkm_instobj {
33_nvkm_instobj_dtor(struct nvkm_object *object) 35 struct nvkm_memory memory;
36 struct nvkm_memory *parent;
37 struct nvkm_instmem *imem;
38 struct list_head head;
39 u32 *suspend;
40 void __iomem *map;
41};
42
43static enum nvkm_memory_target
44nvkm_instobj_target(struct nvkm_memory *memory)
45{
46 memory = nvkm_instobj(memory)->parent;
47 return nvkm_memory_target(memory);
48}
49
50static u64
51nvkm_instobj_addr(struct nvkm_memory *memory)
52{
53 memory = nvkm_instobj(memory)->parent;
54 return nvkm_memory_addr(memory);
55}
56
57static u64
58nvkm_instobj_size(struct nvkm_memory *memory)
59{
60 memory = nvkm_instobj(memory)->parent;
61 return nvkm_memory_size(memory);
62}
63
64static void
65nvkm_instobj_release(struct nvkm_memory *memory)
66{
67 struct nvkm_instobj *iobj = nvkm_instobj(memory);
68 nvkm_bar_flush(iobj->imem->subdev.device->bar);
69}
70
71static void __iomem *
72nvkm_instobj_acquire(struct nvkm_memory *memory)
73{
74 return nvkm_instobj(memory)->map;
75}
76
77static u32
78nvkm_instobj_rd32(struct nvkm_memory *memory, u64 offset)
79{
80 return ioread32_native(nvkm_instobj(memory)->map + offset);
81}
82
83static void
84nvkm_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
34{ 85{
35 struct nvkm_instmem *imem = nvkm_instmem(object); 86 iowrite32_native(data, nvkm_instobj(memory)->map + offset);
36 struct nvkm_instobj *iobj = (void *)object; 87}
88
89static void
90nvkm_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
91{
92 memory = nvkm_instobj(memory)->parent;
93 nvkm_memory_map(memory, vma, offset);
94}
37 95
38 mutex_lock(&nv_subdev(imem)->mutex); 96static void *
97nvkm_instobj_dtor(struct nvkm_memory *memory)
98{
99 struct nvkm_instobj *iobj = nvkm_instobj(memory);
39 list_del(&iobj->head); 100 list_del(&iobj->head);
40 mutex_unlock(&nv_subdev(imem)->mutex); 101 nvkm_memory_del(&iobj->parent);
102 return iobj;
103}
104
105const struct nvkm_memory_func
106nvkm_instobj_func = {
107 .dtor = nvkm_instobj_dtor,
108 .target = nvkm_instobj_target,
109 .addr = nvkm_instobj_addr,
110 .size = nvkm_instobj_size,
111 .acquire = nvkm_instobj_acquire,
112 .release = nvkm_instobj_release,
113 .rd32 = nvkm_instobj_rd32,
114 .wr32 = nvkm_instobj_wr32,
115 .map = nvkm_instobj_map,
116};
117
118static void
119nvkm_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
120{
121 memory = nvkm_instobj(memory)->parent;
122 nvkm_memory_boot(memory, vm);
123}
124
125static void
126nvkm_instobj_release_slow(struct nvkm_memory *memory)
127{
128 struct nvkm_instobj *iobj = nvkm_instobj(memory);
129 nvkm_instobj_release(memory);
130 nvkm_done(iobj->parent);
131}
132
133static void __iomem *
134nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
135{
136 struct nvkm_instobj *iobj = nvkm_instobj(memory);
137 iobj->map = nvkm_kmap(iobj->parent);
138 if (iobj->map)
139 memory->func = &nvkm_instobj_func;
140 return iobj->map;
141}
142
143static u32
144nvkm_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
145{
146 struct nvkm_instobj *iobj = nvkm_instobj(memory);
147 return nvkm_ro32(iobj->parent, offset);
148}
41 149
42 return nvkm_object_destroy(&iobj->base); 150static void
151nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
152{
153 struct nvkm_instobj *iobj = nvkm_instobj(memory);
154 return nvkm_wo32(iobj->parent, offset, data);
43} 155}
44 156
157const struct nvkm_memory_func
158nvkm_instobj_func_slow = {
159 .dtor = nvkm_instobj_dtor,
160 .target = nvkm_instobj_target,
161 .addr = nvkm_instobj_addr,
162 .size = nvkm_instobj_size,
163 .boot = nvkm_instobj_boot,
164 .acquire = nvkm_instobj_acquire_slow,
165 .release = nvkm_instobj_release_slow,
166 .rd32 = nvkm_instobj_rd32_slow,
167 .wr32 = nvkm_instobj_wr32_slow,
168 .map = nvkm_instobj_map,
169};
170
45int 171int
46nvkm_instobj_create_(struct nvkm_object *parent, struct nvkm_object *engine, 172nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
47 struct nvkm_oclass *oclass, int length, void **pobject) 173 struct nvkm_memory **pmemory)
48{ 174{
49 struct nvkm_instmem *imem = nvkm_instmem(parent); 175 struct nvkm_memory *memory = NULL;
50 struct nvkm_instobj *iobj; 176 struct nvkm_instobj *iobj;
177 u32 offset;
51 int ret; 178 int ret;
52 179
53 ret = nvkm_object_create_(parent, engine, oclass, NV_MEMOBJ_CLASS, 180 ret = imem->func->memory_new(imem, size, align, zero, &memory);
54 length, pobject);
55 iobj = *pobject;
56 if (ret) 181 if (ret)
57 return ret; 182 goto done;
58 183
59 mutex_lock(&imem->base.mutex); 184 if (!imem->func->persistent) {
60 list_add(&iobj->head, &imem->list); 185 if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL))) {
61 mutex_unlock(&imem->base.mutex); 186 ret = -ENOMEM;
62 return 0; 187 goto done;
188 }
189
190 nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
191 iobj->parent = memory;
192 iobj->imem = imem;
193 list_add_tail(&iobj->head, &imem->list);
194 memory = &iobj->memory;
195 }
196
197 if (!imem->func->zero && zero) {
198 void __iomem *map = nvkm_kmap(memory);
199 if (unlikely(!map)) {
200 for (offset = 0; offset < size; offset += 4)
201 nvkm_wo32(memory, offset, 0x00000000);
202 } else {
203 memset_io(map, 0x00, size);
204 }
205 nvkm_done(memory);
206 }
207
208done:
209 if (ret)
210 nvkm_memory_del(&memory);
211 *pmemory = memory;
212 return ret;
63} 213}
64 214
65/****************************************************************************** 215/******************************************************************************
66 * instmem subdev base implementation 216 * instmem subdev base implementation
67 *****************************************************************************/ 217 *****************************************************************************/
68 218
69static int 219u32
70nvkm_instmem_alloc(struct nvkm_instmem *imem, struct nvkm_object *parent, 220nvkm_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
71 u32 size, u32 align, struct nvkm_object **pobject)
72{ 221{
73 struct nvkm_instmem_impl *impl = (void *)imem->base.object.oclass; 222 return imem->func->rd32(imem, addr);
74 struct nvkm_instobj_args args = { .size = size, .align = align };
75 return nvkm_object_ctor(parent, &parent->engine->subdev.object,
76 impl->instobj, &args, sizeof(args), pobject);
77} 223}
78 224
79int 225void
80_nvkm_instmem_fini(struct nvkm_object *object, bool suspend) 226nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
227{
228 return imem->func->wr32(imem, addr, data);
229}
230
231static int
232nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
81{ 233{
82 struct nvkm_instmem *imem = (void *)object; 234 struct nvkm_instmem *imem = nvkm_instmem(subdev);
83 struct nvkm_instobj *iobj; 235 struct nvkm_instobj *iobj;
84 int i, ret = 0; 236 int i;
237
238 if (imem->func->fini)
239 imem->func->fini(imem);
85 240
86 if (suspend) { 241 if (suspend) {
87 mutex_lock(&imem->base.mutex);
88 list_for_each_entry(iobj, &imem->list, head) { 242 list_for_each_entry(iobj, &imem->list, head) {
89 iobj->suspend = vmalloc(iobj->size); 243 struct nvkm_memory *memory = iobj->parent;
90 if (!iobj->suspend) { 244 u64 size = nvkm_memory_size(memory);
91 ret = -ENOMEM; 245
92 break; 246 iobj->suspend = vmalloc(size);
93 } 247 if (!iobj->suspend)
94 248 return -ENOMEM;
95 for (i = 0; i < iobj->size; i += 4) 249
96 iobj->suspend[i / 4] = nv_ro32(iobj, i); 250 for (i = 0; i < size; i += 4)
251 iobj->suspend[i / 4] = nvkm_ro32(memory, i);
97 } 252 }
98 mutex_unlock(&imem->base.mutex);
99 if (ret)
100 return ret;
101 } 253 }
102 254
103 return nvkm_subdev_fini(&imem->base, suspend); 255 return 0;
104} 256}
105 257
106int 258static int
107_nvkm_instmem_init(struct nvkm_object *object) 259nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
108{ 260{
109 struct nvkm_instmem *imem = (void *)object; 261 struct nvkm_instmem *imem = nvkm_instmem(subdev);
110 struct nvkm_instobj *iobj; 262 if (imem->func->oneinit)
111 int ret, i; 263 return imem->func->oneinit(imem);
264 return 0;
265}
112 266
113 ret = nvkm_subdev_init(&imem->base); 267static int
114 if (ret) 268nvkm_instmem_init(struct nvkm_subdev *subdev)
115 return ret; 269{
270 struct nvkm_instmem *imem = nvkm_instmem(subdev);
271 struct nvkm_instobj *iobj;
272 int i;
116 273
117 mutex_lock(&imem->base.mutex);
118 list_for_each_entry(iobj, &imem->list, head) { 274 list_for_each_entry(iobj, &imem->list, head) {
119 if (iobj->suspend) { 275 if (iobj->suspend) {
120 for (i = 0; i < iobj->size; i += 4) 276 struct nvkm_memory *memory = iobj->parent;
121 nv_wo32(iobj, i, iobj->suspend[i / 4]); 277 u64 size = nvkm_memory_size(memory);
278 for (i = 0; i < size; i += 4)
279 nvkm_wo32(memory, i, iobj->suspend[i / 4]);
122 vfree(iobj->suspend); 280 vfree(iobj->suspend);
123 iobj->suspend = NULL; 281 iobj->suspend = NULL;
124 } 282 }
125 } 283 }
126 mutex_unlock(&imem->base.mutex); 284
127 return 0; 285 return 0;
128} 286}
129 287
130int 288static void *
131nvkm_instmem_create_(struct nvkm_object *parent, struct nvkm_object *engine, 289nvkm_instmem_dtor(struct nvkm_subdev *subdev)
132 struct nvkm_oclass *oclass, int length, void **pobject)
133{ 290{
134 struct nvkm_instmem *imem; 291 struct nvkm_instmem *imem = nvkm_instmem(subdev);
135 int ret; 292 if (imem->func->dtor)
293 return imem->func->dtor(imem);
294 return imem;
295}
136 296
137 ret = nvkm_subdev_create_(parent, engine, oclass, 0, "INSTMEM", 297static const struct nvkm_subdev_func
138 "instmem", length, pobject); 298nvkm_instmem = {
139 imem = *pobject; 299 .dtor = nvkm_instmem_dtor,
140 if (ret) 300 .oneinit = nvkm_instmem_oneinit,
141 return ret; 301 .init = nvkm_instmem_init,
302 .fini = nvkm_instmem_fini,
303};
142 304
305void
306nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
307 struct nvkm_device *device, int index,
308 struct nvkm_instmem *imem)
309{
310 nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev);
311 imem->func = func;
143 INIT_LIST_HEAD(&imem->list); 312 INIT_LIST_HEAD(&imem->list);
144 imem->alloc = nvkm_instmem_alloc;
145 return 0;
146} 313}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index dd0994d9ebfc..cd7feb1b25f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -37,32 +37,27 @@
37 * to use more "relaxed" allocation parameters when using the DMA API, since we 37 * to use more "relaxed" allocation parameters when using the DMA API, since we
38 * never need a kernel mapping. 38 * never need a kernel mapping.
39 */ 39 */
40#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
41#include "priv.h"
40 42
41#include <subdev/fb.h> 43#include <core/memory.h>
42#include <core/mm.h> 44#include <core/mm.h>
43#include <core/device.h> 45#include <core/tegra.h>
46#include <subdev/fb.h>
44 47
45#ifdef __KERNEL__ 48#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
46#include <linux/dma-attrs.h>
47#include <linux/iommu.h>
48#include <nouveau_platform.h>
49#endif
50 49
51#include "priv.h" 50struct gk20a_instobj {
52 51 struct nvkm_memory memory;
53struct gk20a_instobj_priv { 52 struct gk20a_instmem *imem;
54 struct nvkm_instobj base; 53 struct nvkm_mem mem;
55 /* Must be second member here - see nouveau_gpuobj_map_vm() */
56 struct nvkm_mem *mem;
57 /* Pointed by mem */
58 struct nvkm_mem _mem;
59}; 54};
60 55
61/* 56/*
62 * Used for objects allocated using the DMA API 57 * Used for objects allocated using the DMA API
63 */ 58 */
64struct gk20a_instobj_dma { 59struct gk20a_instobj_dma {
65 struct gk20a_instobj_priv base; 60 struct gk20a_instobj base;
66 61
67 void *cpuaddr; 62 void *cpuaddr;
68 dma_addr_t handle; 63 dma_addr_t handle;
@@ -73,14 +68,15 @@ struct gk20a_instobj_dma {
73 * Used for objects flattened using the IOMMU API 68 * Used for objects flattened using the IOMMU API
74 */ 69 */
75struct gk20a_instobj_iommu { 70struct gk20a_instobj_iommu {
76 struct gk20a_instobj_priv base; 71 struct gk20a_instobj base;
77 72
78 /* array of base.mem->size pages */ 73 /* array of base.mem->size pages */
79 struct page *pages[]; 74 struct page *pages[];
80}; 75};
81 76
82struct gk20a_instmem_priv { 77struct gk20a_instmem {
83 struct nvkm_instmem base; 78 struct nvkm_instmem base;
79 unsigned long lock_flags;
84 spinlock_t lock; 80 spinlock_t lock;
85 u64 addr; 81 u64 addr;
86 82
@@ -94,6 +90,42 @@ struct gk20a_instmem_priv {
94 struct dma_attrs attrs; 90 struct dma_attrs attrs;
95}; 91};
96 92
93static enum nvkm_memory_target
94gk20a_instobj_target(struct nvkm_memory *memory)
95{
96 return NVKM_MEM_TARGET_HOST;
97}
98
99static u64
100gk20a_instobj_addr(struct nvkm_memory *memory)
101{
102 return gk20a_instobj(memory)->mem.offset;
103
104}
105
106static u64
107gk20a_instobj_size(struct nvkm_memory *memory)
108{
109 return (u64)gk20a_instobj(memory)->mem.size << 12;
110}
111
112static void __iomem *
113gk20a_instobj_acquire(struct nvkm_memory *memory)
114{
115 struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
116 unsigned long flags;
117 spin_lock_irqsave(&imem->lock, flags);
118 imem->lock_flags = flags;
119 return NULL;
120}
121
122static void
123gk20a_instobj_release(struct nvkm_memory *memory)
124{
125 struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
126 spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
127}
128
97/* 129/*
98 * Use PRAMIN to read/write data and avoid coherency issues. 130 * Use PRAMIN to read/write data and avoid coherency issues.
99 * PRAMIN uses the GPU path and ensures data will always be coherent. 131 * PRAMIN uses the GPU path and ensures data will always be coherent.
@@ -104,160 +136,170 @@ struct gk20a_instmem_priv {
104 */ 136 */
105 137
106static u32 138static u32
107gk20a_instobj_rd32(struct nvkm_object *object, u64 offset) 139gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
108{ 140{
109 struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object); 141 struct gk20a_instobj *node = gk20a_instobj(memory);
110 struct gk20a_instobj_priv *node = (void *)object; 142 struct gk20a_instmem *imem = node->imem;
111 unsigned long flags; 143 struct nvkm_device *device = imem->base.subdev.device;
112 u64 base = (node->mem->offset + offset) & 0xffffff00000ULL; 144 u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
113 u64 addr = (node->mem->offset + offset) & 0x000000fffffULL; 145 u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
114 u32 data; 146 u32 data;
115 147
116 spin_lock_irqsave(&priv->lock, flags); 148 if (unlikely(imem->addr != base)) {
117 if (unlikely(priv->addr != base)) { 149 nvkm_wr32(device, 0x001700, base >> 16);
118 nv_wr32(priv, 0x001700, base >> 16); 150 imem->addr = base;
119 priv->addr = base;
120 } 151 }
121 data = nv_rd32(priv, 0x700000 + addr); 152 data = nvkm_rd32(device, 0x700000 + addr);
122 spin_unlock_irqrestore(&priv->lock, flags);
123 return data; 153 return data;
124} 154}
125 155
126static void 156static void
127gk20a_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data) 157gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
128{ 158{
129 struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object); 159 struct gk20a_instobj *node = gk20a_instobj(memory);
130 struct gk20a_instobj_priv *node = (void *)object; 160 struct gk20a_instmem *imem = node->imem;
131 unsigned long flags; 161 struct nvkm_device *device = imem->base.subdev.device;
132 u64 base = (node->mem->offset + offset) & 0xffffff00000ULL; 162 u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
133 u64 addr = (node->mem->offset + offset) & 0x000000fffffULL; 163 u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
134 164
135 spin_lock_irqsave(&priv->lock, flags); 165 if (unlikely(imem->addr != base)) {
136 if (unlikely(priv->addr != base)) { 166 nvkm_wr32(device, 0x001700, base >> 16);
137 nv_wr32(priv, 0x001700, base >> 16); 167 imem->addr = base;
138 priv->addr = base;
139 } 168 }
140 nv_wr32(priv, 0x700000 + addr, data); 169 nvkm_wr32(device, 0x700000 + addr, data);
141 spin_unlock_irqrestore(&priv->lock, flags); 170}
171
172static void
173gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
174{
175 struct gk20a_instobj *node = gk20a_instobj(memory);
176 nvkm_vm_map_at(vma, offset, &node->mem);
142} 177}
143 178
144static void 179static void
145gk20a_instobj_dtor_dma(struct gk20a_instobj_priv *_node) 180gk20a_instobj_dtor_dma(struct gk20a_instobj *_node)
146{ 181{
147 struct gk20a_instobj_dma *node = (void *)_node; 182 struct gk20a_instobj_dma *node = (void *)_node;
148 struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node); 183 struct gk20a_instmem *imem = _node->imem;
149 struct device *dev = nv_device_base(nv_device(priv)); 184 struct device *dev = imem->base.subdev.device->dev;
150 185
151 if (unlikely(!node->cpuaddr)) 186 if (unlikely(!node->cpuaddr))
152 return; 187 return;
153 188
154 dma_free_attrs(dev, _node->mem->size << PAGE_SHIFT, node->cpuaddr, 189 dma_free_attrs(dev, _node->mem.size << PAGE_SHIFT, node->cpuaddr,
155 node->handle, &priv->attrs); 190 node->handle, &imem->attrs);
156} 191}
157 192
158static void 193static void
159gk20a_instobj_dtor_iommu(struct gk20a_instobj_priv *_node) 194gk20a_instobj_dtor_iommu(struct gk20a_instobj *_node)
160{ 195{
161 struct gk20a_instobj_iommu *node = (void *)_node; 196 struct gk20a_instobj_iommu *node = (void *)_node;
162 struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node); 197 struct gk20a_instmem *imem = _node->imem;
163 struct nvkm_mm_node *r; 198 struct nvkm_mm_node *r;
164 int i; 199 int i;
165 200
166 if (unlikely(list_empty(&_node->mem->regions))) 201 if (unlikely(list_empty(&_node->mem.regions)))
167 return; 202 return;
168 203
169 r = list_first_entry(&_node->mem->regions, struct nvkm_mm_node, 204 r = list_first_entry(&_node->mem.regions, struct nvkm_mm_node,
170 rl_entry); 205 rl_entry);
171 206
172 /* clear bit 34 to unmap pages */ 207 /* clear bit 34 to unmap pages */
173 r->offset &= ~BIT(34 - priv->iommu_pgshift); 208 r->offset &= ~BIT(34 - imem->iommu_pgshift);
174 209
175 /* Unmap pages from GPU address space and free them */ 210 /* Unmap pages from GPU address space and free them */
176 for (i = 0; i < _node->mem->size; i++) { 211 for (i = 0; i < _node->mem.size; i++) {
177 iommu_unmap(priv->domain, 212 iommu_unmap(imem->domain,
178 (r->offset + i) << priv->iommu_pgshift, PAGE_SIZE); 213 (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
179 __free_page(node->pages[i]); 214 __free_page(node->pages[i]);
180 } 215 }
181 216
182 /* Release area from GPU address space */ 217 /* Release area from GPU address space */
183 mutex_lock(priv->mm_mutex); 218 mutex_lock(imem->mm_mutex);
184 nvkm_mm_free(priv->mm, &r); 219 nvkm_mm_free(imem->mm, &r);
185 mutex_unlock(priv->mm_mutex); 220 mutex_unlock(imem->mm_mutex);
186} 221}
187 222
188static void 223static void *
189gk20a_instobj_dtor(struct nvkm_object *object) 224gk20a_instobj_dtor(struct nvkm_memory *memory)
190{ 225{
191 struct gk20a_instobj_priv *node = (void *)object; 226 struct gk20a_instobj *node = gk20a_instobj(memory);
192 struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node); 227 struct gk20a_instmem *imem = node->imem;
193 228
194 if (priv->domain) 229 if (imem->domain)
195 gk20a_instobj_dtor_iommu(node); 230 gk20a_instobj_dtor_iommu(node);
196 else 231 else
197 gk20a_instobj_dtor_dma(node); 232 gk20a_instobj_dtor_dma(node);
198 233
199 nvkm_instobj_destroy(&node->base); 234 return node;
200} 235}
201 236
237static const struct nvkm_memory_func
238gk20a_instobj_func = {
239 .dtor = gk20a_instobj_dtor,
240 .target = gk20a_instobj_target,
241 .addr = gk20a_instobj_addr,
242 .size = gk20a_instobj_size,
243 .acquire = gk20a_instobj_acquire,
244 .release = gk20a_instobj_release,
245 .rd32 = gk20a_instobj_rd32,
246 .wr32 = gk20a_instobj_wr32,
247 .map = gk20a_instobj_map,
248};
249
202static int 250static int
203gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine, 251gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
204 struct nvkm_oclass *oclass, u32 npages, u32 align, 252 struct gk20a_instobj **_node)
205 struct gk20a_instobj_priv **_node)
206{ 253{
207 struct gk20a_instobj_dma *node; 254 struct gk20a_instobj_dma *node;
208 struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent); 255 struct nvkm_subdev *subdev = &imem->base.subdev;
209 struct device *dev = nv_device_base(nv_device(parent)); 256 struct device *dev = subdev->device->dev;
210 int ret;
211 257
212 ret = nvkm_instobj_create_(parent, engine, oclass, sizeof(*node), 258 if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
213 (void **)&node); 259 return -ENOMEM;
214 *_node = &node->base; 260 *_node = &node->base;
215 if (ret)
216 return ret;
217 261
218 node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, 262 node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
219 &node->handle, GFP_KERNEL, 263 &node->handle, GFP_KERNEL,
220 &priv->attrs); 264 &imem->attrs);
221 if (!node->cpuaddr) { 265 if (!node->cpuaddr) {
222 nv_error(priv, "cannot allocate DMA memory\n"); 266 nvkm_error(subdev, "cannot allocate DMA memory\n");
223 return -ENOMEM; 267 return -ENOMEM;
224 } 268 }
225 269
226 /* alignment check */ 270 /* alignment check */
227 if (unlikely(node->handle & (align - 1))) 271 if (unlikely(node->handle & (align - 1)))
228 nv_warn(priv, "memory not aligned as requested: %pad (0x%x)\n", 272 nvkm_warn(subdev,
229 &node->handle, align); 273 "memory not aligned as requested: %pad (0x%x)\n",
274 &node->handle, align);
230 275
231 /* present memory for being mapped using small pages */ 276 /* present memory for being mapped using small pages */
232 node->r.type = 12; 277 node->r.type = 12;
233 node->r.offset = node->handle >> 12; 278 node->r.offset = node->handle >> 12;
234 node->r.length = (npages << PAGE_SHIFT) >> 12; 279 node->r.length = (npages << PAGE_SHIFT) >> 12;
235 280
236 node->base._mem.offset = node->handle; 281 node->base.mem.offset = node->handle;
237 282
238 INIT_LIST_HEAD(&node->base._mem.regions); 283 INIT_LIST_HEAD(&node->base.mem.regions);
239 list_add_tail(&node->r.rl_entry, &node->base._mem.regions); 284 list_add_tail(&node->r.rl_entry, &node->base.mem.regions);
240 285
241 return 0; 286 return 0;
242} 287}
243 288
244static int 289static int
245gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine, 290gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
246 struct nvkm_oclass *oclass, u32 npages, u32 align, 291 struct gk20a_instobj **_node)
247 struct gk20a_instobj_priv **_node)
248{ 292{
249 struct gk20a_instobj_iommu *node; 293 struct gk20a_instobj_iommu *node;
250 struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent); 294 struct nvkm_subdev *subdev = &imem->base.subdev;
251 struct nvkm_mm_node *r; 295 struct nvkm_mm_node *r;
252 int ret; 296 int ret;
253 int i; 297 int i;
254 298
255 ret = nvkm_instobj_create_(parent, engine, oclass, 299 if (!(node = kzalloc(sizeof(*node) +
256 sizeof(*node) + sizeof(node->pages[0]) * npages, 300 sizeof( node->pages[0]) * npages, GFP_KERNEL)))
257 (void **)&node); 301 return -ENOMEM;
258 *_node = &node->base; 302 *_node = &node->base;
259 if (ret)
260 return ret;
261 303
262 /* Allocate backing memory */ 304 /* Allocate backing memory */
263 for (i = 0; i < npages; i++) { 305 for (i = 0; i < npages; i++) {
@@ -270,48 +312,48 @@ gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
270 node->pages[i] = p; 312 node->pages[i] = p;
271 } 313 }
272 314
273 mutex_lock(priv->mm_mutex); 315 mutex_lock(imem->mm_mutex);
274 /* Reserve area from GPU address space */ 316 /* Reserve area from GPU address space */
275 ret = nvkm_mm_head(priv->mm, 0, 1, npages, npages, 317 ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
276 align >> priv->iommu_pgshift, &r); 318 align >> imem->iommu_pgshift, &r);
277 mutex_unlock(priv->mm_mutex); 319 mutex_unlock(imem->mm_mutex);
278 if (ret) { 320 if (ret) {
279 nv_error(priv, "virtual space is full!\n"); 321 nvkm_error(subdev, "virtual space is full!\n");
280 goto free_pages; 322 goto free_pages;
281 } 323 }
282 324
283 /* Map into GPU address space */ 325 /* Map into GPU address space */
284 for (i = 0; i < npages; i++) { 326 for (i = 0; i < npages; i++) {
285 struct page *p = node->pages[i]; 327 struct page *p = node->pages[i];
286 u32 offset = (r->offset + i) << priv->iommu_pgshift; 328 u32 offset = (r->offset + i) << imem->iommu_pgshift;
287 329
288 ret = iommu_map(priv->domain, offset, page_to_phys(p), 330 ret = iommu_map(imem->domain, offset, page_to_phys(p),
289 PAGE_SIZE, IOMMU_READ | IOMMU_WRITE); 331 PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
290 if (ret < 0) { 332 if (ret < 0) {
291 nv_error(priv, "IOMMU mapping failure: %d\n", ret); 333 nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
292 334
293 while (i-- > 0) { 335 while (i-- > 0) {
294 offset -= PAGE_SIZE; 336 offset -= PAGE_SIZE;
295 iommu_unmap(priv->domain, offset, PAGE_SIZE); 337 iommu_unmap(imem->domain, offset, PAGE_SIZE);
296 } 338 }
297 goto release_area; 339 goto release_area;
298 } 340 }
299 } 341 }
300 342
301 /* Bit 34 tells that an address is to be resolved through the IOMMU */ 343 /* Bit 34 tells that an address is to be resolved through the IOMMU */
302 r->offset |= BIT(34 - priv->iommu_pgshift); 344 r->offset |= BIT(34 - imem->iommu_pgshift);
303 345
304 node->base._mem.offset = ((u64)r->offset) << priv->iommu_pgshift; 346 node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
305 347
306 INIT_LIST_HEAD(&node->base._mem.regions); 348 INIT_LIST_HEAD(&node->base.mem.regions);
307 list_add_tail(&r->rl_entry, &node->base._mem.regions); 349 list_add_tail(&r->rl_entry, &node->base.mem.regions);
308 350
309 return 0; 351 return 0;
310 352
311release_area: 353release_area:
312 mutex_lock(priv->mm_mutex); 354 mutex_lock(imem->mm_mutex);
313 nvkm_mm_free(priv->mm, &r); 355 nvkm_mm_free(imem->mm, &r);
314 mutex_unlock(priv->mm_mutex); 356 mutex_unlock(imem->mm_mutex);
315 357
316free_pages: 358free_pages:
317 for (i = 0; i < npages && node->pages[i] != NULL; i++) 359 for (i = 0; i < npages && node->pages[i] != NULL; i++)
@@ -321,120 +363,92 @@ free_pages:
321} 363}
322 364
323static int 365static int
324gk20a_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 366gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
325 struct nvkm_oclass *oclass, void *data, u32 _size, 367 struct nvkm_memory **pmemory)
326 struct nvkm_object **pobject)
327{ 368{
328 struct nvkm_instobj_args *args = data; 369 struct gk20a_instmem *imem = gk20a_instmem(base);
329 struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent); 370 struct gk20a_instobj *node = NULL;
330 struct gk20a_instobj_priv *node; 371 struct nvkm_subdev *subdev = &imem->base.subdev;
331 u32 size, align;
332 int ret; 372 int ret;
333 373
334 nv_debug(parent, "%s (%s): size: %x align: %x\n", __func__, 374 nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
335 priv->domain ? "IOMMU" : "DMA", args->size, args->align); 375 imem->domain ? "IOMMU" : "DMA", size, align);
336 376
337 /* Round size and align to page bounds */ 377 /* Round size and align to page bounds */
338 size = max(roundup(args->size, PAGE_SIZE), PAGE_SIZE); 378 size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
339 align = max(roundup(args->align, PAGE_SIZE), PAGE_SIZE); 379 align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);
340 380
341 if (priv->domain) 381 if (imem->domain)
342 ret = gk20a_instobj_ctor_iommu(parent, engine, oclass, 382 ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
343 size >> PAGE_SHIFT, align, &node); 383 align, &node);
344 else 384 else
345 ret = gk20a_instobj_ctor_dma(parent, engine, oclass, 385 ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
346 size >> PAGE_SHIFT, align, &node); 386 align, &node);
347 *pobject = nv_object(node); 387 *pmemory = node ? &node->memory : NULL;
348 if (ret) 388 if (ret)
349 return ret; 389 return ret;
350 390
351 node->mem = &node->_mem; 391 nvkm_memory_ctor(&gk20a_instobj_func, &node->memory);
392 node->imem = imem;
352 393
353 /* present memory for being mapped using small pages */ 394 /* present memory for being mapped using small pages */
354 node->mem->size = size >> 12; 395 node->mem.size = size >> 12;
355 node->mem->memtype = 0; 396 node->mem.memtype = 0;
356 node->mem->page_shift = 12; 397 node->mem.page_shift = 12;
357
358 node->base.addr = node->mem->offset;
359 node->base.size = size;
360 398
361 nv_debug(parent, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n", 399 nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
362 size, align, node->mem->offset); 400 size, align, node->mem.offset);
363 401
364 return 0; 402 return 0;
365} 403}
366 404
367static struct nvkm_instobj_impl 405static void
368gk20a_instobj_oclass = { 406gk20a_instmem_fini(struct nvkm_instmem *base)
369 .base.ofuncs = &(struct nvkm_ofuncs) {
370 .ctor = gk20a_instobj_ctor,
371 .dtor = gk20a_instobj_dtor,
372 .init = _nvkm_instobj_init,
373 .fini = _nvkm_instobj_fini,
374 .rd32 = gk20a_instobj_rd32,
375 .wr32 = gk20a_instobj_wr32,
376 },
377};
378
379
380
381static int
382gk20a_instmem_fini(struct nvkm_object *object, bool suspend)
383{ 407{
384 struct gk20a_instmem_priv *priv = (void *)object; 408 gk20a_instmem(base)->addr = ~0ULL;
385 priv->addr = ~0ULL;
386 return nvkm_instmem_fini(&priv->base, suspend);
387} 409}
388 410
389static int 411static const struct nvkm_instmem_func
390gk20a_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 412gk20a_instmem = {
391 struct nvkm_oclass *oclass, void *data, u32 size, 413 .fini = gk20a_instmem_fini,
392 struct nvkm_object **pobject) 414 .memory_new = gk20a_instobj_new,
393{ 415 .persistent = true,
394 struct gk20a_instmem_priv *priv; 416 .zero = false,
395 struct nouveau_platform_device *plat; 417};
396 int ret;
397 418
398 ret = nvkm_instmem_create(parent, engine, oclass, &priv); 419int
399 *pobject = nv_object(priv); 420gk20a_instmem_new(struct nvkm_device *device, int index,
400 if (ret) 421 struct nvkm_instmem **pimem)
401 return ret; 422{
423 struct nvkm_device_tegra *tdev = device->func->tegra(device);
424 struct gk20a_instmem *imem;
402 425
403 spin_lock_init(&priv->lock); 426 if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
427 return -ENOMEM;
428 nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
429 spin_lock_init(&imem->lock);
430 *pimem = &imem->base;
404 431
405 plat = nv_device_to_platform(nv_device(parent)); 432 if (tdev->iommu.domain) {
406 if (plat->gpu->iommu.domain) { 433 imem->domain = tdev->iommu.domain;
407 priv->domain = plat->gpu->iommu.domain; 434 imem->mm = &tdev->iommu.mm;
408 priv->mm = plat->gpu->iommu.mm; 435 imem->iommu_pgshift = tdev->iommu.pgshift;
409 priv->iommu_pgshift = plat->gpu->iommu.pgshift; 436 imem->mm_mutex = &tdev->iommu.mutex;
410 priv->mm_mutex = &plat->gpu->iommu.mutex;
411 437
412 nv_info(priv, "using IOMMU\n"); 438 nvkm_info(&imem->base.subdev, "using IOMMU\n");
413 } else { 439 } else {
414 init_dma_attrs(&priv->attrs); 440 init_dma_attrs(&imem->attrs);
415 /* 441 /*
416 * We will access instmem through PRAMIN and thus do not need a 442 * We will access instmem through PRAMIN and thus do not need a
417 * consistent CPU pointer or kernel mapping 443 * consistent CPU pointer or kernel mapping
418 */ 444 */
419 dma_set_attr(DMA_ATTR_NON_CONSISTENT, &priv->attrs); 445 dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
420 dma_set_attr(DMA_ATTR_WEAK_ORDERING, &priv->attrs); 446 dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
421 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &priv->attrs); 447 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
422 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &priv->attrs); 448 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
423 449
424 nv_info(priv, "using DMA API\n"); 450 nvkm_info(&imem->base.subdev, "using DMA API\n");
425 } 451 }
426 452
427 return 0; 453 return 0;
428} 454}
429
430struct nvkm_oclass *
431gk20a_instmem_oclass = &(struct nvkm_instmem_impl) {
432 .base.handle = NV_SUBDEV(INSTMEM, 0xea),
433 .base.ofuncs = &(struct nvkm_ofuncs) {
434 .ctor = gk20a_instmem_ctor,
435 .dtor = _nvkm_instmem_dtor,
436 .init = _nvkm_instmem_init,
437 .fini = gk20a_instmem_fini,
438 },
439 .instobj = &gk20a_instobj_oclass.base,
440}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 282143f49d72..6133c8bb2d42 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -21,173 +21,207 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#define nv04_instmem(p) container_of((p), struct nv04_instmem, base)
25#include "priv.h"
25 26
27#include <core/memory.h>
26#include <core/ramht.h> 28#include <core/ramht.h>
27 29
30struct nv04_instmem {
31 struct nvkm_instmem base;
32 struct nvkm_mm heap;
33};
34
28/****************************************************************************** 35/******************************************************************************
29 * instmem object implementation 36 * instmem object implementation
30 *****************************************************************************/ 37 *****************************************************************************/
38#define nv04_instobj(p) container_of((p), struct nv04_instobj, memory)
31 39
32static u32 40struct nv04_instobj {
33nv04_instobj_rd32(struct nvkm_object *object, u64 addr) 41 struct nvkm_memory memory;
42 struct nv04_instmem *imem;
43 struct nvkm_mm_node *node;
44};
45
46static enum nvkm_memory_target
47nv04_instobj_target(struct nvkm_memory *memory)
34{ 48{
35 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object); 49 return NVKM_MEM_TARGET_INST;
36 struct nv04_instobj_priv *node = (void *)object;
37 return nv_ro32(priv, node->mem->offset + addr);
38} 50}
39 51
40static void 52static u64
41nv04_instobj_wr32(struct nvkm_object *object, u64 addr, u32 data) 53nv04_instobj_addr(struct nvkm_memory *memory)
54{
55 return nv04_instobj(memory)->node->offset;
56}
57
58static u64
59nv04_instobj_size(struct nvkm_memory *memory)
60{
61 return nv04_instobj(memory)->node->length;
62}
63
64static void __iomem *
65nv04_instobj_acquire(struct nvkm_memory *memory)
42{ 66{
43 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object); 67 struct nv04_instobj *iobj = nv04_instobj(memory);
44 struct nv04_instobj_priv *node = (void *)object; 68 struct nvkm_device *device = iobj->imem->base.subdev.device;
45 nv_wo32(priv, node->mem->offset + addr, data); 69 return device->pri + 0x700000 + iobj->node->offset;
46} 70}
47 71
48static void 72static void
49nv04_instobj_dtor(struct nvkm_object *object) 73nv04_instobj_release(struct nvkm_memory *memory)
50{ 74{
51 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object); 75}
52 struct nv04_instobj_priv *node = (void *)object;
53 struct nvkm_subdev *subdev = (void *)priv;
54 76
55 mutex_lock(&subdev->mutex); 77static u32
56 nvkm_mm_free(&priv->heap, &node->mem); 78nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
57 mutex_unlock(&subdev->mutex); 79{
80 struct nv04_instobj *iobj = nv04_instobj(memory);
81 struct nvkm_device *device = iobj->imem->base.subdev.device;
82 return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
83}
58 84
59 nvkm_instobj_destroy(&node->base); 85static void
86nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
87{
88 struct nv04_instobj *iobj = nv04_instobj(memory);
89 struct nvkm_device *device = iobj->imem->base.subdev.device;
90 nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
60} 91}
61 92
93static void *
94nv04_instobj_dtor(struct nvkm_memory *memory)
95{
96 struct nv04_instobj *iobj = nv04_instobj(memory);
97 mutex_lock(&iobj->imem->base.subdev.mutex);
98 nvkm_mm_free(&iobj->imem->heap, &iobj->node);
99 mutex_unlock(&iobj->imem->base.subdev.mutex);
100 return iobj;
101}
102
103static const struct nvkm_memory_func
104nv04_instobj_func = {
105 .dtor = nv04_instobj_dtor,
106 .target = nv04_instobj_target,
107 .size = nv04_instobj_size,
108 .addr = nv04_instobj_addr,
109 .acquire = nv04_instobj_acquire,
110 .release = nv04_instobj_release,
111 .rd32 = nv04_instobj_rd32,
112 .wr32 = nv04_instobj_wr32,
113};
114
62static int 115static int
63nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 116nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
64 struct nvkm_oclass *oclass, void *data, u32 size, 117 struct nvkm_memory **pmemory)
65 struct nvkm_object **pobject)
66{ 118{
67 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent); 119 struct nv04_instmem *imem = nv04_instmem(base);
68 struct nv04_instobj_priv *node; 120 struct nv04_instobj *iobj;
69 struct nvkm_instobj_args *args = data;
70 struct nvkm_subdev *subdev = (void *)priv;
71 int ret; 121 int ret;
72 122
73 if (!args->align) 123 if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
74 args->align = 1; 124 return -ENOMEM;
125 *pmemory = &iobj->memory;
75 126
76 ret = nvkm_instobj_create(parent, engine, oclass, &node); 127 nvkm_memory_ctor(&nv04_instobj_func, &iobj->memory);
77 *pobject = nv_object(node); 128 iobj->imem = imem;
78 if (ret)
79 return ret;
80
81 mutex_lock(&subdev->mutex);
82 ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
83 args->align, &node->mem);
84 mutex_unlock(&subdev->mutex);
85 if (ret)
86 return ret;
87 129
88 node->base.addr = node->mem->offset; 130 mutex_lock(&imem->base.subdev.mutex);
89 node->base.size = node->mem->length; 131 ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
90 return 0; 132 align ? align : 1, &iobj->node);
133 mutex_unlock(&imem->base.subdev.mutex);
134 return ret;
91} 135}
92 136
93struct nvkm_instobj_impl
94nv04_instobj_oclass = {
95 .base.ofuncs = &(struct nvkm_ofuncs) {
96 .ctor = nv04_instobj_ctor,
97 .dtor = nv04_instobj_dtor,
98 .init = _nvkm_instobj_init,
99 .fini = _nvkm_instobj_fini,
100 .rd32 = nv04_instobj_rd32,
101 .wr32 = nv04_instobj_wr32,
102 },
103};
104
105/****************************************************************************** 137/******************************************************************************
106 * instmem subdev implementation 138 * instmem subdev implementation
107 *****************************************************************************/ 139 *****************************************************************************/
108 140
109static u32 141static u32
110nv04_instmem_rd32(struct nvkm_object *object, u64 addr) 142nv04_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
111{ 143{
112 return nv_rd32(object, 0x700000 + addr); 144 return nvkm_rd32(imem->subdev.device, 0x700000 + addr);
113} 145}
114 146
115static void 147static void
116nv04_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data) 148nv04_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
117{
118 return nv_wr32(object, 0x700000 + addr, data);
119}
120
121void
122nv04_instmem_dtor(struct nvkm_object *object)
123{ 149{
124 struct nv04_instmem_priv *priv = (void *)object; 150 nvkm_wr32(imem->subdev.device, 0x700000 + addr, data);
125 nvkm_gpuobj_ref(NULL, &priv->ramfc);
126 nvkm_gpuobj_ref(NULL, &priv->ramro);
127 nvkm_ramht_ref(NULL, &priv->ramht);
128 nvkm_gpuobj_ref(NULL, &priv->vbios);
129 nvkm_mm_fini(&priv->heap);
130 if (priv->iomem)
131 iounmap(priv->iomem);
132 nvkm_instmem_destroy(&priv->base);
133} 151}
134 152
135static int 153static int
136nv04_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 154nv04_instmem_oneinit(struct nvkm_instmem *base)
137 struct nvkm_oclass *oclass, void *data, u32 size,
138 struct nvkm_object **pobject)
139{ 155{
140 struct nv04_instmem_priv *priv; 156 struct nv04_instmem *imem = nv04_instmem(base);
157 struct nvkm_device *device = imem->base.subdev.device;
141 int ret; 158 int ret;
142 159
143 ret = nvkm_instmem_create(parent, engine, oclass, &priv);
144 *pobject = nv_object(priv);
145 if (ret)
146 return ret;
147
148 /* PRAMIN aperture maps over the end of VRAM, reserve it */ 160 /* PRAMIN aperture maps over the end of VRAM, reserve it */
149 priv->base.reserved = 512 * 1024; 161 imem->base.reserved = 512 * 1024;
150 162
151 ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1); 163 ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
152 if (ret) 164 if (ret)
153 return ret; 165 return ret;
154 166
155 /* 0x00000-0x10000: reserve for probable vbios image */ 167 /* 0x00000-0x10000: reserve for probable vbios image */
156 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0, 168 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
157 &priv->vbios); 169 &imem->base.vbios);
158 if (ret) 170 if (ret)
159 return ret; 171 return ret;
160 172
161 /* 0x10000-0x18000: reserve for RAMHT */ 173 /* 0x10000-0x18000: reserve for RAMHT */
162 ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht); 174 ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
163 if (ret) 175 if (ret)
164 return ret; 176 return ret;
165 177
166 /* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */ 178 /* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
167 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x00800, 0, 179 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00800, 0, true,
168 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc); 180 &imem->base.ramfc);
169 if (ret) 181 if (ret)
170 return ret; 182 return ret;
171 183
172 /* 0x18800-0x18a00: reserve for RAMRO */ 184 /* 0x18800-0x18a00: reserve for RAMRO */
173 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0, 185 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00200, 0, false,
174 &priv->ramro); 186 &imem->base.ramro);
175 if (ret) 187 if (ret)
176 return ret; 188 return ret;
177 189
178 return 0; 190 return 0;
179} 191}
180 192
181struct nvkm_oclass * 193static void *
182nv04_instmem_oclass = &(struct nvkm_instmem_impl) { 194nv04_instmem_dtor(struct nvkm_instmem *base)
183 .base.handle = NV_SUBDEV(INSTMEM, 0x04), 195{
184 .base.ofuncs = &(struct nvkm_ofuncs) { 196 struct nv04_instmem *imem = nv04_instmem(base);
185 .ctor = nv04_instmem_ctor, 197 nvkm_memory_del(&imem->base.ramfc);
186 .dtor = nv04_instmem_dtor, 198 nvkm_memory_del(&imem->base.ramro);
187 .init = _nvkm_instmem_init, 199 nvkm_ramht_del(&imem->base.ramht);
188 .fini = _nvkm_instmem_fini, 200 nvkm_memory_del(&imem->base.vbios);
189 .rd32 = nv04_instmem_rd32, 201 nvkm_mm_fini(&imem->heap);
190 .wr32 = nv04_instmem_wr32, 202 return imem;
191 }, 203}
192 .instobj = &nv04_instobj_oclass.base, 204
193}.base; 205static const struct nvkm_instmem_func
206nv04_instmem = {
207 .dtor = nv04_instmem_dtor,
208 .oneinit = nv04_instmem_oneinit,
209 .rd32 = nv04_instmem_rd32,
210 .wr32 = nv04_instmem_wr32,
211 .memory_new = nv04_instobj_new,
212 .persistent = false,
213 .zero = false,
214};
215
216int
217nv04_instmem_new(struct nvkm_device *device, int index,
218 struct nvkm_instmem **pimem)
219{
220 struct nv04_instmem *imem;
221
222 if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
223 return -ENOMEM;
224 nvkm_instmem_ctor(&nv04_instmem, device, index, &imem->base);
225 *pimem = &imem->base;
226 return 0;
227}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h
deleted file mode 100644
index 42b6c928047c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h
+++ /dev/null
@@ -1,36 +0,0 @@
1#ifndef __NV04_INSTMEM_H__
2#define __NV04_INSTMEM_H__
3#include "priv.h"
4
5#include <core/mm.h>
6
7extern struct nvkm_instobj_impl nv04_instobj_oclass;
8
9struct nv04_instmem_priv {
10 struct nvkm_instmem base;
11
12 void __iomem *iomem;
13 struct nvkm_mm heap;
14
15 struct nvkm_gpuobj *vbios;
16 struct nvkm_ramht *ramht;
17 struct nvkm_gpuobj *ramro;
18 struct nvkm_gpuobj *ramfc;
19};
20
21static inline struct nv04_instmem_priv *
22nv04_instmem(void *obj)
23{
24 return (void *)nvkm_instmem(obj);
25}
26
27struct nv04_instobj_priv {
28 struct nvkm_instobj base;
29 struct nvkm_mm_node *mem;
30};
31
32void nv04_instmem_dtor(struct nvkm_object *);
33
34int nv04_instmem_alloc(struct nvkm_instmem *, struct nvkm_object *,
35 u32 size, u32 align, struct nvkm_object **pobject);
36#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index b42b8588fc0e..c0543875e490 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -21,116 +21,239 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
25#include "priv.h"
25 26
27#include <core/memory.h>
26#include <core/ramht.h> 28#include <core/ramht.h>
27#include <engine/gr/nv40.h> 29#include <engine/gr/nv40.h>
28 30
31struct nv40_instmem {
32 struct nvkm_instmem base;
33 struct nvkm_mm heap;
34 void __iomem *iomem;
35};
36
29/****************************************************************************** 37/******************************************************************************
30 * instmem subdev implementation 38 * instmem object implementation
31 *****************************************************************************/ 39 *****************************************************************************/
40#define nv40_instobj(p) container_of((p), struct nv40_instobj, memory)
41
42struct nv40_instobj {
43 struct nvkm_memory memory;
44 struct nv40_instmem *imem;
45 struct nvkm_mm_node *node;
46};
47
48static enum nvkm_memory_target
49nv40_instobj_target(struct nvkm_memory *memory)
50{
51 return NVKM_MEM_TARGET_INST;
52}
53
54static u64
55nv40_instobj_addr(struct nvkm_memory *memory)
56{
57 return nv40_instobj(memory)->node->offset;
58}
59
60static u64
61nv40_instobj_size(struct nvkm_memory *memory)
62{
63 return nv40_instobj(memory)->node->length;
64}
65
66static void __iomem *
67nv40_instobj_acquire(struct nvkm_memory *memory)
68{
69 struct nv40_instobj *iobj = nv40_instobj(memory);
70 return iobj->imem->iomem + iobj->node->offset;
71}
72
73static void
74nv40_instobj_release(struct nvkm_memory *memory)
75{
76}
32 77
33static u32 78static u32
34nv40_instmem_rd32(struct nvkm_object *object, u64 addr) 79nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
35{ 80{
36 struct nv04_instmem_priv *priv = (void *)object; 81 struct nv40_instobj *iobj = nv40_instobj(memory);
37 return ioread32_native(priv->iomem + addr); 82 return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
38} 83}
39 84
40static void 85static void
41nv40_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data) 86nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
87{
88 struct nv40_instobj *iobj = nv40_instobj(memory);
89 iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
90}
91
92static void *
93nv40_instobj_dtor(struct nvkm_memory *memory)
42{ 94{
43 struct nv04_instmem_priv *priv = (void *)object; 95 struct nv40_instobj *iobj = nv40_instobj(memory);
44 iowrite32_native(data, priv->iomem + addr); 96 mutex_lock(&iobj->imem->base.subdev.mutex);
97 nvkm_mm_free(&iobj->imem->heap, &iobj->node);
98 mutex_unlock(&iobj->imem->base.subdev.mutex);
99 return iobj;
45} 100}
46 101
102static const struct nvkm_memory_func
103nv40_instobj_func = {
104 .dtor = nv40_instobj_dtor,
105 .target = nv40_instobj_target,
106 .size = nv40_instobj_size,
107 .addr = nv40_instobj_addr,
108 .acquire = nv40_instobj_acquire,
109 .release = nv40_instobj_release,
110 .rd32 = nv40_instobj_rd32,
111 .wr32 = nv40_instobj_wr32,
112};
113
47static int 114static int
48nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 115nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
49 struct nvkm_oclass *oclass, void *data, u32 size, 116 struct nvkm_memory **pmemory)
50 struct nvkm_object **pobject)
51{ 117{
52 struct nvkm_device *device = nv_device(parent); 118 struct nv40_instmem *imem = nv40_instmem(base);
53 struct nv04_instmem_priv *priv; 119 struct nv40_instobj *iobj;
54 int ret, bar, vs; 120 int ret;
55 121
56 ret = nvkm_instmem_create(parent, engine, oclass, &priv); 122 if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
57 *pobject = nv_object(priv); 123 return -ENOMEM;
58 if (ret) 124 *pmemory = &iobj->memory;
59 return ret;
60 125
61 /* map bar */ 126 nvkm_memory_ctor(&nv40_instobj_func, &iobj->memory);
62 if (nv_device_resource_len(device, 2)) 127 iobj->imem = imem;
63 bar = 2;
64 else
65 bar = 3;
66 128
67 priv->iomem = ioremap(nv_device_resource_start(device, bar), 129 mutex_lock(&imem->base.subdev.mutex);
68 nv_device_resource_len(device, bar)); 130 ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
69 if (!priv->iomem) { 131 align ? align : 1, &iobj->node);
70 nv_error(priv, "unable to map PRAMIN BAR\n"); 132 mutex_unlock(&imem->base.subdev.mutex);
71 return -EFAULT; 133 return ret;
72 } 134}
135
136/******************************************************************************
137 * instmem subdev implementation
138 *****************************************************************************/
139
140static u32
141nv40_instmem_rd32(struct nvkm_instmem *base, u32 addr)
142{
143 return ioread32_native(nv40_instmem(base)->iomem + addr);
144}
145
146static void
147nv40_instmem_wr32(struct nvkm_instmem *base, u32 addr, u32 data)
148{
149 iowrite32_native(data, nv40_instmem(base)->iomem + addr);
150}
151
152static int
153nv40_instmem_oneinit(struct nvkm_instmem *base)
154{
155 struct nv40_instmem *imem = nv40_instmem(base);
156 struct nvkm_device *device = imem->base.subdev.device;
157 int ret, vs;
73 158
74 /* PRAMIN aperture maps over the end of vram, reserve enough space 159 /* PRAMIN aperture maps over the end of vram, reserve enough space
75 * to fit graphics contexts for every channel, the magics come 160 * to fit graphics contexts for every channel, the magics come
76 * from engine/gr/nv40.c 161 * from engine/gr/nv40.c
77 */ 162 */
78 vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8); 163 vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8);
79 if (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs; 164 if (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
80 else if (device->chipset < 0x43) priv->base.reserved = 0x4f00 * vs; 165 else if (device->chipset < 0x43) imem->base.reserved = 0x4f00 * vs;
81 else if (nv44_gr_class(priv)) priv->base.reserved = 0x4980 * vs; 166 else if (nv44_gr_class(device)) imem->base.reserved = 0x4980 * vs;
82 else priv->base.reserved = 0x4a40 * vs; 167 else imem->base.reserved = 0x4a40 * vs;
83 priv->base.reserved += 16 * 1024; 168 imem->base.reserved += 16 * 1024;
84 priv->base.reserved *= 32; /* per-channel */ 169 imem->base.reserved *= 32; /* per-channel */
85 priv->base.reserved += 512 * 1024; /* pci(e)gart table */ 170 imem->base.reserved += 512 * 1024; /* pci(e)gart table */
86 priv->base.reserved += 512 * 1024; /* object storage */ 171 imem->base.reserved += 512 * 1024; /* object storage */
87 172 imem->base.reserved = round_up(imem->base.reserved, 4096);
88 priv->base.reserved = round_up(priv->base.reserved, 4096); 173
89 174 ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
90 ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
91 if (ret) 175 if (ret)
92 return ret; 176 return ret;
93 177
94 /* 0x00000-0x10000: reserve for probable vbios image */ 178 /* 0x00000-0x10000: reserve for probable vbios image */
95 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0, 179 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
96 &priv->vbios); 180 &imem->base.vbios);
97 if (ret) 181 if (ret)
98 return ret; 182 return ret;
99 183
100 /* 0x10000-0x18000: reserve for RAMHT */ 184 /* 0x10000-0x18000: reserve for RAMHT */
101 ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht); 185 ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
102 if (ret) 186 if (ret)
103 return ret; 187 return ret;
104 188
105 /* 0x18000-0x18200: reserve for RAMRO 189 /* 0x18000-0x18200: reserve for RAMRO
106 * 0x18200-0x20000: padding 190 * 0x18200-0x20000: padding
107 */ 191 */
108 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0, 192 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x08000, 0, false,
109 &priv->ramro); 193 &imem->base.ramro);
110 if (ret) 194 if (ret)
111 return ret; 195 return ret;
112 196
113 /* 0x20000-0x21000: reserve for RAMFC 197 /* 0x20000-0x21000: reserve for RAMFC
114 * 0x21000-0x40000: padding and some unknown crap 198 * 0x21000-0x40000: padding and some unknown crap
115 */ 199 */
116 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0, 200 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x20000, 0, true,
117 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc); 201 &imem->base.ramfc);
118 if (ret) 202 if (ret)
119 return ret; 203 return ret;
120 204
121 return 0; 205 return 0;
122} 206}
123 207
124struct nvkm_oclass * 208static void *
125nv40_instmem_oclass = &(struct nvkm_instmem_impl) { 209nv40_instmem_dtor(struct nvkm_instmem *base)
126 .base.handle = NV_SUBDEV(INSTMEM, 0x40), 210{
127 .base.ofuncs = &(struct nvkm_ofuncs) { 211 struct nv40_instmem *imem = nv40_instmem(base);
128 .ctor = nv40_instmem_ctor, 212 nvkm_memory_del(&imem->base.ramfc);
129 .dtor = nv04_instmem_dtor, 213 nvkm_memory_del(&imem->base.ramro);
130 .init = _nvkm_instmem_init, 214 nvkm_ramht_del(&imem->base.ramht);
131 .fini = _nvkm_instmem_fini, 215 nvkm_memory_del(&imem->base.vbios);
132 .rd32 = nv40_instmem_rd32, 216 nvkm_mm_fini(&imem->heap);
133 .wr32 = nv40_instmem_wr32, 217 if (imem->iomem)
134 }, 218 iounmap(imem->iomem);
135 .instobj = &nv04_instobj_oclass.base, 219 return imem;
136}.base; 220}
221
222static const struct nvkm_instmem_func
223nv40_instmem = {
224 .dtor = nv40_instmem_dtor,
225 .oneinit = nv40_instmem_oneinit,
226 .rd32 = nv40_instmem_rd32,
227 .wr32 = nv40_instmem_wr32,
228 .memory_new = nv40_instobj_new,
229 .persistent = false,
230 .zero = false,
231};
232
233int
234nv40_instmem_new(struct nvkm_device *device, int index,
235 struct nvkm_instmem **pimem)
236{
237 struct nv40_instmem *imem;
238 int bar;
239
240 if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
241 return -ENOMEM;
242 nvkm_instmem_ctor(&nv40_instmem, device, index, &imem->base);
243 *pimem = &imem->base;
244
245 /* map bar */
246 if (device->func->resource_size(device, 2))
247 bar = 2;
248 else
249 bar = 3;
250
251 imem->iomem = ioremap(device->func->resource_addr(device, bar),
252 device->func->resource_size(device, bar));
253 if (!imem->iomem) {
254 nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
255 return -EFAULT;
256 }
257
258 return 0;
259}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 8404143f93ee..6d512c062ae3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -21,149 +21,229 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#define nv50_instmem(p) container_of((p), struct nv50_instmem, base)
24#include "priv.h" 25#include "priv.h"
25 26
27#include <core/memory.h>
28#include <subdev/bar.h>
26#include <subdev/fb.h> 29#include <subdev/fb.h>
30#include <subdev/mmu.h>
27 31
28struct nv50_instmem_priv { 32struct nv50_instmem {
29 struct nvkm_instmem base; 33 struct nvkm_instmem base;
34 unsigned long lock_flags;
30 spinlock_t lock; 35 spinlock_t lock;
31 u64 addr; 36 u64 addr;
32}; 37};
33 38
34struct nv50_instobj_priv {
35 struct nvkm_instobj base;
36 struct nvkm_mem *mem;
37};
38
39/****************************************************************************** 39/******************************************************************************
40 * instmem object implementation 40 * instmem object implementation
41 *****************************************************************************/ 41 *****************************************************************************/
42#define nv50_instobj(p) container_of((p), struct nv50_instobj, memory)
42 43
43static u32 44struct nv50_instobj {
44nv50_instobj_rd32(struct nvkm_object *object, u64 offset) 45 struct nvkm_memory memory;
46 struct nv50_instmem *imem;
47 struct nvkm_mem *mem;
48 struct nvkm_vma bar;
49 void *map;
50};
51
52static enum nvkm_memory_target
53nv50_instobj_target(struct nvkm_memory *memory)
54{
55 return NVKM_MEM_TARGET_VRAM;
56}
57
58static u64
59nv50_instobj_addr(struct nvkm_memory *memory)
60{
61 return nv50_instobj(memory)->mem->offset;
62}
63
64static u64
65nv50_instobj_size(struct nvkm_memory *memory)
66{
67 return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT;
68}
69
70static void
71nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
72{
73 struct nv50_instobj *iobj = nv50_instobj(memory);
74 struct nvkm_subdev *subdev = &iobj->imem->base.subdev;
75 struct nvkm_device *device = subdev->device;
76 u64 size = nvkm_memory_size(memory);
77 void __iomem *map;
78 int ret;
79
80 iobj->map = ERR_PTR(-ENOMEM);
81
82 ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &iobj->bar);
83 if (ret == 0) {
84 map = ioremap(device->func->resource_addr(device, 3) +
85 (u32)iobj->bar.offset, size);
86 if (map) {
87 nvkm_memory_map(memory, &iobj->bar, 0);
88 iobj->map = map;
89 } else {
90 nvkm_warn(subdev, "PRAMIN ioremap failed\n");
91 nvkm_vm_put(&iobj->bar);
92 }
93 } else {
94 nvkm_warn(subdev, "PRAMIN exhausted\n");
95 }
96}
97
98static void
99nv50_instobj_release(struct nvkm_memory *memory)
45{ 100{
46 struct nv50_instmem_priv *priv = (void *)nvkm_instmem(object); 101 struct nv50_instmem *imem = nv50_instobj(memory)->imem;
47 struct nv50_instobj_priv *node = (void *)object; 102 spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
103}
104
105static void __iomem *
106nv50_instobj_acquire(struct nvkm_memory *memory)
107{
108 struct nv50_instobj *iobj = nv50_instobj(memory);
109 struct nv50_instmem *imem = iobj->imem;
110 struct nvkm_bar *bar = imem->base.subdev.device->bar;
111 struct nvkm_vm *vm;
48 unsigned long flags; 112 unsigned long flags;
49 u64 base = (node->mem->offset + offset) & 0xffffff00000ULL; 113
50 u64 addr = (node->mem->offset + offset) & 0x000000fffffULL; 114 if (!iobj->map && (vm = nvkm_bar_kmap(bar)))
115 nvkm_memory_boot(memory, vm);
116 if (!IS_ERR_OR_NULL(iobj->map))
117 return iobj->map;
118
119 spin_lock_irqsave(&imem->lock, flags);
120 imem->lock_flags = flags;
121 return NULL;
122}
123
124static u32
125nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
126{
127 struct nv50_instobj *iobj = nv50_instobj(memory);
128 struct nv50_instmem *imem = iobj->imem;
129 struct nvkm_device *device = imem->base.subdev.device;
130 u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
131 u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
51 u32 data; 132 u32 data;
52 133
53 spin_lock_irqsave(&priv->lock, flags); 134 if (unlikely(imem->addr != base)) {
54 if (unlikely(priv->addr != base)) { 135 nvkm_wr32(device, 0x001700, base >> 16);
55 nv_wr32(priv, 0x001700, base >> 16); 136 imem->addr = base;
56 priv->addr = base;
57 } 137 }
58 data = nv_rd32(priv, 0x700000 + addr); 138 data = nvkm_rd32(device, 0x700000 + addr);
59 spin_unlock_irqrestore(&priv->lock, flags);
60 return data; 139 return data;
61} 140}
62 141
63static void 142static void
64nv50_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data) 143nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
65{ 144{
66 struct nv50_instmem_priv *priv = (void *)nvkm_instmem(object); 145 struct nv50_instobj *iobj = nv50_instobj(memory);
67 struct nv50_instobj_priv *node = (void *)object; 146 struct nv50_instmem *imem = iobj->imem;
68 unsigned long flags; 147 struct nvkm_device *device = imem->base.subdev.device;
69 u64 base = (node->mem->offset + offset) & 0xffffff00000ULL; 148 u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
70 u64 addr = (node->mem->offset + offset) & 0x000000fffffULL; 149 u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
71 150
72 spin_lock_irqsave(&priv->lock, flags); 151 if (unlikely(imem->addr != base)) {
73 if (unlikely(priv->addr != base)) { 152 nvkm_wr32(device, 0x001700, base >> 16);
74 nv_wr32(priv, 0x001700, base >> 16); 153 imem->addr = base;
75 priv->addr = base;
76 } 154 }
77 nv_wr32(priv, 0x700000 + addr, data); 155 nvkm_wr32(device, 0x700000 + addr, data);
78 spin_unlock_irqrestore(&priv->lock, flags);
79} 156}
80 157
81static void 158static void
82nv50_instobj_dtor(struct nvkm_object *object) 159nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
83{ 160{
84 struct nv50_instobj_priv *node = (void *)object; 161 struct nv50_instobj *iobj = nv50_instobj(memory);
85 struct nvkm_fb *pfb = nvkm_fb(object); 162 nvkm_vm_map_at(vma, offset, iobj->mem);
86 pfb->ram->put(pfb, &node->mem);
87 nvkm_instobj_destroy(&node->base);
88} 163}
89 164
165static void *
166nv50_instobj_dtor(struct nvkm_memory *memory)
167{
168 struct nv50_instobj *iobj = nv50_instobj(memory);
169 struct nvkm_ram *ram = iobj->imem->base.subdev.device->fb->ram;
170 if (!IS_ERR_OR_NULL(iobj->map)) {
171 nvkm_vm_put(&iobj->bar);
172 iounmap(iobj->map);
173 }
174 ram->func->put(ram, &iobj->mem);
175 return iobj;
176}
177
178static const struct nvkm_memory_func
179nv50_instobj_func = {
180 .dtor = nv50_instobj_dtor,
181 .target = nv50_instobj_target,
182 .size = nv50_instobj_size,
183 .addr = nv50_instobj_addr,
184 .boot = nv50_instobj_boot,
185 .acquire = nv50_instobj_acquire,
186 .release = nv50_instobj_release,
187 .rd32 = nv50_instobj_rd32,
188 .wr32 = nv50_instobj_wr32,
189 .map = nv50_instobj_map,
190};
191
90static int 192static int
91nv50_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 193nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
92 struct nvkm_oclass *oclass, void *data, u32 size, 194 struct nvkm_memory **pmemory)
93 struct nvkm_object **pobject)
94{ 195{
95 struct nvkm_fb *pfb = nvkm_fb(parent); 196 struct nv50_instmem *imem = nv50_instmem(base);
96 struct nvkm_instobj_args *args = data; 197 struct nv50_instobj *iobj;
97 struct nv50_instobj_priv *node; 198 struct nvkm_ram *ram = imem->base.subdev.device->fb->ram;
98 int ret; 199 int ret;
99 200
100 args->size = max((args->size + 4095) & ~4095, (u32)4096); 201 if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
101 args->align = max((args->align + 4095) & ~4095, (u32)4096); 202 return -ENOMEM;
203 *pmemory = &iobj->memory;
102 204
103 ret = nvkm_instobj_create(parent, engine, oclass, &node); 205 nvkm_memory_ctor(&nv50_instobj_func, &iobj->memory);
104 *pobject = nv_object(node); 206 iobj->imem = imem;
105 if (ret) 207
106 return ret; 208 size = max((size + 4095) & ~4095, (u32)4096);
209 align = max((align + 4095) & ~4095, (u32)4096);
107 210
108 ret = pfb->ram->get(pfb, args->size, args->align, 0, 0x800, &node->mem); 211 ret = ram->func->get(ram, size, align, 0, 0x800, &iobj->mem);
109 if (ret) 212 if (ret)
110 return ret; 213 return ret;
111 214
112 node->base.addr = node->mem->offset; 215 iobj->mem->page_shift = 12;
113 node->base.size = node->mem->size << 12;
114 node->mem->page_shift = 12;
115 return 0; 216 return 0;
116} 217}
117 218
118static struct nvkm_instobj_impl
119nv50_instobj_oclass = {
120 .base.ofuncs = &(struct nvkm_ofuncs) {
121 .ctor = nv50_instobj_ctor,
122 .dtor = nv50_instobj_dtor,
123 .init = _nvkm_instobj_init,
124 .fini = _nvkm_instobj_fini,
125 .rd32 = nv50_instobj_rd32,
126 .wr32 = nv50_instobj_wr32,
127 },
128};
129
130/****************************************************************************** 219/******************************************************************************
131 * instmem subdev implementation 220 * instmem subdev implementation
132 *****************************************************************************/ 221 *****************************************************************************/
133 222
134static int 223static void
135nv50_instmem_fini(struct nvkm_object *object, bool suspend) 224nv50_instmem_fini(struct nvkm_instmem *base)
136{ 225{
137 struct nv50_instmem_priv *priv = (void *)object; 226 nv50_instmem(base)->addr = ~0ULL;
138 priv->addr = ~0ULL;
139 return nvkm_instmem_fini(&priv->base, suspend);
140} 227}
141 228
142static int 229static const struct nvkm_instmem_func
143nv50_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 230nv50_instmem = {
144 struct nvkm_oclass *oclass, void *data, u32 size, 231 .fini = nv50_instmem_fini,
145 struct nvkm_object **pobject) 232 .memory_new = nv50_instobj_new,
146{ 233 .persistent = false,
147 struct nv50_instmem_priv *priv; 234 .zero = false,
148 int ret; 235};
149 236
150 ret = nvkm_instmem_create(parent, engine, oclass, &priv); 237int
151 *pobject = nv_object(priv); 238nv50_instmem_new(struct nvkm_device *device, int index,
152 if (ret) 239 struct nvkm_instmem **pimem)
153 return ret; 240{
241 struct nv50_instmem *imem;
154 242
155 spin_lock_init(&priv->lock); 243 if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
244 return -ENOMEM;
245 nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
246 spin_lock_init(&imem->lock);
247 *pimem = &imem->base;
156 return 0; 248 return 0;
157} 249}
158
159struct nvkm_oclass *
160nv50_instmem_oclass = &(struct nvkm_instmem_impl) {
161 .base.handle = NV_SUBDEV(INSTMEM, 0x50),
162 .base.ofuncs = &(struct nvkm_ofuncs) {
163 .ctor = nv50_instmem_ctor,
164 .dtor = _nvkm_instmem_dtor,
165 .init = _nvkm_instmem_init,
166 .fini = nv50_instmem_fini,
167 },
168 .instobj = &nv50_instobj_oclass.base,
169}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index b10e292e5607..ace4471864a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -1,54 +1,20 @@
1#ifndef __NVKM_INSTMEM_PRIV_H__ 1#ifndef __NVKM_INSTMEM_PRIV_H__
2#define __NVKM_INSTMEM_PRIV_H__ 2#define __NVKM_INSTMEM_PRIV_H__
3#define nvkm_instmem(p) container_of((p), struct nvkm_instmem, subdev)
3#include <subdev/instmem.h> 4#include <subdev/instmem.h>
4 5
5struct nvkm_instobj_impl { 6struct nvkm_instmem_func {
6 struct nvkm_oclass base; 7 void *(*dtor)(struct nvkm_instmem *);
8 int (*oneinit)(struct nvkm_instmem *);
9 void (*fini)(struct nvkm_instmem *);
10 u32 (*rd32)(struct nvkm_instmem *, u32 addr);
11 void (*wr32)(struct nvkm_instmem *, u32 addr, u32 data);
12 int (*memory_new)(struct nvkm_instmem *, u32 size, u32 align,
13 bool zero, struct nvkm_memory **);
14 bool persistent;
15 bool zero;
7}; 16};
8 17
9struct nvkm_instobj_args { 18void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
10 u32 size; 19 int index, struct nvkm_instmem *);
11 u32 align;
12};
13
14#define nvkm_instobj_create(p,e,o,d) \
15 nvkm_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
16#define nvkm_instobj_destroy(p) ({ \
17 struct nvkm_instobj *iobj = (p); \
18 _nvkm_instobj_dtor(nv_object(iobj)); \
19})
20#define nvkm_instobj_init(p) \
21 nvkm_object_init(&(p)->base)
22#define nvkm_instobj_fini(p,s) \
23 nvkm_object_fini(&(p)->base, (s))
24
25int nvkm_instobj_create_(struct nvkm_object *, struct nvkm_object *,
26 struct nvkm_oclass *, int, void **);
27void _nvkm_instobj_dtor(struct nvkm_object *);
28#define _nvkm_instobj_init nvkm_object_init
29#define _nvkm_instobj_fini nvkm_object_fini
30
31struct nvkm_instmem_impl {
32 struct nvkm_oclass base;
33 struct nvkm_oclass *instobj;
34};
35
36#define nvkm_instmem_create(p,e,o,d) \
37 nvkm_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
38#define nvkm_instmem_destroy(p) \
39 nvkm_subdev_destroy(&(p)->base)
40#define nvkm_instmem_init(p) ({ \
41 struct nvkm_instmem *imem = (p); \
42 _nvkm_instmem_init(nv_object(imem)); \
43})
44#define nvkm_instmem_fini(p,s) ({ \
45 struct nvkm_instmem *imem = (p); \
46 _nvkm_instmem_fini(nv_object(imem), (s)); \
47})
48
49int nvkm_instmem_create_(struct nvkm_object *, struct nvkm_object *,
50 struct nvkm_oclass *, int, void **);
51#define _nvkm_instmem_dtor _nvkm_subdev_dtor
52int _nvkm_instmem_init(struct nvkm_object *);
53int _nvkm_instmem_fini(struct nvkm_object *, bool);
54#endif 20#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 2fb87fbfd11c..930d25b6e63c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -23,102 +23,110 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26static int 26#include <subdev/fb.h>
27
28int
27nvkm_ltc_tags_alloc(struct nvkm_ltc *ltc, u32 n, struct nvkm_mm_node **pnode) 29nvkm_ltc_tags_alloc(struct nvkm_ltc *ltc, u32 n, struct nvkm_mm_node **pnode)
28{ 30{
29 struct nvkm_ltc_priv *priv = (void *)ltc; 31 int ret = nvkm_mm_head(&ltc->tags, 0, 1, n, n, 1, pnode);
30 int ret;
31
32 ret = nvkm_mm_head(&priv->tags, 0, 1, n, n, 1, pnode);
33 if (ret) 32 if (ret)
34 *pnode = NULL; 33 *pnode = NULL;
35
36 return ret; 34 return ret;
37} 35}
38 36
39static void 37void
40nvkm_ltc_tags_free(struct nvkm_ltc *ltc, struct nvkm_mm_node **pnode) 38nvkm_ltc_tags_free(struct nvkm_ltc *ltc, struct nvkm_mm_node **pnode)
41{ 39{
42 struct nvkm_ltc_priv *priv = (void *)ltc; 40 nvkm_mm_free(&ltc->tags, pnode);
43 nvkm_mm_free(&priv->tags, pnode);
44} 41}
45 42
46static void 43void
47nvkm_ltc_tags_clear(struct nvkm_ltc *ltc, u32 first, u32 count) 44nvkm_ltc_tags_clear(struct nvkm_ltc *ltc, u32 first, u32 count)
48{ 45{
49 const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
50 struct nvkm_ltc_priv *priv = (void *)ltc;
51 const u32 limit = first + count - 1; 46 const u32 limit = first + count - 1;
52 47
53 BUG_ON((first > limit) || (limit >= priv->num_tags)); 48 BUG_ON((first > limit) || (limit >= ltc->num_tags));
54 49
55 impl->cbc_clear(priv, first, limit); 50 ltc->func->cbc_clear(ltc, first, limit);
56 impl->cbc_wait(priv); 51 ltc->func->cbc_wait(ltc);
57} 52}
58 53
59static int 54int
60nvkm_ltc_zbc_color_get(struct nvkm_ltc *ltc, int index, const u32 color[4]) 55nvkm_ltc_zbc_color_get(struct nvkm_ltc *ltc, int index, const u32 color[4])
61{ 56{
62 const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc); 57 memcpy(ltc->zbc_color[index], color, sizeof(ltc->zbc_color[index]));
63 struct nvkm_ltc_priv *priv = (void *)ltc; 58 ltc->func->zbc_clear_color(ltc, index, color);
64 memcpy(priv->zbc_color[index], color, sizeof(priv->zbc_color[index]));
65 impl->zbc_clear_color(priv, index, color);
66 return index; 59 return index;
67} 60}
68 61
69static int 62int
70nvkm_ltc_zbc_depth_get(struct nvkm_ltc *ltc, int index, const u32 depth) 63nvkm_ltc_zbc_depth_get(struct nvkm_ltc *ltc, int index, const u32 depth)
71{ 64{
72 const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc); 65 ltc->zbc_depth[index] = depth;
73 struct nvkm_ltc_priv *priv = (void *)ltc; 66 ltc->func->zbc_clear_depth(ltc, index, depth);
74 priv->zbc_depth[index] = depth;
75 impl->zbc_clear_depth(priv, index, depth);
76 return index; 67 return index;
77} 68}
78 69
79int 70static void
80_nvkm_ltc_init(struct nvkm_object *object) 71nvkm_ltc_intr(struct nvkm_subdev *subdev)
81{ 72{
82 const struct nvkm_ltc_impl *impl = (void *)nv_oclass(object); 73 struct nvkm_ltc *ltc = nvkm_ltc(subdev);
83 struct nvkm_ltc_priv *priv = (void *)object; 74 ltc->func->intr(ltc);
84 int ret, i; 75}
85 76
86 ret = nvkm_subdev_init(&priv->base.base); 77static int
87 if (ret) 78nvkm_ltc_oneinit(struct nvkm_subdev *subdev)
88 return ret; 79{
80 struct nvkm_ltc *ltc = nvkm_ltc(subdev);
81 return ltc->func->oneinit(ltc);
82}
83
84static int
85nvkm_ltc_init(struct nvkm_subdev *subdev)
86{
87 struct nvkm_ltc *ltc = nvkm_ltc(subdev);
88 int i;
89 89
90 for (i = priv->base.zbc_min; i <= priv->base.zbc_max; i++) { 90 for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
91 impl->zbc_clear_color(priv, i, priv->zbc_color[i]); 91 ltc->func->zbc_clear_color(ltc, i, ltc->zbc_color[i]);
92 impl->zbc_clear_depth(priv, i, priv->zbc_depth[i]); 92 ltc->func->zbc_clear_depth(ltc, i, ltc->zbc_depth[i]);
93 } 93 }
94 94
95 ltc->func->init(ltc);
95 return 0; 96 return 0;
96} 97}
97 98
99static void *
100nvkm_ltc_dtor(struct nvkm_subdev *subdev)
101{
102 struct nvkm_ltc *ltc = nvkm_ltc(subdev);
103 struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
104 nvkm_mm_fini(&ltc->tags);
105 if (ram)
106 nvkm_mm_free(&ram->vram, &ltc->tag_ram);
107 return ltc;
108}
109
110static const struct nvkm_subdev_func
111nvkm_ltc = {
112 .dtor = nvkm_ltc_dtor,
113 .oneinit = nvkm_ltc_oneinit,
114 .init = nvkm_ltc_init,
115 .intr = nvkm_ltc_intr,
116};
117
98int 118int
99nvkm_ltc_create_(struct nvkm_object *parent, struct nvkm_object *engine, 119nvkm_ltc_new_(const struct nvkm_ltc_func *func, struct nvkm_device *device,
100 struct nvkm_oclass *oclass, int length, void **pobject) 120 int index, struct nvkm_ltc **pltc)
101{ 121{
102 const struct nvkm_ltc_impl *impl = (void *)oclass; 122 struct nvkm_ltc *ltc;
103 struct nvkm_ltc_priv *priv;
104 int ret;
105 123
106 ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PLTCG", 124 if (!(ltc = *pltc = kzalloc(sizeof(*ltc), GFP_KERNEL)))
107 "l2c", length, pobject); 125 return -ENOMEM;
108 priv = *pobject; 126
109 if (ret) 127 nvkm_subdev_ctor(&nvkm_ltc, device, index, 0, &ltc->subdev);
110 return ret; 128 ltc->func = func;
111 129 ltc->zbc_min = 1; /* reserve 0 for disabled */
112 memset(priv->zbc_color, 0x00, sizeof(priv->zbc_color)); 130 ltc->zbc_max = min(func->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
113 memset(priv->zbc_depth, 0x00, sizeof(priv->zbc_depth));
114
115 priv->base.base.intr = impl->intr;
116 priv->base.tags_alloc = nvkm_ltc_tags_alloc;
117 priv->base.tags_free = nvkm_ltc_tags_free;
118 priv->base.tags_clear = nvkm_ltc_tags_clear;
119 priv->base.zbc_min = 1; /* reserve 0 for disabled */
120 priv->base.zbc_max = min(impl->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
121 priv->base.zbc_color_get = nvkm_ltc_zbc_color_get;
122 priv->base.zbc_depth_get = nvkm_ltc_zbc_depth_get;
123 return 0; 131 return 0;
124} 132}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index 7fb5ea0314cb..45ac765b753e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -28,38 +28,47 @@
28#include <subdev/timer.h> 28#include <subdev/timer.h>
29 29
30void 30void
31gf100_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit) 31gf100_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
32{ 32{
33 nv_wr32(priv, 0x17e8cc, start); 33 struct nvkm_device *device = ltc->subdev.device;
34 nv_wr32(priv, 0x17e8d0, limit); 34 nvkm_wr32(device, 0x17e8cc, start);
35 nv_wr32(priv, 0x17e8c8, 0x00000004); 35 nvkm_wr32(device, 0x17e8d0, limit);
36 nvkm_wr32(device, 0x17e8c8, 0x00000004);
36} 37}
37 38
38void 39void
39gf100_ltc_cbc_wait(struct nvkm_ltc_priv *priv) 40gf100_ltc_cbc_wait(struct nvkm_ltc *ltc)
40{ 41{
42 struct nvkm_device *device = ltc->subdev.device;
41 int c, s; 43 int c, s;
42 for (c = 0; c < priv->ltc_nr; c++) { 44 for (c = 0; c < ltc->ltc_nr; c++) {
43 for (s = 0; s < priv->lts_nr; s++) 45 for (s = 0; s < ltc->lts_nr; s++) {
44 nv_wait(priv, 0x1410c8 + c * 0x2000 + s * 0x400, ~0, 0); 46 const u32 addr = 0x1410c8 + (c * 0x2000) + (s * 0x400);
47 nvkm_msec(device, 2000,
48 if (!nvkm_rd32(device, addr))
49 break;
50 );
51 }
45 } 52 }
46} 53}
47 54
48void 55void
49gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4]) 56gf100_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
50{ 57{
51 nv_mask(priv, 0x17ea44, 0x0000000f, i); 58 struct nvkm_device *device = ltc->subdev.device;
52 nv_wr32(priv, 0x17ea48, color[0]); 59 nvkm_mask(device, 0x17ea44, 0x0000000f, i);
53 nv_wr32(priv, 0x17ea4c, color[1]); 60 nvkm_wr32(device, 0x17ea48, color[0]);
54 nv_wr32(priv, 0x17ea50, color[2]); 61 nvkm_wr32(device, 0x17ea4c, color[1]);
55 nv_wr32(priv, 0x17ea54, color[3]); 62 nvkm_wr32(device, 0x17ea50, color[2]);
63 nvkm_wr32(device, 0x17ea54, color[3]);
56} 64}
57 65
58void 66void
59gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth) 67gf100_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
60{ 68{
61 nv_mask(priv, 0x17ea44, 0x0000000f, i); 69 struct nvkm_device *device = ltc->subdev.device;
62 nv_wr32(priv, 0x17ea58, depth); 70 nvkm_mask(device, 0x17ea44, 0x0000000f, i);
71 nvkm_wr32(device, 0x17ea58, depth);
63} 72}
64 73
65static const struct nvkm_bitfield 74static const struct nvkm_bitfield
@@ -81,88 +90,60 @@ gf100_ltc_lts_intr_name[] = {
81}; 90};
82 91
83static void 92static void
84gf100_ltc_lts_intr(struct nvkm_ltc_priv *priv, int ltc, int lts) 93gf100_ltc_lts_intr(struct nvkm_ltc *ltc, int c, int s)
85{ 94{
86 u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400); 95 struct nvkm_subdev *subdev = &ltc->subdev;
87 u32 intr = nv_rd32(priv, base + 0x020); 96 struct nvkm_device *device = subdev->device;
97 u32 base = 0x141000 + (c * 0x2000) + (s * 0x400);
98 u32 intr = nvkm_rd32(device, base + 0x020);
88 u32 stat = intr & 0x0000ffff; 99 u32 stat = intr & 0x0000ffff;
100 char msg[128];
89 101
90 if (stat) { 102 if (stat) {
91 nv_info(priv, "LTC%d_LTS%d:", ltc, lts); 103 nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat);
92 nvkm_bitfield_print(gf100_ltc_lts_intr_name, stat); 104 nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, stat, msg);
93 pr_cont("\n");
94 } 105 }
95 106
96 nv_wr32(priv, base + 0x020, intr); 107 nvkm_wr32(device, base + 0x020, intr);
97} 108}
98 109
99void 110void
100gf100_ltc_intr(struct nvkm_subdev *subdev) 111gf100_ltc_intr(struct nvkm_ltc *ltc)
101{ 112{
102 struct nvkm_ltc_priv *priv = (void *)subdev; 113 struct nvkm_device *device = ltc->subdev.device;
103 u32 mask; 114 u32 mask;
104 115
105 mask = nv_rd32(priv, 0x00017c); 116 mask = nvkm_rd32(device, 0x00017c);
106 while (mask) { 117 while (mask) {
107 u32 lts, ltc = __ffs(mask); 118 u32 s, c = __ffs(mask);
108 for (lts = 0; lts < priv->lts_nr; lts++) 119 for (s = 0; s < ltc->lts_nr; s++)
109 gf100_ltc_lts_intr(priv, ltc, lts); 120 gf100_ltc_lts_intr(ltc, c, s);
110 mask &= ~(1 << ltc); 121 mask &= ~(1 << c);
111 } 122 }
112} 123}
113 124
114static int
115gf100_ltc_init(struct nvkm_object *object)
116{
117 struct nvkm_ltc_priv *priv = (void *)object;
118 u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
119 int ret;
120
121 ret = nvkm_ltc_init(priv);
122 if (ret)
123 return ret;
124
125 nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
126 nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
127 nv_wr32(priv, 0x17e8d4, priv->tag_base);
128 nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
129 return 0;
130}
131
132void
133gf100_ltc_dtor(struct nvkm_object *object)
134{
135 struct nvkm_fb *pfb = nvkm_fb(object);
136 struct nvkm_ltc_priv *priv = (void *)object;
137
138 nvkm_mm_fini(&priv->tags);
139 if (pfb->ram)
140 nvkm_mm_free(&pfb->vram, &priv->tag_ram);
141
142 nvkm_ltc_destroy(priv);
143}
144
145/* TODO: Figure out tag memory details and drop the over-cautious allocation. 125/* TODO: Figure out tag memory details and drop the over-cautious allocation.
146 */ 126 */
147int 127int
148gf100_ltc_init_tag_ram(struct nvkm_fb *pfb, struct nvkm_ltc_priv *priv) 128gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
149{ 129{
130 struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
150 u32 tag_size, tag_margin, tag_align; 131 u32 tag_size, tag_margin, tag_align;
151 int ret; 132 int ret;
152 133
153 /* No VRAM, no tags for now. */ 134 /* No VRAM, no tags for now. */
154 if (!pfb->ram) { 135 if (!ram) {
155 priv->num_tags = 0; 136 ltc->num_tags = 0;
156 goto mm_init; 137 goto mm_init;
157 } 138 }
158 139
159 /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ 140 /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
160 priv->num_tags = (pfb->ram->size >> 17) / 4; 141 ltc->num_tags = (ram->size >> 17) / 4;
161 if (priv->num_tags > (1 << 17)) 142 if (ltc->num_tags > (1 << 17))
162 priv->num_tags = 1 << 17; /* we have 17 bits in PTE */ 143 ltc->num_tags = 1 << 17; /* we have 17 bits in PTE */
163 priv->num_tags = (priv->num_tags + 63) & ~63; /* round up to 64 */ 144 ltc->num_tags = (ltc->num_tags + 63) & ~63; /* round up to 64 */
164 145
165 tag_align = priv->ltc_nr * 0x800; 146 tag_align = ltc->ltc_nr * 0x800;
166 tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align; 147 tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;
167 148
168 /* 4 part 4 sub: 0x2000 bytes for 56 tags */ 149 /* 4 part 4 sub: 0x2000 bytes for 56 tags */
@@ -173,72 +154,71 @@ gf100_ltc_init_tag_ram(struct nvkm_fb *pfb, struct nvkm_ltc_priv *priv)
173 * 154 *
174 * For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %. 155 * For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %.
175 */ 156 */
176 tag_size = (priv->num_tags / 64) * 0x6000 + tag_margin; 157 tag_size = (ltc->num_tags / 64) * 0x6000 + tag_margin;
177 tag_size += tag_align; 158 tag_size += tag_align;
178 tag_size = (tag_size + 0xfff) >> 12; /* round up */ 159 tag_size = (tag_size + 0xfff) >> 12; /* round up */
179 160
180 ret = nvkm_mm_tail(&pfb->vram, 1, 1, tag_size, tag_size, 1, 161 ret = nvkm_mm_tail(&ram->vram, 1, 1, tag_size, tag_size, 1,
181 &priv->tag_ram); 162 &ltc->tag_ram);
182 if (ret) { 163 if (ret) {
183 priv->num_tags = 0; 164 ltc->num_tags = 0;
184 } else { 165 } else {
185 u64 tag_base = ((u64)priv->tag_ram->offset << 12) + tag_margin; 166 u64 tag_base = ((u64)ltc->tag_ram->offset << 12) + tag_margin;
186 167
187 tag_base += tag_align - 1; 168 tag_base += tag_align - 1;
188 ret = do_div(tag_base, tag_align); 169 do_div(tag_base, tag_align);
189 170
190 priv->tag_base = tag_base; 171 ltc->tag_base = tag_base;
191 } 172 }
192 173
193mm_init: 174mm_init:
194 ret = nvkm_mm_init(&priv->tags, 0, priv->num_tags, 1); 175 return nvkm_mm_init(&ltc->tags, 0, ltc->num_tags, 1);
195 return ret;
196} 176}
197 177
198int 178int
199gf100_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 179gf100_ltc_oneinit(struct nvkm_ltc *ltc)
200 struct nvkm_oclass *oclass, void *data, u32 size,
201 struct nvkm_object **pobject)
202{ 180{
203 struct nvkm_fb *pfb = nvkm_fb(parent); 181 struct nvkm_device *device = ltc->subdev.device;
204 struct nvkm_ltc_priv *priv; 182 const u32 parts = nvkm_rd32(device, 0x022438);
205 u32 parts, mask; 183 const u32 mask = nvkm_rd32(device, 0x022554);
206 int ret, i; 184 const u32 slice = nvkm_rd32(device, 0x17e8dc) >> 28;
207 185 int i;
208 ret = nvkm_ltc_create(parent, engine, oclass, &priv); 186
209 *pobject = nv_object(priv);
210 if (ret)
211 return ret;
212
213 parts = nv_rd32(priv, 0x022438);
214 mask = nv_rd32(priv, 0x022554);
215 for (i = 0; i < parts; i++) { 187 for (i = 0; i < parts; i++) {
216 if (!(mask & (1 << i))) 188 if (!(mask & (1 << i)))
217 priv->ltc_nr++; 189 ltc->ltc_nr++;
218 } 190 }
219 priv->lts_nr = nv_rd32(priv, 0x17e8dc) >> 28; 191 ltc->lts_nr = slice;
192
193 return gf100_ltc_oneinit_tag_ram(ltc);
194}
220 195
221 ret = gf100_ltc_init_tag_ram(pfb, priv); 196static void
222 if (ret) 197gf100_ltc_init(struct nvkm_ltc *ltc)
223 return ret; 198{
199 struct nvkm_device *device = ltc->subdev.device;
200 u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
224 201
225 nv_subdev(priv)->intr = gf100_ltc_intr; 202 nvkm_mask(device, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
226 return 0; 203 nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr);
204 nvkm_wr32(device, 0x17e8d4, ltc->tag_base);
205 nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
227} 206}
228 207
229struct nvkm_oclass * 208static const struct nvkm_ltc_func
230gf100_ltc_oclass = &(struct nvkm_ltc_impl) { 209gf100_ltc = {
231 .base.handle = NV_SUBDEV(LTC, 0xc0), 210 .oneinit = gf100_ltc_oneinit,
232 .base.ofuncs = &(struct nvkm_ofuncs) { 211 .init = gf100_ltc_init,
233 .ctor = gf100_ltc_ctor,
234 .dtor = gf100_ltc_dtor,
235 .init = gf100_ltc_init,
236 .fini = _nvkm_ltc_fini,
237 },
238 .intr = gf100_ltc_intr, 212 .intr = gf100_ltc_intr,
239 .cbc_clear = gf100_ltc_cbc_clear, 213 .cbc_clear = gf100_ltc_cbc_clear,
240 .cbc_wait = gf100_ltc_cbc_wait, 214 .cbc_wait = gf100_ltc_cbc_wait,
241 .zbc = 16, 215 .zbc = 16,
242 .zbc_clear_color = gf100_ltc_zbc_clear_color, 216 .zbc_clear_color = gf100_ltc_zbc_clear_color,
243 .zbc_clear_depth = gf100_ltc_zbc_clear_depth, 217 .zbc_clear_depth = gf100_ltc_zbc_clear_depth,
244}.base; 218};
219
220int
221gf100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
222{
223 return nvkm_ltc_new_(&gf100_ltc, device, index, pltc);
224}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
index d53959b5ec67..839e6b4c597b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
@@ -23,37 +23,32 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26static int 26static void
27gk104_ltc_init(struct nvkm_object *object) 27gk104_ltc_init(struct nvkm_ltc *ltc)
28{ 28{
29 struct nvkm_ltc_priv *priv = (void *)object; 29 struct nvkm_device *device = ltc->subdev.device;
30 u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001); 30 u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
31 int ret;
32 31
33 ret = nvkm_ltc_init(priv); 32 nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr);
34 if (ret) 33 nvkm_wr32(device, 0x17e000, ltc->ltc_nr);
35 return ret; 34 nvkm_wr32(device, 0x17e8d4, ltc->tag_base);
36 35 nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
37 nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
38 nv_wr32(priv, 0x17e000, priv->ltc_nr);
39 nv_wr32(priv, 0x17e8d4, priv->tag_base);
40 nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
41 return 0;
42} 36}
43 37
44struct nvkm_oclass * 38static const struct nvkm_ltc_func
45gk104_ltc_oclass = &(struct nvkm_ltc_impl) { 39gk104_ltc = {
46 .base.handle = NV_SUBDEV(LTC, 0xe4), 40 .oneinit = gf100_ltc_oneinit,
47 .base.ofuncs = &(struct nvkm_ofuncs) { 41 .init = gk104_ltc_init,
48 .ctor = gf100_ltc_ctor,
49 .dtor = gf100_ltc_dtor,
50 .init = gk104_ltc_init,
51 .fini = _nvkm_ltc_fini,
52 },
53 .intr = gf100_ltc_intr, 42 .intr = gf100_ltc_intr,
54 .cbc_clear = gf100_ltc_cbc_clear, 43 .cbc_clear = gf100_ltc_cbc_clear,
55 .cbc_wait = gf100_ltc_cbc_wait, 44 .cbc_wait = gf100_ltc_cbc_wait,
56 .zbc = 16, 45 .zbc = 16,
57 .zbc_clear_color = gf100_ltc_zbc_clear_color, 46 .zbc_clear_color = gf100_ltc_zbc_clear_color,
58 .zbc_clear_depth = gf100_ltc_zbc_clear_depth, 47 .zbc_clear_depth = gf100_ltc_zbc_clear_depth,
59}.base; 48};
49
50int
51gk104_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
52{
53 return nvkm_ltc_new_(&gk104_ltc, device, index, pltc);
54}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index 6b3f6f4ce107..389331bb63ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -27,127 +27,121 @@
27#include <subdev/timer.h> 27#include <subdev/timer.h>
28 28
29static void 29static void
30gm107_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit) 30gm107_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
31{ 31{
32 nv_wr32(priv, 0x17e270, start); 32 struct nvkm_device *device = ltc->subdev.device;
33 nv_wr32(priv, 0x17e274, limit); 33 nvkm_wr32(device, 0x17e270, start);
34 nv_wr32(priv, 0x17e26c, 0x00000004); 34 nvkm_wr32(device, 0x17e274, limit);
35 nvkm_wr32(device, 0x17e26c, 0x00000004);
35} 36}
36 37
37static void 38static void
38gm107_ltc_cbc_wait(struct nvkm_ltc_priv *priv) 39gm107_ltc_cbc_wait(struct nvkm_ltc *ltc)
39{ 40{
41 struct nvkm_device *device = ltc->subdev.device;
40 int c, s; 42 int c, s;
41 for (c = 0; c < priv->ltc_nr; c++) { 43 for (c = 0; c < ltc->ltc_nr; c++) {
42 for (s = 0; s < priv->lts_nr; s++) 44 for (s = 0; s < ltc->lts_nr; s++) {
43 nv_wait(priv, 0x14046c + c * 0x2000 + s * 0x200, ~0, 0); 45 const u32 addr = 0x14046c + (c * 0x2000) + (s * 0x200);
46 nvkm_msec(device, 2000,
47 if (!nvkm_rd32(device, addr))
48 break;
49 );
50 }
44 } 51 }
45} 52}
46 53
47static void 54static void
48gm107_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4]) 55gm107_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
49{ 56{
50 nv_mask(priv, 0x17e338, 0x0000000f, i); 57 struct nvkm_device *device = ltc->subdev.device;
51 nv_wr32(priv, 0x17e33c, color[0]); 58 nvkm_mask(device, 0x17e338, 0x0000000f, i);
52 nv_wr32(priv, 0x17e340, color[1]); 59 nvkm_wr32(device, 0x17e33c, color[0]);
53 nv_wr32(priv, 0x17e344, color[2]); 60 nvkm_wr32(device, 0x17e340, color[1]);
54 nv_wr32(priv, 0x17e348, color[3]); 61 nvkm_wr32(device, 0x17e344, color[2]);
62 nvkm_wr32(device, 0x17e348, color[3]);
55} 63}
56 64
57static void 65static void
58gm107_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth) 66gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
59{ 67{
60 nv_mask(priv, 0x17e338, 0x0000000f, i); 68 struct nvkm_device *device = ltc->subdev.device;
61 nv_wr32(priv, 0x17e34c, depth); 69 nvkm_mask(device, 0x17e338, 0x0000000f, i);
70 nvkm_wr32(device, 0x17e34c, depth);
62} 71}
63 72
64static void 73static void
65gm107_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts) 74gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
66{ 75{
67 u32 base = 0x140000 + (ltc * 0x2000) + (lts * 0x400); 76 struct nvkm_subdev *subdev = &ltc->subdev;
68 u32 stat = nv_rd32(priv, base + 0x00c); 77 struct nvkm_device *device = subdev->device;
78 u32 base = 0x140000 + (c * 0x2000) + (s * 0x400);
79 u32 stat = nvkm_rd32(device, base + 0x00c);
69 80
70 if (stat) { 81 if (stat) {
71 nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", ltc, lts, stat); 82 nvkm_error(subdev, "LTC%d_LTS%d: %08x\n", c, s, stat);
72 nv_wr32(priv, base + 0x00c, stat); 83 nvkm_wr32(device, base + 0x00c, stat);
73 } 84 }
74} 85}
75 86
76static void 87static void
77gm107_ltc_intr(struct nvkm_subdev *subdev) 88gm107_ltc_intr(struct nvkm_ltc *ltc)
78{ 89{
79 struct nvkm_ltc_priv *priv = (void *)subdev; 90 struct nvkm_device *device = ltc->subdev.device;
80 u32 mask; 91 u32 mask;
81 92
82 mask = nv_rd32(priv, 0x00017c); 93 mask = nvkm_rd32(device, 0x00017c);
83 while (mask) { 94 while (mask) {
84 u32 lts, ltc = __ffs(mask); 95 u32 s, c = __ffs(mask);
85 for (lts = 0; lts < priv->lts_nr; lts++) 96 for (s = 0; s < ltc->lts_nr; s++)
86 gm107_ltc_lts_isr(priv, ltc, lts); 97 gm107_ltc_lts_isr(ltc, c, s);
87 mask &= ~(1 << ltc); 98 mask &= ~(1 << c);
88 } 99 }
89} 100}
90 101
91static int 102static int
92gm107_ltc_init(struct nvkm_object *object) 103gm107_ltc_oneinit(struct nvkm_ltc *ltc)
93{ 104{
94 struct nvkm_ltc_priv *priv = (void *)object; 105 struct nvkm_device *device = ltc->subdev.device;
95 u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001); 106 const u32 parts = nvkm_rd32(device, 0x022438);
96 int ret; 107 const u32 mask = nvkm_rd32(device, 0x021c14);
97 108 const u32 slice = nvkm_rd32(device, 0x17e280) >> 28;
98 ret = nvkm_ltc_init(priv); 109 int i;
99 if (ret)
100 return ret;
101
102 nv_wr32(priv, 0x17e27c, priv->ltc_nr);
103 nv_wr32(priv, 0x17e278, priv->tag_base);
104 nv_mask(priv, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
105 return 0;
106}
107 110
108static int
109gm107_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
110 struct nvkm_oclass *oclass, void *data, u32 size,
111 struct nvkm_object **pobject)
112{
113 struct nvkm_fb *pfb = nvkm_fb(parent);
114 struct nvkm_ltc_priv *priv;
115 u32 parts, mask;
116 int ret, i;
117
118 ret = nvkm_ltc_create(parent, engine, oclass, &priv);
119 *pobject = nv_object(priv);
120 if (ret)
121 return ret;
122
123 parts = nv_rd32(priv, 0x022438);
124 mask = nv_rd32(priv, 0x021c14);
125 for (i = 0; i < parts; i++) { 111 for (i = 0; i < parts; i++) {
126 if (!(mask & (1 << i))) 112 if (!(mask & (1 << i)))
127 priv->ltc_nr++; 113 ltc->ltc_nr++;
128 } 114 }
129 priv->lts_nr = nv_rd32(priv, 0x17e280) >> 28; 115 ltc->lts_nr = slice;
116
117 return gf100_ltc_oneinit_tag_ram(ltc);
118}
130 119
131 ret = gf100_ltc_init_tag_ram(pfb, priv); 120static void
132 if (ret) 121gm107_ltc_init(struct nvkm_ltc *ltc)
133 return ret; 122{
123 struct nvkm_device *device = ltc->subdev.device;
124 u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
134 125
135 return 0; 126 nvkm_wr32(device, 0x17e27c, ltc->ltc_nr);
127 nvkm_wr32(device, 0x17e278, ltc->tag_base);
128 nvkm_mask(device, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
136} 129}
137 130
138struct nvkm_oclass * 131static const struct nvkm_ltc_func
139gm107_ltc_oclass = &(struct nvkm_ltc_impl) { 132gm107_ltc = {
140 .base.handle = NV_SUBDEV(LTC, 0xff), 133 .oneinit = gm107_ltc_oneinit,
141 .base.ofuncs = &(struct nvkm_ofuncs) { 134 .init = gm107_ltc_init,
142 .ctor = gm107_ltc_ctor,
143 .dtor = gf100_ltc_dtor,
144 .init = gm107_ltc_init,
145 .fini = _nvkm_ltc_fini,
146 },
147 .intr = gm107_ltc_intr, 135 .intr = gm107_ltc_intr,
148 .cbc_clear = gm107_ltc_cbc_clear, 136 .cbc_clear = gm107_ltc_cbc_clear,
149 .cbc_wait = gm107_ltc_cbc_wait, 137 .cbc_wait = gm107_ltc_cbc_wait,
150 .zbc = 16, 138 .zbc = 16,
151 .zbc_clear_color = gm107_ltc_zbc_clear_color, 139 .zbc_clear_color = gm107_ltc_zbc_clear_color,
152 .zbc_clear_depth = gm107_ltc_zbc_clear_depth, 140 .zbc_clear_depth = gm107_ltc_zbc_clear_depth,
153}.base; 141};
142
143int
144gm107_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
145{
146 return nvkm_ltc_new_(&gm107_ltc, device, index, pltc);
147}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 09537d7b6783..4e05037cc99f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -1,69 +1,29 @@
1#ifndef __NVKM_LTC_PRIV_H__ 1#ifndef __NVKM_LTC_PRIV_H__
2#define __NVKM_LTC_PRIV_H__ 2#define __NVKM_LTC_PRIV_H__
3#define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev)
3#include <subdev/ltc.h> 4#include <subdev/ltc.h>
4 5
5#include <core/mm.h> 6int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *,
6struct nvkm_fb; 7 int index, struct nvkm_ltc **);
7 8
8struct nvkm_ltc_priv { 9struct nvkm_ltc_func {
9 struct nvkm_ltc base; 10 int (*oneinit)(struct nvkm_ltc *);
10 u32 ltc_nr; 11 void (*init)(struct nvkm_ltc *);
11 u32 lts_nr; 12 void (*intr)(struct nvkm_ltc *);
12 13
13 u32 num_tags; 14 void (*cbc_clear)(struct nvkm_ltc *, u32 start, u32 limit);
14 u32 tag_base; 15 void (*cbc_wait)(struct nvkm_ltc *);
15 struct nvkm_mm tags;
16 struct nvkm_mm_node *tag_ram;
17
18 u32 zbc_color[NVKM_LTC_MAX_ZBC_CNT][4];
19 u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
20};
21
22#define nvkm_ltc_create(p,e,o,d) \
23 nvkm_ltc_create_((p), (e), (o), sizeof(**d), (void **)d)
24#define nvkm_ltc_destroy(p) ({ \
25 struct nvkm_ltc_priv *_priv = (p); \
26 _nvkm_ltc_dtor(nv_object(_priv)); \
27})
28#define nvkm_ltc_init(p) ({ \
29 struct nvkm_ltc_priv *_priv = (p); \
30 _nvkm_ltc_init(nv_object(_priv)); \
31})
32#define nvkm_ltc_fini(p,s) ({ \
33 struct nvkm_ltc_priv *_priv = (p); \
34 _nvkm_ltc_fini(nv_object(_priv), (s)); \
35})
36
37int nvkm_ltc_create_(struct nvkm_object *, struct nvkm_object *,
38 struct nvkm_oclass *, int, void **);
39
40#define _nvkm_ltc_dtor _nvkm_subdev_dtor
41int _nvkm_ltc_init(struct nvkm_object *);
42#define _nvkm_ltc_fini _nvkm_subdev_fini
43
44int gf100_ltc_ctor(struct nvkm_object *, struct nvkm_object *,
45 struct nvkm_oclass *, void *, u32,
46 struct nvkm_object **);
47void gf100_ltc_dtor(struct nvkm_object *);
48int gf100_ltc_init_tag_ram(struct nvkm_fb *, struct nvkm_ltc_priv *);
49int gf100_ltc_tags_alloc(struct nvkm_ltc *, u32, struct nvkm_mm_node **);
50void gf100_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);
51
52struct nvkm_ltc_impl {
53 struct nvkm_oclass base;
54 void (*intr)(struct nvkm_subdev *);
55
56 void (*cbc_clear)(struct nvkm_ltc_priv *, u32 start, u32 limit);
57 void (*cbc_wait)(struct nvkm_ltc_priv *);
58 16
59 int zbc; 17 int zbc;
60 void (*zbc_clear_color)(struct nvkm_ltc_priv *, int, const u32[4]); 18 void (*zbc_clear_color)(struct nvkm_ltc *, int, const u32[4]);
61 void (*zbc_clear_depth)(struct nvkm_ltc_priv *, int, const u32); 19 void (*zbc_clear_depth)(struct nvkm_ltc *, int, const u32);
62}; 20};
63 21
64void gf100_ltc_intr(struct nvkm_subdev *); 22int gf100_ltc_oneinit(struct nvkm_ltc *);
65void gf100_ltc_cbc_clear(struct nvkm_ltc_priv *, u32, u32); 23int gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *);
66void gf100_ltc_cbc_wait(struct nvkm_ltc_priv *); 24void gf100_ltc_intr(struct nvkm_ltc *);
67void gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *, int, const u32[4]); 25void gf100_ltc_cbc_clear(struct nvkm_ltc *, u32, u32);
68void gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *, int, const u32); 26void gf100_ltc_cbc_wait(struct nvkm_ltc *);
27void gf100_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
28void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
69#endif 29#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 721643f04bb5..bef325dcb4d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -1,11 +1,7 @@
1nvkm-y += nvkm/subdev/mc/base.o 1nvkm-y += nvkm/subdev/mc/base.o
2nvkm-y += nvkm/subdev/mc/nv04.o 2nvkm-y += nvkm/subdev/mc/nv04.o
3nvkm-y += nvkm/subdev/mc/nv40.o
4nvkm-y += nvkm/subdev/mc/nv44.o 3nvkm-y += nvkm/subdev/mc/nv44.o
5nvkm-y += nvkm/subdev/mc/nv4c.o
6nvkm-y += nvkm/subdev/mc/nv50.o 4nvkm-y += nvkm/subdev/mc/nv50.o
7nvkm-y += nvkm/subdev/mc/g94.o
8nvkm-y += nvkm/subdev/mc/g98.o 5nvkm-y += nvkm/subdev/mc/g98.o
9nvkm-y += nvkm/subdev/mc/gf100.o 6nvkm-y += nvkm/subdev/mc/gf100.o
10nvkm-y += nvkm/subdev/mc/gf106.o
11nvkm-y += nvkm/subdev/mc/gk20a.o 7nvkm-y += nvkm/subdev/mc/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 5b051a26653e..954fbbe56c4b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -23,147 +23,101 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26#include <core/device.h>
27#include <core/option.h> 26#include <core/option.h>
28 27
29static inline void 28void
30nvkm_mc_unk260(struct nvkm_mc *pmc, u32 data) 29nvkm_mc_unk260(struct nvkm_mc *mc, u32 data)
31{ 30{
32 const struct nvkm_mc_oclass *impl = (void *)nv_oclass(pmc); 31 if (mc->func->unk260)
33 if (impl->unk260) 32 mc->func->unk260(mc, data);
34 impl->unk260(pmc, data);
35} 33}
36 34
37static inline u32 35void
38nvkm_mc_intr_mask(struct nvkm_mc *pmc) 36nvkm_mc_intr_unarm(struct nvkm_mc *mc)
39{ 37{
40 u32 intr = nv_rd32(pmc, 0x000100); 38 return mc->func->intr_unarm(mc);
41 if (intr == 0xffffffff) /* likely fallen off the bus */
42 intr = 0x00000000;
43 return intr;
44} 39}
45 40
46static irqreturn_t 41void
47nvkm_mc_intr(int irq, void *arg) 42nvkm_mc_intr_rearm(struct nvkm_mc *mc)
48{ 43{
49 struct nvkm_mc *pmc = arg; 44 return mc->func->intr_rearm(mc);
50 const struct nvkm_mc_oclass *oclass = (void *)nv_object(pmc)->oclass; 45}
51 const struct nvkm_mc_intr *map = oclass->intr;
52 struct nvkm_subdev *unit;
53 u32 intr;
54 46
55 nv_wr32(pmc, 0x000140, 0x00000000); 47static u32
56 nv_rd32(pmc, 0x000140); 48nvkm_mc_intr_mask(struct nvkm_mc *mc)
57 intr = nvkm_mc_intr_mask(pmc); 49{
58 if (pmc->use_msi) 50 u32 intr = mc->func->intr_mask(mc);
59 oclass->msi_rearm(pmc); 51 if (WARN_ON_ONCE(intr == 0xffffffff))
52 intr = 0; /* likely fallen off the bus */
53 return intr;
54}
60 55
61 if (intr) { 56void
62 u32 stat = intr = nvkm_mc_intr_mask(pmc); 57nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
63 while (map->stat) { 58{
64 if (intr & map->stat) { 59 struct nvkm_device *device = mc->subdev.device;
65 unit = nvkm_subdev(pmc, map->unit); 60 struct nvkm_subdev *subdev;
66 if (unit && unit->intr) 61 const struct nvkm_mc_intr *map = mc->func->intr;
67 unit->intr(unit); 62 u32 stat, intr;
68 stat &= ~map->stat; 63
69 } 64 stat = intr = nvkm_mc_intr_mask(mc);
70 map++; 65 while (map->stat) {
66 if (intr & map->stat) {
67 subdev = nvkm_device_subdev(device, map->unit);
68 if (subdev)
69 nvkm_subdev_intr(subdev);
70 stat &= ~map->stat;
71 } 71 }
72 72 map++;
73 if (stat)
74 nv_error(pmc, "unknown intr 0x%08x\n", stat);
75 } 73 }
76 74
77 nv_wr32(pmc, 0x000140, 0x00000001); 75 if (stat)
78 return intr ? IRQ_HANDLED : IRQ_NONE; 76 nvkm_error(&mc->subdev, "intr %08x\n", stat);
77 *handled = intr != 0;
79} 78}
80 79
81int 80static int
82_nvkm_mc_fini(struct nvkm_object *object, bool suspend) 81nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
83{ 82{
84 struct nvkm_mc *pmc = (void *)object; 83 struct nvkm_mc *mc = nvkm_mc(subdev);
85 nv_wr32(pmc, 0x000140, 0x00000000); 84 nvkm_mc_intr_unarm(mc);
86 return nvkm_subdev_fini(&pmc->base, suspend); 85 return 0;
87} 86}
88 87
89int 88static int
90_nvkm_mc_init(struct nvkm_object *object) 89nvkm_mc_init(struct nvkm_subdev *subdev)
91{ 90{
92 struct nvkm_mc *pmc = (void *)object; 91 struct nvkm_mc *mc = nvkm_mc(subdev);
93 int ret = nvkm_subdev_init(&pmc->base); 92 if (mc->func->init)
94 if (ret) 93 mc->func->init(mc);
95 return ret; 94 nvkm_mc_intr_rearm(mc);
96 nv_wr32(pmc, 0x000140, 0x00000001);
97 return 0; 95 return 0;
98} 96}
99 97
100void 98static void *
101_nvkm_mc_dtor(struct nvkm_object *object) 99nvkm_mc_dtor(struct nvkm_subdev *subdev)
102{ 100{
103 struct nvkm_device *device = nv_device(object); 101 return nvkm_mc(subdev);
104 struct nvkm_mc *pmc = (void *)object;
105 free_irq(pmc->irq, pmc);
106 if (pmc->use_msi)
107 pci_disable_msi(device->pdev);
108 nvkm_subdev_destroy(&pmc->base);
109} 102}
110 103
104static const struct nvkm_subdev_func
105nvkm_mc = {
106 .dtor = nvkm_mc_dtor,
107 .init = nvkm_mc_init,
108 .fini = nvkm_mc_fini,
109};
110
111int 111int
112nvkm_mc_create_(struct nvkm_object *parent, struct nvkm_object *engine, 112nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
113 struct nvkm_oclass *bclass, int length, void **pobject) 113 int index, struct nvkm_mc **pmc)
114{ 114{
115 const struct nvkm_mc_oclass *oclass = (void *)bclass; 115 struct nvkm_mc *mc;
116 struct nvkm_device *device = nv_device(parent);
117 struct nvkm_mc *pmc;
118 int ret;
119
120 ret = nvkm_subdev_create_(parent, engine, bclass, 0, "PMC",
121 "master", length, pobject);
122 pmc = *pobject;
123 if (ret)
124 return ret;
125
126 pmc->unk260 = nvkm_mc_unk260;
127
128 if (nv_device_is_pci(device)) {
129 switch (device->pdev->device & 0x0ff0) {
130 case 0x00f0:
131 case 0x02e0:
132 /* BR02? NFI how these would be handled yet exactly */
133 break;
134 default:
135 switch (device->chipset) {
136 case 0xaa:
137 /* reported broken, nv also disable it */
138 break;
139 default:
140 pmc->use_msi = true;
141 break;
142 }
143 }
144
145 pmc->use_msi = nvkm_boolopt(device->cfgopt, "NvMSI",
146 pmc->use_msi);
147
148 if (pmc->use_msi && oclass->msi_rearm) {
149 pmc->use_msi = pci_enable_msi(device->pdev) == 0;
150 if (pmc->use_msi) {
151 nv_info(pmc, "MSI interrupts enabled\n");
152 oclass->msi_rearm(pmc);
153 }
154 } else {
155 pmc->use_msi = false;
156 }
157 }
158
159 ret = nv_device_get_irq(device, true);
160 if (ret < 0)
161 return ret;
162 pmc->irq = ret;
163 116
164 ret = request_irq(pmc->irq, nvkm_mc_intr, IRQF_SHARED, "nvkm", pmc); 117 if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
165 if (ret < 0) 118 return -ENOMEM;
166 return ret;
167 119
120 nvkm_subdev_ctor(&nvkm_mc, device, index, 0, &mc->subdev);
121 mc->func = func;
168 return 0; 122 return 0;
169} 123}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
index 8ab7f1272a14..7344ad659105 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
@@ -21,38 +21,40 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "priv.h"
25 25
26static const struct nvkm_mc_intr 26static const struct nvkm_mc_intr
27g98_mc_intr[] = { 27g98_mc_intr[] = {
28 { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP first, so pageflip timestamps work */ 28 { 0x04000000, NVKM_ENGINE_DISP }, /* DISP first, so pageflip timestamps work */
29 { 0x00000001, NVDEV_ENGINE_MSPPP }, 29 { 0x00000001, NVKM_ENGINE_MSPPP },
30 { 0x00000100, NVDEV_ENGINE_FIFO }, 30 { 0x00000100, NVKM_ENGINE_FIFO },
31 { 0x00001000, NVDEV_ENGINE_GR }, 31 { 0x00001000, NVKM_ENGINE_GR },
32 { 0x00004000, NVDEV_ENGINE_SEC }, /* NV84:NVA3 */ 32 { 0x00004000, NVKM_ENGINE_SEC }, /* NV84:NVA3 */
33 { 0x00008000, NVDEV_ENGINE_MSVLD }, 33 { 0x00008000, NVKM_ENGINE_MSVLD },
34 { 0x00020000, NVDEV_ENGINE_MSPDEC }, 34 { 0x00020000, NVKM_ENGINE_MSPDEC },
35 { 0x00040000, NVDEV_SUBDEV_PMU }, /* NVA3:NVC0 */ 35 { 0x00040000, NVKM_SUBDEV_PMU }, /* NVA3:NVC0 */
36 { 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */ 36 { 0x00080000, NVKM_SUBDEV_THERM }, /* NVA3:NVC0 */
37 { 0x00100000, NVDEV_SUBDEV_TIMER }, 37 { 0x00100000, NVKM_SUBDEV_TIMER },
38 { 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */ 38 { 0x00200000, NVKM_SUBDEV_GPIO }, /* PMGR->GPIO */
39 { 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */ 39 { 0x00200000, NVKM_SUBDEV_I2C }, /* PMGR->I2C/AUX */
40 { 0x00400000, NVDEV_ENGINE_CE0 }, /* NVA3- */ 40 { 0x00400000, NVKM_ENGINE_CE0 }, /* NVA3- */
41 { 0x10000000, NVDEV_SUBDEV_BUS }, 41 { 0x10000000, NVKM_SUBDEV_BUS },
42 { 0x80000000, NVDEV_ENGINE_SW }, 42 { 0x80000000, NVKM_ENGINE_SW },
43 { 0x0042d101, NVDEV_SUBDEV_FB }, 43 { 0x0042d101, NVKM_SUBDEV_FB },
44 {}, 44 {},
45}; 45};
46 46
47struct nvkm_oclass * 47static const struct nvkm_mc_func
48g98_mc_oclass = &(struct nvkm_mc_oclass) { 48g98_mc = {
49 .base.handle = NV_SUBDEV(MC, 0x98), 49 .init = nv50_mc_init,
50 .base.ofuncs = &(struct nvkm_ofuncs) {
51 .ctor = nv04_mc_ctor,
52 .dtor = _nvkm_mc_dtor,
53 .init = nv50_mc_init,
54 .fini = _nvkm_mc_fini,
55 },
56 .intr = g98_mc_intr, 50 .intr = g98_mc_intr,
57 .msi_rearm = nv40_mc_msi_rearm, 51 .intr_unarm = nv04_mc_intr_unarm,
58}.base; 52 .intr_rearm = nv04_mc_intr_rearm,
53 .intr_mask = nv04_mc_intr_mask,
54};
55
56int
57g98_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
58{
59 return nvkm_mc_new_(&g98_mc, device, index, pmc);
60}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
index 2425984b045e..122fe69e83e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
@@ -21,56 +21,77 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "priv.h"
25 25
26const struct nvkm_mc_intr 26const struct nvkm_mc_intr
27gf100_mc_intr[] = { 27gf100_mc_intr[] = {
28 { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP first, so pageflip timestamps work. */ 28 { 0x04000000, NVKM_ENGINE_DISP }, /* DISP first, so pageflip timestamps work. */
29 { 0x00000001, NVDEV_ENGINE_MSPPP }, 29 { 0x00000001, NVKM_ENGINE_MSPPP },
30 { 0x00000020, NVDEV_ENGINE_CE0 }, 30 { 0x00000020, NVKM_ENGINE_CE0 },
31 { 0x00000040, NVDEV_ENGINE_CE1 }, 31 { 0x00000040, NVKM_ENGINE_CE1 },
32 { 0x00000080, NVDEV_ENGINE_CE2 }, 32 { 0x00000080, NVKM_ENGINE_CE2 },
33 { 0x00000100, NVDEV_ENGINE_FIFO }, 33 { 0x00000100, NVKM_ENGINE_FIFO },
34 { 0x00001000, NVDEV_ENGINE_GR }, 34 { 0x00001000, NVKM_ENGINE_GR },
35 { 0x00002000, NVDEV_SUBDEV_FB }, 35 { 0x00002000, NVKM_SUBDEV_FB },
36 { 0x00008000, NVDEV_ENGINE_MSVLD }, 36 { 0x00008000, NVKM_ENGINE_MSVLD },
37 { 0x00040000, NVDEV_SUBDEV_THERM }, 37 { 0x00040000, NVKM_SUBDEV_THERM },
38 { 0x00020000, NVDEV_ENGINE_MSPDEC }, 38 { 0x00020000, NVKM_ENGINE_MSPDEC },
39 { 0x00100000, NVDEV_SUBDEV_TIMER }, 39 { 0x00100000, NVKM_SUBDEV_TIMER },
40 { 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */ 40 { 0x00200000, NVKM_SUBDEV_GPIO }, /* PMGR->GPIO */
41 { 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */ 41 { 0x00200000, NVKM_SUBDEV_I2C }, /* PMGR->I2C/AUX */
42 { 0x01000000, NVDEV_SUBDEV_PMU }, 42 { 0x01000000, NVKM_SUBDEV_PMU },
43 { 0x02000000, NVDEV_SUBDEV_LTC }, 43 { 0x02000000, NVKM_SUBDEV_LTC },
44 { 0x08000000, NVDEV_SUBDEV_FB }, 44 { 0x08000000, NVKM_SUBDEV_FB },
45 { 0x10000000, NVDEV_SUBDEV_BUS }, 45 { 0x10000000, NVKM_SUBDEV_BUS },
46 { 0x40000000, NVDEV_SUBDEV_IBUS }, 46 { 0x40000000, NVKM_SUBDEV_IBUS },
47 { 0x80000000, NVDEV_ENGINE_SW }, 47 { 0x80000000, NVKM_ENGINE_SW },
48 {}, 48 {},
49}; 49};
50 50
51static void 51void
52gf100_mc_msi_rearm(struct nvkm_mc *pmc) 52gf100_mc_intr_unarm(struct nvkm_mc *mc)
53{
54 struct nvkm_device *device = mc->subdev.device;
55 nvkm_wr32(device, 0x000140, 0x00000000);
56 nvkm_wr32(device, 0x000144, 0x00000000);
57 nvkm_rd32(device, 0x000140);
58}
59
60void
61gf100_mc_intr_rearm(struct nvkm_mc *mc)
62{
63 struct nvkm_device *device = mc->subdev.device;
64 nvkm_wr32(device, 0x000140, 0x00000001);
65 nvkm_wr32(device, 0x000144, 0x00000001);
66}
67
68u32
69gf100_mc_intr_mask(struct nvkm_mc *mc)
53{ 70{
54 struct nv04_mc_priv *priv = (void *)pmc; 71 struct nvkm_device *device = mc->subdev.device;
55 nv_wr32(priv, 0x088704, 0x00000000); 72 u32 intr0 = nvkm_rd32(device, 0x000100);
73 u32 intr1 = nvkm_rd32(device, 0x000104);
74 return intr0 | intr1;
56} 75}
57 76
58void 77void
59gf100_mc_unk260(struct nvkm_mc *pmc, u32 data) 78gf100_mc_unk260(struct nvkm_mc *mc, u32 data)
60{ 79{
61 nv_wr32(pmc, 0x000260, data); 80 nvkm_wr32(mc->subdev.device, 0x000260, data);
62} 81}
63 82
64struct nvkm_oclass * 83static const struct nvkm_mc_func
65gf100_mc_oclass = &(struct nvkm_mc_oclass) { 84gf100_mc = {
66 .base.handle = NV_SUBDEV(MC, 0xc0), 85 .init = nv50_mc_init,
67 .base.ofuncs = &(struct nvkm_ofuncs) {
68 .ctor = nv04_mc_ctor,
69 .dtor = _nvkm_mc_dtor,
70 .init = nv50_mc_init,
71 .fini = _nvkm_mc_fini,
72 },
73 .intr = gf100_mc_intr, 86 .intr = gf100_mc_intr,
74 .msi_rearm = gf100_mc_msi_rearm, 87 .intr_unarm = gf100_mc_intr_unarm,
88 .intr_rearm = gf100_mc_intr_rearm,
89 .intr_mask = gf100_mc_intr_mask,
75 .unk260 = gf100_mc_unk260, 90 .unk260 = gf100_mc_unk260,
76}.base; 91};
92
93int
94gf100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
95{
96 return nvkm_mc_new_(&gf100_mc, device, index, pmc);
97}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
index 43b27742956d..d92efb33bcc3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
@@ -21,17 +21,19 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "priv.h"
25 25
26struct nvkm_oclass * 26static const struct nvkm_mc_func
27gk20a_mc_oclass = &(struct nvkm_mc_oclass) { 27gk20a_mc = {
28 .base.handle = NV_SUBDEV(MC, 0xea), 28 .init = nv50_mc_init,
29 .base.ofuncs = &(struct nvkm_ofuncs) {
30 .ctor = nv04_mc_ctor,
31 .dtor = _nvkm_mc_dtor,
32 .init = nv50_mc_init,
33 .fini = _nvkm_mc_fini,
34 },
35 .intr = gf100_mc_intr, 29 .intr = gf100_mc_intr,
36 .msi_rearm = nv40_mc_msi_rearm, 30 .intr_unarm = gf100_mc_intr_unarm,
37}.base; 31 .intr_rearm = gf100_mc_intr_rearm,
32 .intr_mask = gf100_mc_intr_mask,
33};
34
35int
36gk20a_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
37{
38 return nvkm_mc_new_(&gk20a_mc, device, index, pmc);
39}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
index 32713827b4dc..d282ec1555f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
@@ -21,58 +21,63 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "priv.h"
25 25
26const struct nvkm_mc_intr 26const struct nvkm_mc_intr
27nv04_mc_intr[] = { 27nv04_mc_intr[] = {
28 { 0x00000001, NVDEV_ENGINE_MPEG }, /* NV17- MPEG/ME */ 28 { 0x00000001, NVKM_ENGINE_MPEG }, /* NV17- MPEG/ME */
29 { 0x00000100, NVDEV_ENGINE_FIFO }, 29 { 0x00000100, NVKM_ENGINE_FIFO },
30 { 0x00001000, NVDEV_ENGINE_GR }, 30 { 0x00001000, NVKM_ENGINE_GR },
31 { 0x00010000, NVDEV_ENGINE_DISP }, 31 { 0x00010000, NVKM_ENGINE_DISP },
32 { 0x00020000, NVDEV_ENGINE_VP }, /* NV40- */ 32 { 0x00020000, NVKM_ENGINE_VP }, /* NV40- */
33 { 0x00100000, NVDEV_SUBDEV_TIMER }, 33 { 0x00100000, NVKM_SUBDEV_TIMER },
34 { 0x01000000, NVDEV_ENGINE_DISP }, /* NV04- PCRTC0 */ 34 { 0x01000000, NVKM_ENGINE_DISP }, /* NV04- PCRTC0 */
35 { 0x02000000, NVDEV_ENGINE_DISP }, /* NV11- PCRTC1 */ 35 { 0x02000000, NVKM_ENGINE_DISP }, /* NV11- PCRTC1 */
36 { 0x10000000, NVDEV_SUBDEV_BUS }, 36 { 0x10000000, NVKM_SUBDEV_BUS },
37 { 0x80000000, NVDEV_ENGINE_SW }, 37 { 0x80000000, NVKM_ENGINE_SW },
38 {} 38 {}
39}; 39};
40 40
41int 41void
42nv04_mc_init(struct nvkm_object *object) 42nv04_mc_intr_unarm(struct nvkm_mc *mc)
43{ 43{
44 struct nv04_mc_priv *priv = (void *)object; 44 struct nvkm_device *device = mc->subdev.device;
45 45 nvkm_wr32(device, 0x000140, 0x00000000);
46 nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */ 46 nvkm_rd32(device, 0x000140);
47 nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */
48
49 return nvkm_mc_init(&priv->base);
50} 47}
51 48
52int 49void
53nv04_mc_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 50nv04_mc_intr_rearm(struct nvkm_mc *mc)
54 struct nvkm_oclass *oclass, void *data, u32 size,
55 struct nvkm_object **pobject)
56{ 51{
57 struct nv04_mc_priv *priv; 52 struct nvkm_device *device = mc->subdev.device;
58 int ret; 53 nvkm_wr32(device, 0x000140, 0x00000001);
54}
59 55
60 ret = nvkm_mc_create(parent, engine, oclass, &priv); 56u32
61 *pobject = nv_object(priv); 57nv04_mc_intr_mask(struct nvkm_mc *mc)
62 if (ret) 58{
63 return ret; 59 return nvkm_rd32(mc->subdev.device, 0x000100);
60}
64 61
65 return 0; 62void
63nv04_mc_init(struct nvkm_mc *mc)
64{
65 struct nvkm_device *device = mc->subdev.device;
66 nvkm_wr32(device, 0x000200, 0xffffffff); /* everything enabled */
67 nvkm_wr32(device, 0x001850, 0x00000001); /* disable rom access */
66} 68}
67 69
68struct nvkm_oclass * 70static const struct nvkm_mc_func
69nv04_mc_oclass = &(struct nvkm_mc_oclass) { 71nv04_mc = {
70 .base.handle = NV_SUBDEV(MC, 0x04), 72 .init = nv04_mc_init,
71 .base.ofuncs = &(struct nvkm_ofuncs) {
72 .ctor = nv04_mc_ctor,
73 .dtor = _nvkm_mc_dtor,
74 .init = nv04_mc_init,
75 .fini = _nvkm_mc_fini,
76 },
77 .intr = nv04_mc_intr, 73 .intr = nv04_mc_intr,
78}.base; 74 .intr_unarm = nv04_mc_intr_unarm,
75 .intr_rearm = nv04_mc_intr_rearm,
76 .intr_mask = nv04_mc_intr_mask,
77};
78
79int
80nv04_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
81{
82 return nvkm_mc_new_(&nv04_mc, device, index, pmc);
83}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h
deleted file mode 100644
index 411de3d08ab6..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef __NVKM_MC_NV04_H__
2#define __NVKM_MC_NV04_H__
3#include "priv.h"
4
5struct nv04_mc_priv {
6 struct nvkm_mc base;
7};
8
9int nv04_mc_ctor(struct nvkm_object *, struct nvkm_object *,
10 struct nvkm_oclass *, void *, u32,
11 struct nvkm_object **);
12
13extern const struct nvkm_mc_intr nv04_mc_intr[];
14int nv04_mc_init(struct nvkm_object *);
15void nv40_mc_msi_rearm(struct nvkm_mc *);
16int nv44_mc_init(struct nvkm_object *object);
17int nv50_mc_init(struct nvkm_object *);
18extern const struct nvkm_mc_intr nv50_mc_intr[];
19extern const struct nvkm_mc_intr gf100_mc_intr[];
20#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
index 2c7f7c701a2b..9a3ac9965be0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
@@ -21,33 +21,33 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "priv.h"
25 25
26int 26void
27nv44_mc_init(struct nvkm_object *object) 27nv44_mc_init(struct nvkm_mc *mc)
28{ 28{
29 struct nv04_mc_priv *priv = (void *)object; 29 struct nvkm_device *device = mc->subdev.device;
30 u32 tmp = nv_rd32(priv, 0x10020c); 30 u32 tmp = nvkm_rd32(device, 0x10020c);
31
32 nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
33 31
34 nv_wr32(priv, 0x001700, tmp); 32 nvkm_wr32(device, 0x000200, 0xffffffff); /* everything enabled */
35 nv_wr32(priv, 0x001704, 0);
36 nv_wr32(priv, 0x001708, 0);
37 nv_wr32(priv, 0x00170c, tmp);
38 33
39 return nvkm_mc_init(&priv->base); 34 nvkm_wr32(device, 0x001700, tmp);
35 nvkm_wr32(device, 0x001704, 0);
36 nvkm_wr32(device, 0x001708, 0);
37 nvkm_wr32(device, 0x00170c, tmp);
40} 38}
41 39
42struct nvkm_oclass * 40static const struct nvkm_mc_func
43nv44_mc_oclass = &(struct nvkm_mc_oclass) { 41nv44_mc = {
44 .base.handle = NV_SUBDEV(MC, 0x44), 42 .init = nv44_mc_init,
45 .base.ofuncs = &(struct nvkm_ofuncs) {
46 .ctor = nv04_mc_ctor,
47 .dtor = _nvkm_mc_dtor,
48 .init = nv44_mc_init,
49 .fini = _nvkm_mc_fini,
50 },
51 .intr = nv04_mc_intr, 43 .intr = nv04_mc_intr,
52 .msi_rearm = nv40_mc_msi_rearm, 44 .intr_unarm = nv04_mc_intr_unarm,
53}.base; 45 .intr_rearm = nv04_mc_intr_rearm,
46 .intr_mask = nv04_mc_intr_mask,
47};
48
49int
50nv44_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
51{
52 return nvkm_mc_new_(&nv44_mc, device, index, pmc);
53}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
index 40e3019e1fde..5f27d7b8fddd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
@@ -21,52 +21,44 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "priv.h"
25
26#include <core/device.h>
27 25
28const struct nvkm_mc_intr 26const struct nvkm_mc_intr
29nv50_mc_intr[] = { 27nv50_mc_intr[] = {
30 { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP before FIFO, so pageflip-timestamping works! */ 28 { 0x04000000, NVKM_ENGINE_DISP }, /* DISP before FIFO, so pageflip-timestamping works! */
31 { 0x00000001, NVDEV_ENGINE_MPEG }, 29 { 0x00000001, NVKM_ENGINE_MPEG },
32 { 0x00000100, NVDEV_ENGINE_FIFO }, 30 { 0x00000100, NVKM_ENGINE_FIFO },
33 { 0x00001000, NVDEV_ENGINE_GR }, 31 { 0x00001000, NVKM_ENGINE_GR },
34 { 0x00004000, NVDEV_ENGINE_CIPHER }, /* NV84- */ 32 { 0x00004000, NVKM_ENGINE_CIPHER }, /* NV84- */
35 { 0x00008000, NVDEV_ENGINE_BSP }, /* NV84- */ 33 { 0x00008000, NVKM_ENGINE_BSP }, /* NV84- */
36 { 0x00020000, NVDEV_ENGINE_VP }, /* NV84- */ 34 { 0x00020000, NVKM_ENGINE_VP }, /* NV84- */
37 { 0x00100000, NVDEV_SUBDEV_TIMER }, 35 { 0x00100000, NVKM_SUBDEV_TIMER },
38 { 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */ 36 { 0x00200000, NVKM_SUBDEV_GPIO }, /* PMGR->GPIO */
39 { 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */ 37 { 0x00200000, NVKM_SUBDEV_I2C }, /* PMGR->I2C/AUX */
40 { 0x10000000, NVDEV_SUBDEV_BUS }, 38 { 0x10000000, NVKM_SUBDEV_BUS },
41 { 0x80000000, NVDEV_ENGINE_SW }, 39 { 0x80000000, NVKM_ENGINE_SW },
42 { 0x0002d101, NVDEV_SUBDEV_FB }, 40 { 0x0002d101, NVKM_SUBDEV_FB },
43 {}, 41 {},
44}; 42};
45 43
46static void 44void
47nv50_mc_msi_rearm(struct nvkm_mc *pmc) 45nv50_mc_init(struct nvkm_mc *mc)
48{ 46{
49 struct nvkm_device *device = nv_device(pmc); 47 struct nvkm_device *device = mc->subdev.device;
50 pci_write_config_byte(device->pdev, 0x68, 0xff); 48 nvkm_wr32(device, 0x000200, 0xffffffff); /* everything on */
51} 49}
52 50
51static const struct nvkm_mc_func
52nv50_mc = {
53 .init = nv50_mc_init,
54 .intr = nv50_mc_intr,
55 .intr_unarm = nv04_mc_intr_unarm,
56 .intr_rearm = nv04_mc_intr_rearm,
57 .intr_mask = nv04_mc_intr_mask,
58};
59
53int 60int
54nv50_mc_init(struct nvkm_object *object) 61nv50_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
55{ 62{
56 struct nv04_mc_priv *priv = (void *)object; 63 return nvkm_mc_new_(&nv50_mc, device, index, pmc);
57 nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
58 return nvkm_mc_init(&priv->base);
59} 64}
60
61struct nvkm_oclass *
62nv50_mc_oclass = &(struct nvkm_mc_oclass) {
63 .base.handle = NV_SUBDEV(MC, 0x50),
64 .base.ofuncs = &(struct nvkm_ofuncs) {
65 .ctor = nv04_mc_ctor,
66 .dtor = _nvkm_mc_dtor,
67 .init = nv50_mc_init,
68 .fini = _nvkm_mc_fini,
69 },
70 .intr = nv50_mc_intr,
71 .msi_rearm = nv50_mc_msi_rearm,
72}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index d2cad07afd1a..307f6c692287 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -1,36 +1,42 @@
1#ifndef __NVKM_MC_PRIV_H__ 1#ifndef __NVKM_MC_PRIV_H__
2#define __NVKM_MC_PRIV_H__ 2#define __NVKM_MC_PRIV_H__
3#define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
3#include <subdev/mc.h> 4#include <subdev/mc.h>
4 5
5#define nvkm_mc_create(p,e,o,d) \ 6int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *,
6 nvkm_mc_create_((p), (e), (o), sizeof(**d), (void **)d) 7 int index, struct nvkm_mc **);
7#define nvkm_mc_destroy(p) ({ \
8 struct nvkm_mc *pmc = (p); _nvkm_mc_dtor(nv_object(pmc)); \
9})
10#define nvkm_mc_init(p) ({ \
11 struct nvkm_mc *pmc = (p); _nvkm_mc_init(nv_object(pmc)); \
12})
13#define nvkm_mc_fini(p,s) ({ \
14 struct nvkm_mc *pmc = (p); _nvkm_mc_fini(nv_object(pmc), (s)); \
15})
16
17int nvkm_mc_create_(struct nvkm_object *, struct nvkm_object *,
18 struct nvkm_oclass *, int, void **);
19void _nvkm_mc_dtor(struct nvkm_object *);
20int _nvkm_mc_init(struct nvkm_object *);
21int _nvkm_mc_fini(struct nvkm_object *, bool);
22 8
23struct nvkm_mc_intr { 9struct nvkm_mc_intr {
24 u32 stat; 10 u32 stat;
25 u32 unit; 11 u32 unit;
26}; 12};
27 13
28struct nvkm_mc_oclass { 14struct nvkm_mc_func {
29 struct nvkm_oclass base; 15 void (*init)(struct nvkm_mc *);
30 const struct nvkm_mc_intr *intr; 16 const struct nvkm_mc_intr *intr;
31 void (*msi_rearm)(struct nvkm_mc *); 17 /* disable reporting of interrupts to host */
18 void (*intr_unarm)(struct nvkm_mc *);
19 /* enable reporting of interrupts to host */
20 void (*intr_rearm)(struct nvkm_mc *);
21 /* retrieve pending interrupt mask (NV_PMC_INTR) */
22 u32 (*intr_mask)(struct nvkm_mc *);
32 void (*unk260)(struct nvkm_mc *, u32); 23 void (*unk260)(struct nvkm_mc *, u32);
33}; 24};
34 25
26void nv04_mc_init(struct nvkm_mc *);
27extern const struct nvkm_mc_intr nv04_mc_intr[];
28void nv04_mc_intr_unarm(struct nvkm_mc *);
29void nv04_mc_intr_rearm(struct nvkm_mc *);
30u32 nv04_mc_intr_mask(struct nvkm_mc *);
31
32void nv44_mc_init(struct nvkm_mc *);
33
34void nv50_mc_init(struct nvkm_mc *);
35extern const struct nvkm_mc_intr nv50_mc_intr[];
36
37extern const struct nvkm_mc_intr gf100_mc_intr[];
38void gf100_mc_intr_unarm(struct nvkm_mc *);
39void gf100_mc_intr_rearm(struct nvkm_mc *);
40u32 gf100_mc_intr_mask(struct nvkm_mc *);
35void gf100_mc_unk260(struct nvkm_mc *, u32); 41void gf100_mc_unk260(struct nvkm_mc *, u32);
36#endif 42#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 277b6ec04e24..e04a2296ecd0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -21,10 +21,10 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <subdev/mmu.h> 24#include "priv.h"
25#include <subdev/fb.h>
26 25
27#include <core/gpuobj.h> 26#include <core/gpuobj.h>
27#include <subdev/fb.h>
28 28
29void 29void
30nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node) 30nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
@@ -32,12 +32,12 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
32 struct nvkm_vm *vm = vma->vm; 32 struct nvkm_vm *vm = vma->vm;
33 struct nvkm_mmu *mmu = vm->mmu; 33 struct nvkm_mmu *mmu = vm->mmu;
34 struct nvkm_mm_node *r; 34 struct nvkm_mm_node *r;
35 int big = vma->node->type != mmu->spg_shift; 35 int big = vma->node->type != mmu->func->spg_shift;
36 u32 offset = vma->node->offset + (delta >> 12); 36 u32 offset = vma->node->offset + (delta >> 12);
37 u32 bits = vma->node->type - 12; 37 u32 bits = vma->node->type - 12;
38 u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; 38 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
39 u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits; 39 u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
40 u32 max = 1 << (mmu->pgt_bits - bits); 40 u32 max = 1 << (mmu->func->pgt_bits - bits);
41 u32 end, len; 41 u32 end, len;
42 42
43 delta = 0; 43 delta = 0;
@@ -46,14 +46,14 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
46 u32 num = r->length >> bits; 46 u32 num = r->length >> bits;
47 47
48 while (num) { 48 while (num) {
49 struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big]; 49 struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
50 50
51 end = (pte + num); 51 end = (pte + num);
52 if (unlikely(end >= max)) 52 if (unlikely(end >= max))
53 end = max; 53 end = max;
54 len = end - pte; 54 len = end - pte;
55 55
56 mmu->map(vma, pgt, node, pte, len, phys, delta); 56 mmu->func->map(vma, pgt, node, pte, len, phys, delta);
57 57
58 num -= len; 58 num -= len;
59 pte += len; 59 pte += len;
@@ -67,7 +67,7 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
67 } 67 }
68 } 68 }
69 69
70 mmu->flush(vm); 70 mmu->func->flush(vm);
71} 71}
72 72
73static void 73static void
@@ -76,20 +76,20 @@ nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
76{ 76{
77 struct nvkm_vm *vm = vma->vm; 77 struct nvkm_vm *vm = vma->vm;
78 struct nvkm_mmu *mmu = vm->mmu; 78 struct nvkm_mmu *mmu = vm->mmu;
79 int big = vma->node->type != mmu->spg_shift; 79 int big = vma->node->type != mmu->func->spg_shift;
80 u32 offset = vma->node->offset + (delta >> 12); 80 u32 offset = vma->node->offset + (delta >> 12);
81 u32 bits = vma->node->type - 12; 81 u32 bits = vma->node->type - 12;
82 u32 num = length >> vma->node->type; 82 u32 num = length >> vma->node->type;
83 u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; 83 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
84 u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits; 84 u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
85 u32 max = 1 << (mmu->pgt_bits - bits); 85 u32 max = 1 << (mmu->func->pgt_bits - bits);
86 unsigned m, sglen; 86 unsigned m, sglen;
87 u32 end, len; 87 u32 end, len;
88 int i; 88 int i;
89 struct scatterlist *sg; 89 struct scatterlist *sg;
90 90
91 for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) { 91 for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
92 struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big]; 92 struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
93 sglen = sg_dma_len(sg) >> PAGE_SHIFT; 93 sglen = sg_dma_len(sg) >> PAGE_SHIFT;
94 94
95 end = pte + sglen; 95 end = pte + sglen;
@@ -100,7 +100,7 @@ nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
100 for (m = 0; m < len; m++) { 100 for (m = 0; m < len; m++) {
101 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); 101 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
102 102
103 mmu->map_sg(vma, pgt, mem, pte, 1, &addr); 103 mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
104 num--; 104 num--;
105 pte++; 105 pte++;
106 106
@@ -115,7 +115,7 @@ nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
115 for (; m < sglen; m++) { 115 for (; m < sglen; m++) {
116 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); 116 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
117 117
118 mmu->map_sg(vma, pgt, mem, pte, 1, &addr); 118 mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
119 num--; 119 num--;
120 pte++; 120 pte++;
121 if (num == 0) 121 if (num == 0)
@@ -125,7 +125,7 @@ nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
125 125
126 } 126 }
127finish: 127finish:
128 mmu->flush(vm); 128 mmu->func->flush(vm);
129} 129}
130 130
131static void 131static void
@@ -135,24 +135,24 @@ nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
135 struct nvkm_vm *vm = vma->vm; 135 struct nvkm_vm *vm = vma->vm;
136 struct nvkm_mmu *mmu = vm->mmu; 136 struct nvkm_mmu *mmu = vm->mmu;
137 dma_addr_t *list = mem->pages; 137 dma_addr_t *list = mem->pages;
138 int big = vma->node->type != mmu->spg_shift; 138 int big = vma->node->type != mmu->func->spg_shift;
139 u32 offset = vma->node->offset + (delta >> 12); 139 u32 offset = vma->node->offset + (delta >> 12);
140 u32 bits = vma->node->type - 12; 140 u32 bits = vma->node->type - 12;
141 u32 num = length >> vma->node->type; 141 u32 num = length >> vma->node->type;
142 u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; 142 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
143 u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits; 143 u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
144 u32 max = 1 << (mmu->pgt_bits - bits); 144 u32 max = 1 << (mmu->func->pgt_bits - bits);
145 u32 end, len; 145 u32 end, len;
146 146
147 while (num) { 147 while (num) {
148 struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big]; 148 struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
149 149
150 end = (pte + num); 150 end = (pte + num);
151 if (unlikely(end >= max)) 151 if (unlikely(end >= max))
152 end = max; 152 end = max;
153 len = end - pte; 153 len = end - pte;
154 154
155 mmu->map_sg(vma, pgt, mem, pte, len, list); 155 mmu->func->map_sg(vma, pgt, mem, pte, len, list);
156 156
157 num -= len; 157 num -= len;
158 pte += len; 158 pte += len;
@@ -163,7 +163,7 @@ nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
163 } 163 }
164 } 164 }
165 165
166 mmu->flush(vm); 166 mmu->func->flush(vm);
167} 167}
168 168
169void 169void
@@ -183,24 +183,24 @@ nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
183{ 183{
184 struct nvkm_vm *vm = vma->vm; 184 struct nvkm_vm *vm = vma->vm;
185 struct nvkm_mmu *mmu = vm->mmu; 185 struct nvkm_mmu *mmu = vm->mmu;
186 int big = vma->node->type != mmu->spg_shift; 186 int big = vma->node->type != mmu->func->spg_shift;
187 u32 offset = vma->node->offset + (delta >> 12); 187 u32 offset = vma->node->offset + (delta >> 12);
188 u32 bits = vma->node->type - 12; 188 u32 bits = vma->node->type - 12;
189 u32 num = length >> vma->node->type; 189 u32 num = length >> vma->node->type;
190 u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; 190 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
191 u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits; 191 u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
192 u32 max = 1 << (mmu->pgt_bits - bits); 192 u32 max = 1 << (mmu->func->pgt_bits - bits);
193 u32 end, len; 193 u32 end, len;
194 194
195 while (num) { 195 while (num) {
196 struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big]; 196 struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
197 197
198 end = (pte + num); 198 end = (pte + num);
199 if (unlikely(end >= max)) 199 if (unlikely(end >= max))
200 end = max; 200 end = max;
201 len = end - pte; 201 len = end - pte;
202 202
203 mmu->unmap(pgt, pte, len); 203 mmu->func->unmap(vma, pgt, pte, len);
204 204
205 num -= len; 205 num -= len;
206 pte += len; 206 pte += len;
@@ -210,7 +210,7 @@ nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
210 } 210 }
211 } 211 }
212 212
213 mmu->flush(vm); 213 mmu->func->flush(vm);
214} 214}
215 215
216void 216void
@@ -225,7 +225,7 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
225 struct nvkm_mmu *mmu = vm->mmu; 225 struct nvkm_mmu *mmu = vm->mmu;
226 struct nvkm_vm_pgd *vpgd; 226 struct nvkm_vm_pgd *vpgd;
227 struct nvkm_vm_pgt *vpgt; 227 struct nvkm_vm_pgt *vpgt;
228 struct nvkm_gpuobj *pgt; 228 struct nvkm_memory *pgt;
229 u32 pde; 229 u32 pde;
230 230
231 for (pde = fpde; pde <= lpde; pde++) { 231 for (pde = fpde; pde <= lpde; pde++) {
@@ -233,16 +233,14 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
233 if (--vpgt->refcount[big]) 233 if (--vpgt->refcount[big])
234 continue; 234 continue;
235 235
236 pgt = vpgt->obj[big]; 236 pgt = vpgt->mem[big];
237 vpgt->obj[big] = NULL; 237 vpgt->mem[big] = NULL;
238 238
239 list_for_each_entry(vpgd, &vm->pgd_list, head) { 239 list_for_each_entry(vpgd, &vm->pgd_list, head) {
240 mmu->map_pgt(vpgd->obj, pde, vpgt->obj); 240 mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
241 } 241 }
242 242
243 mutex_unlock(&nv_subdev(mmu)->mutex); 243 nvkm_memory_del(&pgt);
244 nvkm_gpuobj_ref(NULL, &pgt);
245 mutex_lock(&nv_subdev(mmu)->mutex);
246 } 244 }
247} 245}
248 246
@@ -252,34 +250,23 @@ nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
252 struct nvkm_mmu *mmu = vm->mmu; 250 struct nvkm_mmu *mmu = vm->mmu;
253 struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; 251 struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
254 struct nvkm_vm_pgd *vpgd; 252 struct nvkm_vm_pgd *vpgd;
255 struct nvkm_gpuobj *pgt; 253 int big = (type != mmu->func->spg_shift);
256 int big = (type != mmu->spg_shift);
257 u32 pgt_size; 254 u32 pgt_size;
258 int ret; 255 int ret;
259 256
260 pgt_size = (1 << (mmu->pgt_bits + 12)) >> type; 257 pgt_size = (1 << (mmu->func->pgt_bits + 12)) >> type;
261 pgt_size *= 8; 258 pgt_size *= 8;
262 259
263 mutex_unlock(&nv_subdev(mmu)->mutex); 260 ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
264 ret = nvkm_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000, 261 pgt_size, 0x1000, true, &vpgt->mem[big]);
265 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
266 mutex_lock(&nv_subdev(mmu)->mutex);
267 if (unlikely(ret)) 262 if (unlikely(ret))
268 return ret; 263 return ret;
269 264
270 /* someone beat us to filling the PDE while we didn't have the lock */
271 if (unlikely(vpgt->refcount[big]++)) {
272 mutex_unlock(&nv_subdev(mmu)->mutex);
273 nvkm_gpuobj_ref(NULL, &pgt);
274 mutex_lock(&nv_subdev(mmu)->mutex);
275 return 0;
276 }
277
278 vpgt->obj[big] = pgt;
279 list_for_each_entry(vpgd, &vm->pgd_list, head) { 265 list_for_each_entry(vpgd, &vm->pgd_list, head) {
280 mmu->map_pgt(vpgd->obj, pde, vpgt->obj); 266 mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
281 } 267 }
282 268
269 vpgt->refcount[big]++;
283 return 0; 270 return 0;
284} 271}
285 272
@@ -293,20 +280,20 @@ nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
293 u32 fpde, lpde, pde; 280 u32 fpde, lpde, pde;
294 int ret; 281 int ret;
295 282
296 mutex_lock(&nv_subdev(mmu)->mutex); 283 mutex_lock(&vm->mutex);
297 ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align, 284 ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
298 &vma->node); 285 &vma->node);
299 if (unlikely(ret != 0)) { 286 if (unlikely(ret != 0)) {
300 mutex_unlock(&nv_subdev(mmu)->mutex); 287 mutex_unlock(&vm->mutex);
301 return ret; 288 return ret;
302 } 289 }
303 290
304 fpde = (vma->node->offset >> mmu->pgt_bits); 291 fpde = (vma->node->offset >> mmu->func->pgt_bits);
305 lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits; 292 lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
306 293
307 for (pde = fpde; pde <= lpde; pde++) { 294 for (pde = fpde; pde <= lpde; pde++) {
308 struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; 295 struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
309 int big = (vma->node->type != mmu->spg_shift); 296 int big = (vma->node->type != mmu->func->spg_shift);
310 297
311 if (likely(vpgt->refcount[big])) { 298 if (likely(vpgt->refcount[big])) {
312 vpgt->refcount[big]++; 299 vpgt->refcount[big]++;
@@ -318,11 +305,11 @@ nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
318 if (pde != fpde) 305 if (pde != fpde)
319 nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1); 306 nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
320 nvkm_mm_free(&vm->mm, &vma->node); 307 nvkm_mm_free(&vm->mm, &vma->node);
321 mutex_unlock(&nv_subdev(mmu)->mutex); 308 mutex_unlock(&vm->mutex);
322 return ret; 309 return ret;
323 } 310 }
324 } 311 }
325 mutex_unlock(&nv_subdev(mmu)->mutex); 312 mutex_unlock(&vm->mutex);
326 313
327 vma->vm = NULL; 314 vma->vm = NULL;
328 nvkm_vm_ref(vm, &vma->vm, NULL); 315 nvkm_vm_ref(vm, &vma->vm, NULL);
@@ -334,27 +321,49 @@ nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
334void 321void
335nvkm_vm_put(struct nvkm_vma *vma) 322nvkm_vm_put(struct nvkm_vma *vma)
336{ 323{
337 struct nvkm_vm *vm = vma->vm; 324 struct nvkm_mmu *mmu;
338 struct nvkm_mmu *mmu = vm->mmu; 325 struct nvkm_vm *vm;
339 u32 fpde, lpde; 326 u32 fpde, lpde;
340 327
341 if (unlikely(vma->node == NULL)) 328 if (unlikely(vma->node == NULL))
342 return; 329 return;
343 fpde = (vma->node->offset >> mmu->pgt_bits); 330 vm = vma->vm;
344 lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits; 331 mmu = vm->mmu;
332
333 fpde = (vma->node->offset >> mmu->func->pgt_bits);
334 lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
345 335
346 mutex_lock(&nv_subdev(mmu)->mutex); 336 mutex_lock(&vm->mutex);
347 nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->spg_shift, fpde, lpde); 337 nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
348 nvkm_mm_free(&vm->mm, &vma->node); 338 nvkm_mm_free(&vm->mm, &vma->node);
349 mutex_unlock(&nv_subdev(mmu)->mutex); 339 mutex_unlock(&vm->mutex);
350 340
351 nvkm_vm_ref(NULL, &vma->vm, NULL); 341 nvkm_vm_ref(NULL, &vma->vm, NULL);
352} 342}
353 343
354int 344int
345nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
346{
347 struct nvkm_mmu *mmu = vm->mmu;
348 struct nvkm_memory *pgt;
349 int ret;
350
351 ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
352 (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
353 if (ret == 0) {
354 vm->pgt[0].refcount[0] = 1;
355 vm->pgt[0].mem[0] = pgt;
356 nvkm_memory_boot(pgt, vm);
357 }
358
359 return ret;
360}
361
362int
355nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset, 363nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
356 u32 block, struct nvkm_vm **pvm) 364 u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
357{ 365{
366 static struct lock_class_key _key;
358 struct nvkm_vm *vm; 367 struct nvkm_vm *vm;
359 u64 mm_length = (offset + length) - mm_offset; 368 u64 mm_length = (offset + length) - mm_offset;
360 int ret; 369 int ret;
@@ -363,11 +372,12 @@ nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
363 if (!vm) 372 if (!vm)
364 return -ENOMEM; 373 return -ENOMEM;
365 374
375 __mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
366 INIT_LIST_HEAD(&vm->pgd_list); 376 INIT_LIST_HEAD(&vm->pgd_list);
367 vm->mmu = mmu; 377 vm->mmu = mmu;
368 kref_init(&vm->refcount); 378 kref_init(&vm->refcount);
369 vm->fpde = offset >> (mmu->pgt_bits + 12); 379 vm->fpde = offset >> (mmu->func->pgt_bits + 12);
370 vm->lpde = (offset + length - 1) >> (mmu->pgt_bits + 12); 380 vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);
371 381
372 vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt)); 382 vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
373 if (!vm->pgt) { 383 if (!vm->pgt) {
@@ -390,10 +400,12 @@ nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
390 400
391int 401int
392nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset, 402nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
393 struct nvkm_vm **pvm) 403 struct lock_class_key *key, struct nvkm_vm **pvm)
394{ 404{
395 struct nvkm_mmu *mmu = nvkm_mmu(device); 405 struct nvkm_mmu *mmu = device->mmu;
396 return mmu->create(mmu, offset, length, mm_offset, pvm); 406 if (!mmu->func->create)
407 return -EINVAL;
408 return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
397} 409}
398 410
399static int 411static int
@@ -410,38 +422,33 @@ nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
410 if (!vpgd) 422 if (!vpgd)
411 return -ENOMEM; 423 return -ENOMEM;
412 424
413 nvkm_gpuobj_ref(pgd, &vpgd->obj); 425 vpgd->obj = pgd;
414 426
415 mutex_lock(&nv_subdev(mmu)->mutex); 427 mutex_lock(&vm->mutex);
416 for (i = vm->fpde; i <= vm->lpde; i++) 428 for (i = vm->fpde; i <= vm->lpde; i++)
417 mmu->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj); 429 mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
418 list_add(&vpgd->head, &vm->pgd_list); 430 list_add(&vpgd->head, &vm->pgd_list);
419 mutex_unlock(&nv_subdev(mmu)->mutex); 431 mutex_unlock(&vm->mutex);
420 return 0; 432 return 0;
421} 433}
422 434
423static void 435static void
424nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd) 436nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
425{ 437{
426 struct nvkm_mmu *mmu = vm->mmu;
427 struct nvkm_vm_pgd *vpgd, *tmp; 438 struct nvkm_vm_pgd *vpgd, *tmp;
428 struct nvkm_gpuobj *pgd = NULL;
429 439
430 if (!mpgd) 440 if (!mpgd)
431 return; 441 return;
432 442
433 mutex_lock(&nv_subdev(mmu)->mutex); 443 mutex_lock(&vm->mutex);
434 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { 444 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
435 if (vpgd->obj == mpgd) { 445 if (vpgd->obj == mpgd) {
436 pgd = vpgd->obj;
437 list_del(&vpgd->head); 446 list_del(&vpgd->head);
438 kfree(vpgd); 447 kfree(vpgd);
439 break; 448 break;
440 } 449 }
441 } 450 }
442 mutex_unlock(&nv_subdev(mmu)->mutex); 451 mutex_unlock(&vm->mutex);
443
444 nvkm_gpuobj_ref(NULL, &pgd);
445} 452}
446 453
447static void 454static void
@@ -478,3 +485,58 @@ nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
478 *ptr = ref; 485 *ptr = ref;
479 return 0; 486 return 0;
480} 487}
488
489static int
490nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
491{
492 struct nvkm_mmu *mmu = nvkm_mmu(subdev);
493 if (mmu->func->oneinit)
494 return mmu->func->oneinit(mmu);
495 return 0;
496}
497
498static int
499nvkm_mmu_init(struct nvkm_subdev *subdev)
500{
501 struct nvkm_mmu *mmu = nvkm_mmu(subdev);
502 if (mmu->func->init)
503 mmu->func->init(mmu);
504 return 0;
505}
506
507static void *
508nvkm_mmu_dtor(struct nvkm_subdev *subdev)
509{
510 struct nvkm_mmu *mmu = nvkm_mmu(subdev);
511 if (mmu->func->dtor)
512 return mmu->func->dtor(mmu);
513 return mmu;
514}
515
516static const struct nvkm_subdev_func
517nvkm_mmu = {
518 .dtor = nvkm_mmu_dtor,
519 .oneinit = nvkm_mmu_oneinit,
520 .init = nvkm_mmu_init,
521};
522
523void
524nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
525 int index, struct nvkm_mmu *mmu)
526{
527 nvkm_subdev_ctor(&nvkm_mmu, device, index, 0, &mmu->subdev);
528 mmu->func = func;
529 mmu->limit = func->limit;
530 mmu->dma_bits = func->dma_bits;
531 mmu->lpg_shift = func->lpg_shift;
532}
533
534int
535nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
536 int index, struct nvkm_mmu **pmmu)
537{
538 if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
539 return -ENOMEM;
540 nvkm_mmu_ctor(func, device, index, *pmmu);
541 return 0;
542}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 294cda37f068..7ac507c927bb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -21,19 +21,14 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <subdev/mmu.h> 24#include "priv.h"
25#include <subdev/bar.h> 25
26#include <subdev/fb.h> 26#include <subdev/fb.h>
27#include <subdev/ltc.h> 27#include <subdev/ltc.h>
28#include <subdev/timer.h> 28#include <subdev/timer.h>
29 29
30#include <core/gpuobj.h> 30#include <core/gpuobj.h>
31 31
32struct gf100_mmu_priv {
33 struct nvkm_mmu base;
34};
35
36
37/* Map from compressed to corresponding uncompressed storage type. 32/* Map from compressed to corresponding uncompressed storage type.
38 * The value 0xff represents an invalid storage type. 33 * The value 0xff represents an invalid storage type.
39 */ 34 */
@@ -75,17 +70,19 @@ const u8 gf100_pte_storage_type_map[256] =
75 70
76 71
77static void 72static void
78gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_gpuobj *pgt[2]) 73gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_memory *pgt[2])
79{ 74{
80 u32 pde[2] = { 0, 0 }; 75 u32 pde[2] = { 0, 0 };
81 76
82 if (pgt[0]) 77 if (pgt[0])
83 pde[1] = 0x00000001 | (pgt[0]->addr >> 8); 78 pde[1] = 0x00000001 | (nvkm_memory_addr(pgt[0]) >> 8);
84 if (pgt[1]) 79 if (pgt[1])
85 pde[0] = 0x00000001 | (pgt[1]->addr >> 8); 80 pde[0] = 0x00000001 | (nvkm_memory_addr(pgt[1]) >> 8);
86 81
87 nv_wo32(pgd, (index * 8) + 0, pde[0]); 82 nvkm_kmap(pgd);
88 nv_wo32(pgd, (index * 8) + 4, pde[1]); 83 nvkm_wo32(pgd, (index * 8) + 0, pde[0]);
84 nvkm_wo32(pgd, (index * 8) + 4, pde[1]);
85 nvkm_done(pgd);
89} 86}
90 87
91static inline u64 88static inline u64
@@ -103,7 +100,7 @@ gf100_vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
103} 100}
104 101
105static void 102static void
106gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, 103gf100_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
107 struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) 104 struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
108{ 105{
109 u64 next = 1 << (vma->node->type - 8); 106 u64 next = 1 << (vma->node->type - 8);
@@ -112,126 +109,113 @@ gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
112 pte <<= 3; 109 pte <<= 3;
113 110
114 if (mem->tag) { 111 if (mem->tag) {
115 struct nvkm_ltc *ltc = nvkm_ltc(vma->vm->mmu); 112 struct nvkm_ltc *ltc = vma->vm->mmu->subdev.device->ltc;
116 u32 tag = mem->tag->offset + (delta >> 17); 113 u32 tag = mem->tag->offset + (delta >> 17);
117 phys |= (u64)tag << (32 + 12); 114 phys |= (u64)tag << (32 + 12);
118 next |= (u64)1 << (32 + 12); 115 next |= (u64)1 << (32 + 12);
119 ltc->tags_clear(ltc, tag, cnt); 116 nvkm_ltc_tags_clear(ltc, tag, cnt);
120 } 117 }
121 118
119 nvkm_kmap(pgt);
122 while (cnt--) { 120 while (cnt--) {
123 nv_wo32(pgt, pte + 0, lower_32_bits(phys)); 121 nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
124 nv_wo32(pgt, pte + 4, upper_32_bits(phys)); 122 nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
125 phys += next; 123 phys += next;
126 pte += 8; 124 pte += 8;
127 } 125 }
126 nvkm_done(pgt);
128} 127}
129 128
130static void 129static void
131gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, 130gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
132 struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) 131 struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
133{ 132{
134 u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5; 133 u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
135 /* compressed storage types are invalid for system memory */ 134 /* compressed storage types are invalid for system memory */
136 u32 memtype = gf100_pte_storage_type_map[mem->memtype & 0xff]; 135 u32 memtype = gf100_pte_storage_type_map[mem->memtype & 0xff];
137 136
137 nvkm_kmap(pgt);
138 pte <<= 3; 138 pte <<= 3;
139 while (cnt--) { 139 while (cnt--) {
140 u64 phys = gf100_vm_addr(vma, *list++, memtype, target); 140 u64 phys = gf100_vm_addr(vma, *list++, memtype, target);
141 nv_wo32(pgt, pte + 0, lower_32_bits(phys)); 141 nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
142 nv_wo32(pgt, pte + 4, upper_32_bits(phys)); 142 nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
143 pte += 8; 143 pte += 8;
144 } 144 }
145 nvkm_done(pgt);
145} 146}
146 147
147static void 148static void
148gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt) 149gf100_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
149{ 150{
151 nvkm_kmap(pgt);
150 pte <<= 3; 152 pte <<= 3;
151 while (cnt--) { 153 while (cnt--) {
152 nv_wo32(pgt, pte + 0, 0x00000000); 154 nvkm_wo32(pgt, pte + 0, 0x00000000);
153 nv_wo32(pgt, pte + 4, 0x00000000); 155 nvkm_wo32(pgt, pte + 4, 0x00000000);
154 pte += 8; 156 pte += 8;
155 } 157 }
158 nvkm_done(pgt);
156} 159}
157 160
158static void 161static void
159gf100_vm_flush(struct nvkm_vm *vm) 162gf100_vm_flush(struct nvkm_vm *vm)
160{ 163{
161 struct gf100_mmu_priv *priv = (void *)vm->mmu; 164 struct nvkm_mmu *mmu = vm->mmu;
162 struct nvkm_bar *bar = nvkm_bar(priv); 165 struct nvkm_device *device = mmu->subdev.device;
163 struct nvkm_vm_pgd *vpgd; 166 struct nvkm_vm_pgd *vpgd;
164 u32 type; 167 u32 type;
165 168
166 bar->flush(bar);
167
168 type = 0x00000001; /* PAGE_ALL */ 169 type = 0x00000001; /* PAGE_ALL */
169 if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR])) 170 if (atomic_read(&vm->engref[NVKM_SUBDEV_BAR]))
170 type |= 0x00000004; /* HUB_ONLY */ 171 type |= 0x00000004; /* HUB_ONLY */
171 172
172 mutex_lock(&nv_subdev(priv)->mutex); 173 mutex_lock(&mmu->subdev.mutex);
173 list_for_each_entry(vpgd, &vm->pgd_list, head) { 174 list_for_each_entry(vpgd, &vm->pgd_list, head) {
174 /* looks like maybe a "free flush slots" counter, the 175 /* looks like maybe a "free flush slots" counter, the
175 * faster you write to 0x100cbc to more it decreases 176 * faster you write to 0x100cbc to more it decreases
176 */ 177 */
177 if (!nv_wait_ne(priv, 0x100c80, 0x00ff0000, 0x00000000)) { 178 nvkm_msec(device, 2000,
178 nv_error(priv, "vm timeout 0: 0x%08x %d\n", 179 if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
179 nv_rd32(priv, 0x100c80), type); 180 break;
180 } 181 );
181 182
182 nv_wr32(priv, 0x100cb8, vpgd->obj->addr >> 8); 183 nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
183 nv_wr32(priv, 0x100cbc, 0x80000000 | type); 184 nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
184 185
185 /* wait for flush to be queued? */ 186 /* wait for flush to be queued? */
186 if (!nv_wait(priv, 0x100c80, 0x00008000, 0x00008000)) { 187 nvkm_msec(device, 2000,
187 nv_error(priv, "vm timeout 1: 0x%08x %d\n", 188 if (nvkm_rd32(device, 0x100c80) & 0x00008000)
188 nv_rd32(priv, 0x100c80), type); 189 break;
189 } 190 );
190 } 191 }
191 mutex_unlock(&nv_subdev(priv)->mutex); 192 mutex_unlock(&mmu->subdev.mutex);
192} 193}
193 194
194static int 195static int
195gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset, 196gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
196 struct nvkm_vm **pvm) 197 struct lock_class_key *key, struct nvkm_vm **pvm)
197{ 198{
198 return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, pvm); 199 return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, key, pvm);
199} 200}
200 201
201static int 202static const struct nvkm_mmu_func
202gf100_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 203gf100_mmu = {
203 struct nvkm_oclass *oclass, void *data, u32 size, 204 .limit = (1ULL << 40),
204 struct nvkm_object **pobject) 205 .dma_bits = 40,
206 .pgt_bits = 27 - 12,
207 .spg_shift = 12,
208 .lpg_shift = 17,
209 .create = gf100_vm_create,
210 .map_pgt = gf100_vm_map_pgt,
211 .map = gf100_vm_map,
212 .map_sg = gf100_vm_map_sg,
213 .unmap = gf100_vm_unmap,
214 .flush = gf100_vm_flush,
215};
216
217int
218gf100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
205{ 219{
206 struct gf100_mmu_priv *priv; 220 return nvkm_mmu_new_(&gf100_mmu, device, index, pmmu);
207 int ret;
208
209 ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
210 *pobject = nv_object(priv);
211 if (ret)
212 return ret;
213
214 priv->base.limit = 1ULL << 40;
215 priv->base.dma_bits = 40;
216 priv->base.pgt_bits = 27 - 12;
217 priv->base.spg_shift = 12;
218 priv->base.lpg_shift = 17;
219 priv->base.create = gf100_vm_create;
220 priv->base.map_pgt = gf100_vm_map_pgt;
221 priv->base.map = gf100_vm_map;
222 priv->base.map_sg = gf100_vm_map_sg;
223 priv->base.unmap = gf100_vm_unmap;
224 priv->base.flush = gf100_vm_flush;
225 return 0;
226} 221}
227
228struct nvkm_oclass
229gf100_mmu_oclass = {
230 .handle = NV_SUBDEV(MMU, 0xc0),
231 .ofuncs = &(struct nvkm_ofuncs) {
232 .ctor = gf100_mmu_ctor,
233 .dtor = _nvkm_mmu_dtor,
234 .init = _nvkm_mmu_init,
235 .fini = _nvkm_mmu_fini,
236 },
237};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
index fe93ea2711c9..37927c3fdc3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
@@ -23,7 +23,6 @@
23 */ 23 */
24#include "nv04.h" 24#include "nv04.h"
25 25
26#include <core/device.h>
27#include <core/gpuobj.h> 26#include <core/gpuobj.h>
28 27
29#define NV04_PDMA_SIZE (128 * 1024 * 1024) 28#define NV04_PDMA_SIZE (128 * 1024 * 1024)
@@ -34,30 +33,34 @@
34 ******************************************************************************/ 33 ******************************************************************************/
35 34
36static void 35static void
37nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, 36nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
38 struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) 37 struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
39{ 38{
40 pte = 0x00008 + (pte * 4); 39 pte = 0x00008 + (pte * 4);
40 nvkm_kmap(pgt);
41 while (cnt) { 41 while (cnt) {
42 u32 page = PAGE_SIZE / NV04_PDMA_PAGE; 42 u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
43 u32 phys = (u32)*list++; 43 u32 phys = (u32)*list++;
44 while (cnt && page--) { 44 while (cnt && page--) {
45 nv_wo32(pgt, pte, phys | 3); 45 nvkm_wo32(pgt, pte, phys | 3);
46 phys += NV04_PDMA_PAGE; 46 phys += NV04_PDMA_PAGE;
47 pte += 4; 47 pte += 4;
48 cnt -= 1; 48 cnt -= 1;
49 } 49 }
50 } 50 }
51 nvkm_done(pgt);
51} 52}
52 53
53static void 54static void
54nv04_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt) 55nv04_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
55{ 56{
56 pte = 0x00008 + (pte * 4); 57 pte = 0x00008 + (pte * 4);
58 nvkm_kmap(pgt);
57 while (cnt--) { 59 while (cnt--) {
58 nv_wo32(pgt, pte, 0x00000000); 60 nvkm_wo32(pgt, pte, 0x00000000);
59 pte += 4; 61 pte += 4;
60 } 62 }
63 nvkm_done(pgt);
61} 64}
62 65
63static void 66static void
@@ -66,86 +69,81 @@ nv04_vm_flush(struct nvkm_vm *vm)
66} 69}
67 70
68/******************************************************************************* 71/*******************************************************************************
69 * VM object
70 ******************************************************************************/
71
72int
73nv04_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mmstart,
74 struct nvkm_vm **pvm)
75{
76 return -EINVAL;
77}
78
79/*******************************************************************************
80 * MMU subdev 72 * MMU subdev
81 ******************************************************************************/ 73 ******************************************************************************/
82 74
83static int 75static int
84nv04_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 76nv04_mmu_oneinit(struct nvkm_mmu *base)
85 struct nvkm_oclass *oclass, void *data, u32 size,
86 struct nvkm_object **pobject)
87{ 77{
88 struct nv04_mmu_priv *priv; 78 struct nv04_mmu *mmu = nv04_mmu(base);
89 struct nvkm_gpuobj *dma; 79 struct nvkm_device *device = mmu->base.subdev.device;
80 struct nvkm_memory *dma;
90 int ret; 81 int ret;
91 82
92 ret = nvkm_mmu_create(parent, engine, oclass, "PCIGART", 83 ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL,
93 "pcigart", &priv); 84 &mmu->vm);
94 *pobject = nv_object(priv);
95 if (ret)
96 return ret;
97
98 priv->base.create = nv04_vm_create;
99 priv->base.limit = NV04_PDMA_SIZE;
100 priv->base.dma_bits = 32;
101 priv->base.pgt_bits = 32 - 12;
102 priv->base.spg_shift = 12;
103 priv->base.lpg_shift = 12;
104 priv->base.map_sg = nv04_vm_map_sg;
105 priv->base.unmap = nv04_vm_unmap;
106 priv->base.flush = nv04_vm_flush;
107
108 ret = nvkm_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
109 &priv->vm);
110 if (ret) 85 if (ret)
111 return ret; 86 return ret;
112 87
113 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 88 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
114 (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8, 89 (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
115 16, NVOBJ_FLAG_ZERO_ALLOC, 90 16, true, &dma);
116 &priv->vm->pgt[0].obj[0]); 91 mmu->vm->pgt[0].mem[0] = dma;
117 dma = priv->vm->pgt[0].obj[0]; 92 mmu->vm->pgt[0].refcount[0] = 1;
118 priv->vm->pgt[0].refcount[0] = 1;
119 if (ret) 93 if (ret)
120 return ret; 94 return ret;
121 95
122 nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */ 96 nvkm_kmap(dma);
123 nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1); 97 nvkm_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
98 nvkm_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
99 nvkm_done(dma);
124 return 0; 100 return 0;
125} 101}
126 102
127void 103void *
128nv04_mmu_dtor(struct nvkm_object *object) 104nv04_mmu_dtor(struct nvkm_mmu *base)
129{ 105{
130 struct nv04_mmu_priv *priv = (void *)object; 106 struct nv04_mmu *mmu = nv04_mmu(base);
131 if (priv->vm) { 107 struct nvkm_device *device = mmu->base.subdev.device;
132 nvkm_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]); 108 if (mmu->vm) {
133 nvkm_vm_ref(NULL, &priv->vm, NULL); 109 nvkm_memory_del(&mmu->vm->pgt[0].mem[0]);
110 nvkm_vm_ref(NULL, &mmu->vm, NULL);
134 } 111 }
135 if (priv->nullp) { 112 if (mmu->nullp) {
136 pci_free_consistent(nv_device(priv)->pdev, 16 * 1024, 113 dma_free_coherent(device->dev, 16 * 1024,
137 priv->nullp, priv->null); 114 mmu->nullp, mmu->null);
138 } 115 }
139 nvkm_mmu_destroy(&priv->base); 116 return mmu;
117}
118
119int
120nv04_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
121 int index, struct nvkm_mmu **pmmu)
122{
123 struct nv04_mmu *mmu;
124 if (!(mmu = kzalloc(sizeof(*mmu), GFP_KERNEL)))
125 return -ENOMEM;
126 *pmmu = &mmu->base;
127 nvkm_mmu_ctor(func, device, index, &mmu->base);
128 return 0;
140} 129}
141 130
142struct nvkm_oclass 131const struct nvkm_mmu_func
143nv04_mmu_oclass = { 132nv04_mmu = {
144 .handle = NV_SUBDEV(MMU, 0x04), 133 .oneinit = nv04_mmu_oneinit,
145 .ofuncs = &(struct nvkm_ofuncs) { 134 .dtor = nv04_mmu_dtor,
146 .ctor = nv04_mmu_ctor, 135 .limit = NV04_PDMA_SIZE,
147 .dtor = nv04_mmu_dtor, 136 .dma_bits = 32,
148 .init = _nvkm_mmu_init, 137 .pgt_bits = 32 - 12,
149 .fini = _nvkm_mmu_fini, 138 .spg_shift = 12,
150 }, 139 .lpg_shift = 12,
140 .map_sg = nv04_vm_map_sg,
141 .unmap = nv04_vm_unmap,
142 .flush = nv04_vm_flush,
151}; 143};
144
145int
146nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
147{
148 return nv04_mmu_new_(&nv04_mmu, device, index, pmmu);
149}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
index 7bf6f4b38f1d..363e33b296d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
@@ -1,19 +1,18 @@
1#ifndef __NV04_MMU_PRIV__ 1#ifndef __NV04_MMU_PRIV__
2#define __NV04_MMU_PRIV__ 2#define __NV04_MMU_PRIV__
3#define nv04_mmu(p) container_of((p), struct nv04_mmu, base)
4#include "priv.h"
3 5
4#include <subdev/mmu.h> 6struct nv04_mmu {
5
6struct nv04_mmu_priv {
7 struct nvkm_mmu base; 7 struct nvkm_mmu base;
8 struct nvkm_vm *vm; 8 struct nvkm_vm *vm;
9 dma_addr_t null; 9 dma_addr_t null;
10 void *nullp; 10 void *nullp;
11}; 11};
12 12
13static inline struct nv04_mmu_priv * 13int nv04_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
14nv04_mmu(void *obj) 14 int index, struct nvkm_mmu **);
15{ 15void *nv04_mmu_dtor(struct nvkm_mmu *);
16 return (void *)nvkm_mmu(obj);
17}
18 16
17extern const struct nvkm_mmu_func nv04_mmu;
19#endif 18#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
index 61ee3ab11660..c6a26f907009 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -23,7 +23,6 @@
23 */ 23 */
24#include "nv04.h" 24#include "nv04.h"
25 25
26#include <core/device.h>
27#include <core/gpuobj.h> 26#include <core/gpuobj.h>
28#include <core/option.h> 27#include <core/option.h>
29#include <subdev/timer.h> 28#include <subdev/timer.h>
@@ -36,45 +35,50 @@
36 ******************************************************************************/ 35 ******************************************************************************/
37 36
38static void 37static void
39nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, 38nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
40 struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) 39 struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
41{ 40{
42 pte = pte * 4; 41 pte = pte * 4;
42 nvkm_kmap(pgt);
43 while (cnt) { 43 while (cnt) {
44 u32 page = PAGE_SIZE / NV41_GART_PAGE; 44 u32 page = PAGE_SIZE / NV41_GART_PAGE;
45 u64 phys = (u64)*list++; 45 u64 phys = (u64)*list++;
46 while (cnt && page--) { 46 while (cnt && page--) {
47 nv_wo32(pgt, pte, (phys >> 7) | 1); 47 nvkm_wo32(pgt, pte, (phys >> 7) | 1);
48 phys += NV41_GART_PAGE; 48 phys += NV41_GART_PAGE;
49 pte += 4; 49 pte += 4;
50 cnt -= 1; 50 cnt -= 1;
51 } 51 }
52 } 52 }
53 nvkm_done(pgt);
53} 54}
54 55
55static void 56static void
56nv41_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt) 57nv41_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
57{ 58{
58 pte = pte * 4; 59 pte = pte * 4;
60 nvkm_kmap(pgt);
59 while (cnt--) { 61 while (cnt--) {
60 nv_wo32(pgt, pte, 0x00000000); 62 nvkm_wo32(pgt, pte, 0x00000000);
61 pte += 4; 63 pte += 4;
62 } 64 }
65 nvkm_done(pgt);
63} 66}
64 67
65static void 68static void
66nv41_vm_flush(struct nvkm_vm *vm) 69nv41_vm_flush(struct nvkm_vm *vm)
67{ 70{
68 struct nv04_mmu_priv *priv = (void *)vm->mmu; 71 struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
69 72 struct nvkm_device *device = mmu->base.subdev.device;
70 mutex_lock(&nv_subdev(priv)->mutex); 73
71 nv_wr32(priv, 0x100810, 0x00000022); 74 mutex_lock(&mmu->base.subdev.mutex);
72 if (!nv_wait(priv, 0x100810, 0x00000020, 0x00000020)) { 75 nvkm_wr32(device, 0x100810, 0x00000022);
73 nv_warn(priv, "flush timeout, 0x%08x\n", 76 nvkm_msec(device, 2000,
74 nv_rd32(priv, 0x100810)); 77 if (nvkm_rd32(device, 0x100810) & 0x00000020)
75 } 78 break;
76 nv_wr32(priv, 0x100810, 0x00000000); 79 );
77 mutex_unlock(&nv_subdev(priv)->mutex); 80 nvkm_wr32(device, 0x100810, 0x00000000);
81 mutex_unlock(&mmu->base.subdev.mutex);
78} 82}
79 83
80/******************************************************************************* 84/*******************************************************************************
@@ -82,76 +86,56 @@ nv41_vm_flush(struct nvkm_vm *vm)
82 ******************************************************************************/ 86 ******************************************************************************/
83 87
84static int 88static int
85nv41_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 89nv41_mmu_oneinit(struct nvkm_mmu *base)
86 struct nvkm_oclass *oclass, void *data, u32 size,
87 struct nvkm_object **pobject)
88{ 90{
89 struct nvkm_device *device = nv_device(parent); 91 struct nv04_mmu *mmu = nv04_mmu(base);
90 struct nv04_mmu_priv *priv; 92 struct nvkm_device *device = mmu->base.subdev.device;
91 int ret; 93 int ret;
92 94
93 if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) || 95 ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL,
94 !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) { 96 &mmu->vm);
95 return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass,
96 data, size, pobject);
97 }
98
99 ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
100 "pciegart", &priv);
101 *pobject = nv_object(priv);
102 if (ret)
103 return ret;
104
105 priv->base.create = nv04_vm_create;
106 priv->base.limit = NV41_GART_SIZE;
107 priv->base.dma_bits = 39;
108 priv->base.pgt_bits = 32 - 12;
109 priv->base.spg_shift = 12;
110 priv->base.lpg_shift = 12;
111 priv->base.map_sg = nv41_vm_map_sg;
112 priv->base.unmap = nv41_vm_unmap;
113 priv->base.flush = nv41_vm_flush;
114
115 ret = nvkm_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
116 &priv->vm);
117 if (ret)
118 return ret;
119
120 ret = nvkm_gpuobj_new(nv_object(priv), NULL,
121 (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16,
122 NVOBJ_FLAG_ZERO_ALLOC,
123 &priv->vm->pgt[0].obj[0]);
124 priv->vm->pgt[0].refcount[0] = 1;
125 if (ret) 97 if (ret)
126 return ret; 98 return ret;
127 99
128 return 0; 100 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
101 (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true,
102 &mmu->vm->pgt[0].mem[0]);
103 mmu->vm->pgt[0].refcount[0] = 1;
104 return ret;
129} 105}
130 106
131static int 107static void
132nv41_mmu_init(struct nvkm_object *object) 108nv41_mmu_init(struct nvkm_mmu *base)
133{ 109{
134 struct nv04_mmu_priv *priv = (void *)object; 110 struct nv04_mmu *mmu = nv04_mmu(base);
135 struct nvkm_gpuobj *dma = priv->vm->pgt[0].obj[0]; 111 struct nvkm_device *device = mmu->base.subdev.device;
136 int ret; 112 struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0];
137 113 nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma));
138 ret = nvkm_mmu_init(&priv->base); 114 nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
139 if (ret) 115 nvkm_wr32(device, 0x100820, 0x00000000);
140 return ret;
141
142 nv_wr32(priv, 0x100800, dma->addr | 0x00000002);
143 nv_mask(priv, 0x10008c, 0x00000100, 0x00000100);
144 nv_wr32(priv, 0x100820, 0x00000000);
145 return 0;
146} 116}
147 117
148struct nvkm_oclass 118static const struct nvkm_mmu_func
149nv41_mmu_oclass = { 119nv41_mmu = {
150 .handle = NV_SUBDEV(MMU, 0x41), 120 .dtor = nv04_mmu_dtor,
151 .ofuncs = &(struct nvkm_ofuncs) { 121 .oneinit = nv41_mmu_oneinit,
152 .ctor = nv41_mmu_ctor, 122 .init = nv41_mmu_init,
153 .dtor = nv04_mmu_dtor, 123 .limit = NV41_GART_SIZE,
154 .init = nv41_mmu_init, 124 .dma_bits = 39,
155 .fini = _nvkm_mmu_fini, 125 .pgt_bits = 32 - 12,
156 }, 126 .spg_shift = 12,
127 .lpg_shift = 12,
128 .map_sg = nv41_vm_map_sg,
129 .unmap = nv41_vm_unmap,
130 .flush = nv41_vm_flush,
157}; 131};
132
133int
134nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
135{
136 if (device->type == NVKM_DEVICE_AGP ||
137 !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
138 return nv04_mmu_new(device, index, pmmu);
139
140 return nv04_mmu_new_(&nv41_mmu, device, index, pmmu);
141}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
index b90ded1887aa..a648c2395545 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -23,7 +23,6 @@
23 */ 23 */
24#include "nv04.h" 24#include "nv04.h"
25 25
26#include <core/device.h>
27#include <core/gpuobj.h> 26#include <core/gpuobj.h>
28#include <core/option.h> 27#include <core/option.h>
29#include <subdev/timer.h> 28#include <subdev/timer.h>
@@ -36,16 +35,16 @@
36 ******************************************************************************/ 35 ******************************************************************************/
37 36
38static void 37static void
39nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null, 38nv44_vm_fill(struct nvkm_memory *pgt, dma_addr_t null,
40 dma_addr_t *list, u32 pte, u32 cnt) 39 dma_addr_t *list, u32 pte, u32 cnt)
41{ 40{
42 u32 base = (pte << 2) & ~0x0000000f; 41 u32 base = (pte << 2) & ~0x0000000f;
43 u32 tmp[4]; 42 u32 tmp[4];
44 43
45 tmp[0] = nv_ro32(pgt, base + 0x0); 44 tmp[0] = nvkm_ro32(pgt, base + 0x0);
46 tmp[1] = nv_ro32(pgt, base + 0x4); 45 tmp[1] = nvkm_ro32(pgt, base + 0x4);
47 tmp[2] = nv_ro32(pgt, base + 0x8); 46 tmp[2] = nvkm_ro32(pgt, base + 0x8);
48 tmp[3] = nv_ro32(pgt, base + 0xc); 47 tmp[3] = nvkm_ro32(pgt, base + 0xc);
49 48
50 while (cnt--) { 49 while (cnt--) {
51 u32 addr = list ? (*list++ >> 12) : (null >> 12); 50 u32 addr = list ? (*list++ >> 12) : (null >> 12);
@@ -75,24 +74,25 @@ nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
75 } 74 }
76 } 75 }
77 76
78 nv_wo32(pgt, base + 0x0, tmp[0]); 77 nvkm_wo32(pgt, base + 0x0, tmp[0]);
79 nv_wo32(pgt, base + 0x4, tmp[1]); 78 nvkm_wo32(pgt, base + 0x4, tmp[1]);
80 nv_wo32(pgt, base + 0x8, tmp[2]); 79 nvkm_wo32(pgt, base + 0x8, tmp[2]);
81 nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000); 80 nvkm_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
82} 81}
83 82
84static void 83static void
85nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, 84nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
86 struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) 85 struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
87{ 86{
88 struct nv04_mmu_priv *priv = (void *)vma->vm->mmu; 87 struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
89 u32 tmp[4]; 88 u32 tmp[4];
90 int i; 89 int i;
91 90
91 nvkm_kmap(pgt);
92 if (pte & 3) { 92 if (pte & 3) {
93 u32 max = 4 - (pte & 3); 93 u32 max = 4 - (pte & 3);
94 u32 part = (cnt > max) ? max : cnt; 94 u32 part = (cnt > max) ? max : cnt;
95 nv44_vm_fill(pgt, priv->null, list, pte, part); 95 nv44_vm_fill(pgt, mmu->null, list, pte, part);
96 pte += part; 96 pte += part;
97 list += part; 97 list += part;
98 cnt -= part; 98 cnt -= part;
@@ -101,51 +101,57 @@ nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
101 while (cnt >= 4) { 101 while (cnt >= 4) {
102 for (i = 0; i < 4; i++) 102 for (i = 0; i < 4; i++)
103 tmp[i] = *list++ >> 12; 103 tmp[i] = *list++ >> 12;
104 nv_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27); 104 nvkm_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27);
105 nv_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22); 105 nvkm_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22);
106 nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17); 106 nvkm_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
107 nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000); 107 nvkm_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
108 cnt -= 4; 108 cnt -= 4;
109 } 109 }
110 110
111 if (cnt) 111 if (cnt)
112 nv44_vm_fill(pgt, priv->null, list, pte, cnt); 112 nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
113 nvkm_done(pgt);
113} 114}
114 115
115static void 116static void
116nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt) 117nv44_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
117{ 118{
118 struct nv04_mmu_priv *priv = (void *)nvkm_mmu(pgt); 119 struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
119 120
121 nvkm_kmap(pgt);
120 if (pte & 3) { 122 if (pte & 3) {
121 u32 max = 4 - (pte & 3); 123 u32 max = 4 - (pte & 3);
122 u32 part = (cnt > max) ? max : cnt; 124 u32 part = (cnt > max) ? max : cnt;
123 nv44_vm_fill(pgt, priv->null, NULL, pte, part); 125 nv44_vm_fill(pgt, mmu->null, NULL, pte, part);
124 pte += part; 126 pte += part;
125 cnt -= part; 127 cnt -= part;
126 } 128 }
127 129
128 while (cnt >= 4) { 130 while (cnt >= 4) {
129 nv_wo32(pgt, pte++ * 4, 0x00000000); 131 nvkm_wo32(pgt, pte++ * 4, 0x00000000);
130 nv_wo32(pgt, pte++ * 4, 0x00000000); 132 nvkm_wo32(pgt, pte++ * 4, 0x00000000);
131 nv_wo32(pgt, pte++ * 4, 0x00000000); 133 nvkm_wo32(pgt, pte++ * 4, 0x00000000);
132 nv_wo32(pgt, pte++ * 4, 0x00000000); 134 nvkm_wo32(pgt, pte++ * 4, 0x00000000);
133 cnt -= 4; 135 cnt -= 4;
134 } 136 }
135 137
136 if (cnt) 138 if (cnt)
137 nv44_vm_fill(pgt, priv->null, NULL, pte, cnt); 139 nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
140 nvkm_done(pgt);
138} 141}
139 142
140static void 143static void
141nv44_vm_flush(struct nvkm_vm *vm) 144nv44_vm_flush(struct nvkm_vm *vm)
142{ 145{
143 struct nv04_mmu_priv *priv = (void *)vm->mmu; 146 struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
144 nv_wr32(priv, 0x100814, priv->base.limit - NV44_GART_PAGE); 147 struct nvkm_device *device = mmu->base.subdev.device;
145 nv_wr32(priv, 0x100808, 0x00000020); 148 nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
146 if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001)) 149 nvkm_wr32(device, 0x100808, 0x00000020);
147 nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808)); 150 nvkm_msec(device, 2000,
148 nv_wr32(priv, 0x100808, 0x00000000); 151 if (nvkm_rd32(device, 0x100808) & 0x00000001)
152 break;
153 );
154 nvkm_wr32(device, 0x100808, 0x00000000);
149} 155}
150 156
151/******************************************************************************* 157/*******************************************************************************
@@ -153,95 +159,78 @@ nv44_vm_flush(struct nvkm_vm *vm)
153 ******************************************************************************/ 159 ******************************************************************************/
154 160
155static int 161static int
156nv44_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 162nv44_mmu_oneinit(struct nvkm_mmu *base)
157 struct nvkm_oclass *oclass, void *data, u32 size,
158 struct nvkm_object **pobject)
159{ 163{
160 struct nvkm_device *device = nv_device(parent); 164 struct nv04_mmu *mmu = nv04_mmu(base);
161 struct nv04_mmu_priv *priv; 165 struct nvkm_device *device = mmu->base.subdev.device;
162 int ret; 166 int ret;
163 167
164 if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) || 168 mmu->nullp = dma_alloc_coherent(device->dev, 16 * 1024,
165 !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) { 169 &mmu->null, GFP_KERNEL);
166 return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass, 170 if (!mmu->nullp) {
167 data, size, pobject); 171 nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n");
168 } 172 mmu->null = 0;
169
170 ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
171 "pciegart", &priv);
172 *pobject = nv_object(priv);
173 if (ret)
174 return ret;
175
176 priv->base.create = nv04_vm_create;
177 priv->base.limit = NV44_GART_SIZE;
178 priv->base.dma_bits = 39;
179 priv->base.pgt_bits = 32 - 12;
180 priv->base.spg_shift = 12;
181 priv->base.lpg_shift = 12;
182 priv->base.map_sg = nv44_vm_map_sg;
183 priv->base.unmap = nv44_vm_unmap;
184 priv->base.flush = nv44_vm_flush;
185
186 priv->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &priv->null);
187 if (!priv->nullp) {
188 nv_error(priv, "unable to allocate dummy pages\n");
189 return -ENOMEM;
190 } 173 }
191 174
192 ret = nvkm_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096, 175 ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL,
193 &priv->vm); 176 &mmu->vm);
194 if (ret) 177 if (ret)
195 return ret; 178 return ret;
196 179
197 ret = nvkm_gpuobj_new(nv_object(priv), NULL, 180 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
198 (NV44_GART_SIZE / NV44_GART_PAGE) * 4, 181 (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
199 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC, 182 512 * 1024, true,
200 &priv->vm->pgt[0].obj[0]); 183 &mmu->vm->pgt[0].mem[0]);
201 priv->vm->pgt[0].refcount[0] = 1; 184 mmu->vm->pgt[0].refcount[0] = 1;
202 if (ret) 185 return ret;
203 return ret;
204
205 return 0;
206} 186}
207 187
208static int 188static void
209nv44_mmu_init(struct nvkm_object *object) 189nv44_mmu_init(struct nvkm_mmu *base)
210{ 190{
211 struct nv04_mmu_priv *priv = (void *)object; 191 struct nv04_mmu *mmu = nv04_mmu(base);
212 struct nvkm_gpuobj *gart = priv->vm->pgt[0].obj[0]; 192 struct nvkm_device *device = mmu->base.subdev.device;
193 struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0];
213 u32 addr; 194 u32 addr;
214 int ret;
215
216 ret = nvkm_mmu_init(&priv->base);
217 if (ret)
218 return ret;
219 195
220 /* calculate vram address of this PRAMIN block, object must be 196 /* calculate vram address of this PRAMIN block, object must be
221 * allocated on 512KiB alignment, and not exceed a total size 197 * allocated on 512KiB alignment, and not exceed a total size
222 * of 512KiB for this to work correctly 198 * of 512KiB for this to work correctly
223 */ 199 */
224 addr = nv_rd32(priv, 0x10020c); 200 addr = nvkm_rd32(device, 0x10020c);
225 addr -= ((gart->addr >> 19) + 1) << 19; 201 addr -= ((nvkm_memory_addr(gart) >> 19) + 1) << 19;
226 202
227 nv_wr32(priv, 0x100850, 0x80000000); 203 nvkm_wr32(device, 0x100850, 0x80000000);
228 nv_wr32(priv, 0x100818, priv->null); 204 nvkm_wr32(device, 0x100818, mmu->null);
229 nv_wr32(priv, 0x100804, NV44_GART_SIZE); 205 nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
230 nv_wr32(priv, 0x100850, 0x00008000); 206 nvkm_wr32(device, 0x100850, 0x00008000);
231 nv_mask(priv, 0x10008c, 0x00000200, 0x00000200); 207 nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
232 nv_wr32(priv, 0x100820, 0x00000000); 208 nvkm_wr32(device, 0x100820, 0x00000000);
233 nv_wr32(priv, 0x10082c, 0x00000001); 209 nvkm_wr32(device, 0x10082c, 0x00000001);
234 nv_wr32(priv, 0x100800, addr | 0x00000010); 210 nvkm_wr32(device, 0x100800, addr | 0x00000010);
235 return 0;
236} 211}
237 212
238struct nvkm_oclass 213static const struct nvkm_mmu_func
239nv44_mmu_oclass = { 214nv44_mmu = {
240 .handle = NV_SUBDEV(MMU, 0x44), 215 .dtor = nv04_mmu_dtor,
241 .ofuncs = &(struct nvkm_ofuncs) { 216 .oneinit = nv44_mmu_oneinit,
242 .ctor = nv44_mmu_ctor, 217 .init = nv44_mmu_init,
243 .dtor = nv04_mmu_dtor, 218 .limit = NV44_GART_SIZE,
244 .init = nv44_mmu_init, 219 .dma_bits = 39,
245 .fini = _nvkm_mmu_fini, 220 .pgt_bits = 32 - 12,
246 }, 221 .spg_shift = 12,
222 .lpg_shift = 12,
223 .map_sg = nv44_vm_map_sg,
224 .unmap = nv44_vm_unmap,
225 .flush = nv44_vm_flush,
247}; 226};
227
228int
229nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
230{
231 if (device->type == NVKM_DEVICE_AGP ||
232 !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
233 return nv04_mmu_new(device, index, pmmu);
234
235 return nv04_mmu_new_(&nv44_mmu, device, index, pmmu);
236}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index b83550fa7f96..a1f8d65f0276 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -21,31 +21,28 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <subdev/mmu.h> 24#include "priv.h"
25#include <subdev/bar.h>
26#include <subdev/fb.h>
27#include <subdev/timer.h>
28 25
29#include <core/engine.h>
30#include <core/gpuobj.h> 26#include <core/gpuobj.h>
31 27#include <subdev/fb.h>
32struct nv50_mmu_priv { 28#include <subdev/timer.h>
33 struct nvkm_mmu base; 29#include <engine/gr.h>
34};
35 30
36static void 31static void
37nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_gpuobj *pgt[2]) 32nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_memory *pgt[2])
38{ 33{
39 u64 phys = 0xdeadcafe00000000ULL; 34 u64 phys = 0xdeadcafe00000000ULL;
40 u32 coverage = 0; 35 u32 coverage = 0;
41 36
42 if (pgt[0]) { 37 if (pgt[0]) {
43 phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */ 38 /* present, 4KiB pages */
44 coverage = (pgt[0]->size >> 3) << 12; 39 phys = 0x00000003 | nvkm_memory_addr(pgt[0]);
40 coverage = (nvkm_memory_size(pgt[0]) >> 3) << 12;
45 } else 41 } else
46 if (pgt[1]) { 42 if (pgt[1]) {
47 phys = 0x00000001 | pgt[1]->addr; /* present */ 43 /* present, 64KiB pages */
48 coverage = (pgt[1]->size >> 3) << 16; 44 phys = 0x00000001 | nvkm_memory_addr(pgt[1]);
45 coverage = (nvkm_memory_size(pgt[1]) >> 3) << 16;
49 } 46 }
50 47
51 if (phys & 1) { 48 if (phys & 1) {
@@ -57,8 +54,10 @@ nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_gpuobj *pgt[2])
57 phys |= 0x20; 54 phys |= 0x20;
58 } 55 }
59 56
60 nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys)); 57 nvkm_kmap(pgd);
61 nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys)); 58 nvkm_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
59 nvkm_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
60 nvkm_done(pgd);
62} 61}
63 62
64static inline u64 63static inline u64
@@ -75,17 +74,18 @@ vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
75} 74}
76 75
77static void 76static void
78nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, 77nv50_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
79 struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) 78 struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
80{ 79{
80 struct nvkm_ram *ram = vma->vm->mmu->subdev.device->fb->ram;
81 u32 comp = (mem->memtype & 0x180) >> 7; 81 u32 comp = (mem->memtype & 0x180) >> 7;
82 u32 block, target; 82 u32 block, target;
83 int i; 83 int i;
84 84
85 /* IGPs don't have real VRAM, re-target to stolen system memory */ 85 /* IGPs don't have real VRAM, re-target to stolen system memory */
86 target = 0; 86 target = 0;
87 if (nvkm_fb(vma->vm->mmu)->ram->stolen) { 87 if (ram->stolen) {
88 phys += nvkm_fb(vma->vm->mmu)->ram->stolen; 88 phys += ram->stolen;
89 target = 3; 89 target = 3;
90 } 90 }
91 91
@@ -93,6 +93,7 @@ nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
93 pte <<= 3; 93 pte <<= 3;
94 cnt <<= 3; 94 cnt <<= 3;
95 95
96 nvkm_kmap(pgt);
96 while (cnt) { 97 while (cnt) {
97 u32 offset_h = upper_32_bits(phys); 98 u32 offset_h = upper_32_bits(phys);
98 u32 offset_l = lower_32_bits(phys); 99 u32 offset_l = lower_32_bits(phys);
@@ -113,129 +114,118 @@ nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
113 } 114 }
114 115
115 while (block) { 116 while (block) {
116 nv_wo32(pgt, pte + 0, offset_l); 117 nvkm_wo32(pgt, pte + 0, offset_l);
117 nv_wo32(pgt, pte + 4, offset_h); 118 nvkm_wo32(pgt, pte + 4, offset_h);
118 pte += 8; 119 pte += 8;
119 block -= 8; 120 block -= 8;
120 } 121 }
121 } 122 }
123 nvkm_done(pgt);
122} 124}
123 125
124static void 126static void
125nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, 127nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
126 struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) 128 struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
127{ 129{
128 u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2; 130 u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
129 pte <<= 3; 131 pte <<= 3;
132 nvkm_kmap(pgt);
130 while (cnt--) { 133 while (cnt--) {
131 u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target); 134 u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
132 nv_wo32(pgt, pte + 0, lower_32_bits(phys)); 135 nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
133 nv_wo32(pgt, pte + 4, upper_32_bits(phys)); 136 nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
134 pte += 8; 137 pte += 8;
135 } 138 }
139 nvkm_done(pgt);
136} 140}
137 141
138static void 142static void
139nv50_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt) 143nv50_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
140{ 144{
141 pte <<= 3; 145 pte <<= 3;
146 nvkm_kmap(pgt);
142 while (cnt--) { 147 while (cnt--) {
143 nv_wo32(pgt, pte + 0, 0x00000000); 148 nvkm_wo32(pgt, pte + 0, 0x00000000);
144 nv_wo32(pgt, pte + 4, 0x00000000); 149 nvkm_wo32(pgt, pte + 4, 0x00000000);
145 pte += 8; 150 pte += 8;
146 } 151 }
152 nvkm_done(pgt);
147} 153}
148 154
149static void 155static void
150nv50_vm_flush(struct nvkm_vm *vm) 156nv50_vm_flush(struct nvkm_vm *vm)
151{ 157{
152 struct nv50_mmu_priv *priv = (void *)vm->mmu; 158 struct nvkm_mmu *mmu = vm->mmu;
153 struct nvkm_bar *bar = nvkm_bar(priv); 159 struct nvkm_subdev *subdev = &mmu->subdev;
154 struct nvkm_engine *engine; 160 struct nvkm_device *device = subdev->device;
155 int i, vme; 161 int i, vme;
156 162
157 bar->flush(bar); 163 mutex_lock(&subdev->mutex);
158 164 for (i = 0; i < NVKM_SUBDEV_NR; i++) {
159 mutex_lock(&nv_subdev(priv)->mutex);
160 for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
161 if (!atomic_read(&vm->engref[i])) 165 if (!atomic_read(&vm->engref[i]))
162 continue; 166 continue;
163 167
164 /* unfortunate hw bug workaround... */ 168 /* unfortunate hw bug workaround... */
165 engine = nvkm_engine(priv, i); 169 if (i == NVKM_ENGINE_GR && device->gr) {
166 if (engine && engine->tlb_flush) { 170 int ret = nvkm_gr_tlb_flush(device->gr);
167 engine->tlb_flush(engine); 171 if (ret != -ENODEV)
168 continue; 172 continue;
169 } 173 }
170 174
171 switch (i) { 175 switch (i) {
172 case NVDEV_ENGINE_GR : vme = 0x00; break; 176 case NVKM_ENGINE_GR : vme = 0x00; break;
173 case NVDEV_ENGINE_VP : 177 case NVKM_ENGINE_VP :
174 case NVDEV_ENGINE_MSPDEC: vme = 0x01; break; 178 case NVKM_ENGINE_MSPDEC: vme = 0x01; break;
175 case NVDEV_SUBDEV_BAR : vme = 0x06; break; 179 case NVKM_SUBDEV_BAR : vme = 0x06; break;
176 case NVDEV_ENGINE_MSPPP : 180 case NVKM_ENGINE_MSPPP :
177 case NVDEV_ENGINE_MPEG : vme = 0x08; break; 181 case NVKM_ENGINE_MPEG : vme = 0x08; break;
178 case NVDEV_ENGINE_BSP : 182 case NVKM_ENGINE_BSP :
179 case NVDEV_ENGINE_MSVLD : vme = 0x09; break; 183 case NVKM_ENGINE_MSVLD : vme = 0x09; break;
180 case NVDEV_ENGINE_CIPHER: 184 case NVKM_ENGINE_CIPHER:
181 case NVDEV_ENGINE_SEC : vme = 0x0a; break; 185 case NVKM_ENGINE_SEC : vme = 0x0a; break;
182 case NVDEV_ENGINE_CE0 : vme = 0x0d; break; 186 case NVKM_ENGINE_CE0 : vme = 0x0d; break;
183 default: 187 default:
184 continue; 188 continue;
185 } 189 }
186 190
187 nv_wr32(priv, 0x100c80, (vme << 16) | 1); 191 nvkm_wr32(device, 0x100c80, (vme << 16) | 1);
188 if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000)) 192 if (nvkm_msec(device, 2000,
189 nv_error(priv, "vm flush timeout: engine %d\n", vme); 193 if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
194 break;
195 ) < 0)
196 nvkm_error(subdev, "vm flush timeout: engine %d\n", vme);
190 } 197 }
191 mutex_unlock(&nv_subdev(priv)->mutex); 198 mutex_unlock(&subdev->mutex);
192} 199}
193 200
194static int 201static int
195nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, 202nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
196 u64 mm_offset, struct nvkm_vm **pvm) 203 struct lock_class_key *key, struct nvkm_vm **pvm)
197{ 204{
198 u32 block = (1 << (mmu->pgt_bits + 12)); 205 u32 block = (1 << (mmu->func->pgt_bits + 12));
199 if (block > length) 206 if (block > length)
200 block = length; 207 block = length;
201 208
202 return nvkm_vm_create(mmu, offset, length, mm_offset, block, pvm); 209 return nvkm_vm_create(mmu, offset, length, mm_offset, block, key, pvm);
203} 210}
204 211
205static int 212static const struct nvkm_mmu_func
206nv50_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 213nv50_mmu = {
207 struct nvkm_oclass *oclass, void *data, u32 size, 214 .limit = (1ULL << 40),
208 struct nvkm_object **pobject) 215 .dma_bits = 40,
216 .pgt_bits = 29 - 12,
217 .spg_shift = 12,
218 .lpg_shift = 16,
219 .create = nv50_vm_create,
220 .map_pgt = nv50_vm_map_pgt,
221 .map = nv50_vm_map,
222 .map_sg = nv50_vm_map_sg,
223 .unmap = nv50_vm_unmap,
224 .flush = nv50_vm_flush,
225};
226
227int
228nv50_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
209{ 229{
210 struct nv50_mmu_priv *priv; 230 return nvkm_mmu_new_(&nv50_mmu, device, index, pmmu);
211 int ret;
212
213 ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
214 *pobject = nv_object(priv);
215 if (ret)
216 return ret;
217
218 priv->base.limit = 1ULL << 40;
219 priv->base.dma_bits = 40;
220 priv->base.pgt_bits = 29 - 12;
221 priv->base.spg_shift = 12;
222 priv->base.lpg_shift = 16;
223 priv->base.create = nv50_vm_create;
224 priv->base.map_pgt = nv50_vm_map_pgt;
225 priv->base.map = nv50_vm_map;
226 priv->base.map_sg = nv50_vm_map_sg;
227 priv->base.unmap = nv50_vm_unmap;
228 priv->base.flush = nv50_vm_flush;
229 return 0;
230} 231}
231
232struct nvkm_oclass
233nv50_mmu_oclass = {
234 .handle = NV_SUBDEV(MMU, 0x50),
235 .ofuncs = &(struct nvkm_ofuncs) {
236 .ctor = nv50_mmu_ctor,
237 .dtor = _nvkm_mmu_dtor,
238 .init = _nvkm_mmu_init,
239 .fini = _nvkm_mmu_fini,
240 },
241};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
new file mode 100644
index 000000000000..27cedc60b507
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -0,0 +1,39 @@
1#ifndef __NVKM_MMU_PRIV_H__
2#define __NVKM_MMU_PRIV_H__
3#define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
4#include <subdev/mmu.h>
5
6void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *,
7 int index, struct nvkm_mmu *);
8int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
9 int index, struct nvkm_mmu **);
10
11struct nvkm_mmu_func {
12 void *(*dtor)(struct nvkm_mmu *);
13 int (*oneinit)(struct nvkm_mmu *);
14 void (*init)(struct nvkm_mmu *);
15
16 u64 limit;
17 u8 dma_bits;
18 u32 pgt_bits;
19 u8 spg_shift;
20 u8 lpg_shift;
21
22 int (*create)(struct nvkm_mmu *, u64 offset, u64 length, u64 mm_offset,
23 struct lock_class_key *, struct nvkm_vm **);
24
25 void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
26 struct nvkm_memory *pgt[2]);
27 void (*map)(struct nvkm_vma *, struct nvkm_memory *,
28 struct nvkm_mem *, u32 pte, u32 cnt,
29 u64 phys, u64 delta);
30 void (*map_sg)(struct nvkm_vma *, struct nvkm_memory *,
31 struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
32 void (*unmap)(struct nvkm_vma *, struct nvkm_memory *pgt,
33 u32 pte, u32 cnt);
34 void (*flush)(struct nvkm_vm *);
35};
36
37int nvkm_vm_create(struct nvkm_mmu *, u64, u64, u64, u32,
38 struct lock_class_key *, struct nvkm_vm **);
39#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index 0ca9dcabb6d3..9700a7625012 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -23,14 +23,13 @@
23 */ 23 */
24#include "mxms.h" 24#include "mxms.h"
25 25
26#include <core/device.h>
27#include <core/option.h> 26#include <core/option.h>
28#include <subdev/bios.h> 27#include <subdev/bios.h>
29#include <subdev/bios/mxm.h> 28#include <subdev/bios/mxm.h>
30#include <subdev/i2c.h> 29#include <subdev/i2c.h>
31 30
32static bool 31static bool
33mxm_shadow_rom_fetch(struct nvkm_i2c_port *i2c, u8 addr, 32mxm_shadow_rom_fetch(struct nvkm_i2c_bus *bus, u8 addr,
34 u8 offset, u8 size, u8 *data) 33 u8 offset, u8 size, u8 *data)
35{ 34{
36 struct i2c_msg msgs[] = { 35 struct i2c_msg msgs[] = {
@@ -38,27 +37,28 @@ mxm_shadow_rom_fetch(struct nvkm_i2c_port *i2c, u8 addr,
38 { .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, }, 37 { .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
39 }; 38 };
40 39
41 return i2c_transfer(&i2c->adapter, msgs, 2) == 2; 40 return i2c_transfer(&bus->i2c, msgs, 2) == 2;
42} 41}
43 42
44static bool 43static bool
45mxm_shadow_rom(struct nvkm_mxm *mxm, u8 version) 44mxm_shadow_rom(struct nvkm_mxm *mxm, u8 version)
46{ 45{
47 struct nvkm_bios *bios = nvkm_bios(mxm); 46 struct nvkm_device *device = mxm->subdev.device;
48 struct nvkm_i2c *i2c = nvkm_i2c(mxm); 47 struct nvkm_bios *bios = device->bios;
49 struct nvkm_i2c_port *port = NULL; 48 struct nvkm_i2c *i2c = device->i2c;
49 struct nvkm_i2c_bus *bus = NULL;
50 u8 i2cidx, mxms[6], addr, size; 50 u8 i2cidx, mxms[6], addr, size;
51 51
52 i2cidx = mxm_ddc_map(bios, 1 /* LVDS_DDC */) & 0x0f; 52 i2cidx = mxm_ddc_map(bios, 1 /* LVDS_DDC */) & 0x0f;
53 if (i2cidx < 0x0f) 53 if (i2cidx < 0x0f)
54 port = i2c->find(i2c, i2cidx); 54 bus = nvkm_i2c_bus_find(i2c, i2cidx);
55 if (!port) 55 if (!bus)
56 return false; 56 return false;
57 57
58 addr = 0x54; 58 addr = 0x54;
59 if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms)) { 59 if (!mxm_shadow_rom_fetch(bus, addr, 0, 6, mxms)) {
60 addr = 0x56; 60 addr = 0x56;
61 if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms)) 61 if (!mxm_shadow_rom_fetch(bus, addr, 0, 6, mxms))
62 return false; 62 return false;
63 } 63 }
64 64
@@ -67,7 +67,7 @@ mxm_shadow_rom(struct nvkm_mxm *mxm, u8 version)
67 mxm->mxms = kmalloc(size, GFP_KERNEL); 67 mxm->mxms = kmalloc(size, GFP_KERNEL);
68 68
69 if (mxm->mxms && 69 if (mxm->mxms &&
70 mxm_shadow_rom_fetch(port, addr, 0, size, mxm->mxms)) 70 mxm_shadow_rom_fetch(bus, addr, 0, size, mxm->mxms))
71 return true; 71 return true;
72 72
73 kfree(mxm->mxms); 73 kfree(mxm->mxms);
@@ -79,7 +79,8 @@ mxm_shadow_rom(struct nvkm_mxm *mxm, u8 version)
79static bool 79static bool
80mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version) 80mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
81{ 81{
82 struct nvkm_device *device = nv_device(mxm); 82 struct nvkm_subdev *subdev = &mxm->subdev;
83 struct nvkm_device *device = subdev->device;
83 static char muid[] = { 84 static char muid[] = {
84 0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C, 85 0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
85 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65 86 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
@@ -94,7 +95,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
94 acpi_handle handle; 95 acpi_handle handle;
95 int rev; 96 int rev;
96 97
97 handle = ACPI_HANDLE(nv_device_base(device)); 98 handle = ACPI_HANDLE(device->dev);
98 if (!handle) 99 if (!handle)
99 return false; 100 return false;
100 101
@@ -106,7 +107,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
106 rev = (version & 0xf0) << 4 | (version & 0x0f); 107 rev = (version & 0xf0) << 4 | (version & 0x0f);
107 obj = acpi_evaluate_dsm(handle, muid, rev, 0x00000010, &argv4); 108 obj = acpi_evaluate_dsm(handle, muid, rev, 0x00000010, &argv4);
108 if (!obj) { 109 if (!obj) {
109 nv_debug(mxm, "DSM MXMS failed\n"); 110 nvkm_debug(subdev, "DSM MXMS failed\n");
110 return false; 111 return false;
111 } 112 }
112 113
@@ -114,7 +115,8 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
114 mxm->mxms = kmemdup(obj->buffer.pointer, 115 mxm->mxms = kmemdup(obj->buffer.pointer,
115 obj->buffer.length, GFP_KERNEL); 116 obj->buffer.length, GFP_KERNEL);
116 } else if (obj->type == ACPI_TYPE_INTEGER) { 117 } else if (obj->type == ACPI_TYPE_INTEGER) {
117 nv_debug(mxm, "DSM MXMS returned 0x%llx\n", obj->integer.value); 118 nvkm_debug(subdev, "DSM MXMS returned 0x%llx\n",
119 obj->integer.value);
118 } 120 }
119 121
120 ACPI_FREE(obj); 122 ACPI_FREE(obj);
@@ -129,6 +131,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
129static u8 131static u8
130wmi_wmmx_mxmi(struct nvkm_mxm *mxm, u8 version) 132wmi_wmmx_mxmi(struct nvkm_mxm *mxm, u8 version)
131{ 133{
134 struct nvkm_subdev *subdev = &mxm->subdev;
132 u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 }; 135 u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
133 struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args }; 136 struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
134 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL }; 137 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -137,18 +140,18 @@ wmi_wmmx_mxmi(struct nvkm_mxm *mxm, u8 version)
137 140
138 status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn); 141 status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
139 if (ACPI_FAILURE(status)) { 142 if (ACPI_FAILURE(status)) {
140 nv_debug(mxm, "WMMX MXMI returned %d\n", status); 143 nvkm_debug(subdev, "WMMX MXMI returned %d\n", status);
141 return 0x00; 144 return 0x00;
142 } 145 }
143 146
144 obj = retn.pointer; 147 obj = retn.pointer;
145 if (obj->type == ACPI_TYPE_INTEGER) { 148 if (obj->type == ACPI_TYPE_INTEGER) {
146 version = obj->integer.value; 149 version = obj->integer.value;
147 nv_debug(mxm, "WMMX MXMI version %d.%d\n", 150 nvkm_debug(subdev, "WMMX MXMI version %d.%d\n",
148 (version >> 4), version & 0x0f); 151 (version >> 4), version & 0x0f);
149 } else { 152 } else {
150 version = 0; 153 version = 0;
151 nv_debug(mxm, "WMMX MXMI returned non-integer\n"); 154 nvkm_debug(subdev, "WMMX MXMI returned non-integer\n");
152 } 155 }
153 156
154 kfree(obj); 157 kfree(obj);
@@ -158,6 +161,7 @@ wmi_wmmx_mxmi(struct nvkm_mxm *mxm, u8 version)
158static bool 161static bool
159mxm_shadow_wmi(struct nvkm_mxm *mxm, u8 version) 162mxm_shadow_wmi(struct nvkm_mxm *mxm, u8 version)
160{ 163{
164 struct nvkm_subdev *subdev = &mxm->subdev;
161 u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 }; 165 u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
162 struct acpi_buffer args = { sizeof(mxms_args), mxms_args }; 166 struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
163 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL }; 167 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -165,7 +169,7 @@ mxm_shadow_wmi(struct nvkm_mxm *mxm, u8 version)
165 acpi_status status; 169 acpi_status status;
166 170
167 if (!wmi_has_guid(WMI_WMMX_GUID)) { 171 if (!wmi_has_guid(WMI_WMMX_GUID)) {
168 nv_debug(mxm, "WMMX GUID not found\n"); 172 nvkm_debug(subdev, "WMMX GUID not found\n");
169 return false; 173 return false;
170 } 174 }
171 175
@@ -177,7 +181,7 @@ mxm_shadow_wmi(struct nvkm_mxm *mxm, u8 version)
177 181
178 status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn); 182 status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
179 if (ACPI_FAILURE(status)) { 183 if (ACPI_FAILURE(status)) {
180 nv_debug(mxm, "WMMX MXMS returned %d\n", status); 184 nvkm_debug(subdev, "WMMX MXMS returned %d\n", status);
181 return false; 185 return false;
182 } 186 }
183 187
@@ -211,7 +215,7 @@ mxm_shadow(struct nvkm_mxm *mxm, u8 version)
211{ 215{
212 struct mxm_shadow_h *shadow = _mxm_shadow; 216 struct mxm_shadow_h *shadow = _mxm_shadow;
213 do { 217 do {
214 nv_debug(mxm, "checking %s\n", shadow->name); 218 nvkm_debug(&mxm->subdev, "checking %s\n", shadow->name);
215 if (shadow->exec(mxm, version)) { 219 if (shadow->exec(mxm, version)) {
216 if (mxms_valid(mxm)) 220 if (mxms_valid(mxm))
217 return 0; 221 return 0;
@@ -222,33 +226,33 @@ mxm_shadow(struct nvkm_mxm *mxm, u8 version)
222 return -ENOENT; 226 return -ENOENT;
223} 227}
224 228
229static const struct nvkm_subdev_func
230nvkm_mxm = {
231};
232
225int 233int
226nvkm_mxm_create_(struct nvkm_object *parent, struct nvkm_object *engine, 234nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm)
227 struct nvkm_oclass *oclass, int length, void **pobject)
228{ 235{
229 struct nvkm_device *device = nv_device(parent); 236 struct nvkm_bios *bios = device->bios;
230 struct nvkm_bios *bios = nvkm_bios(device);
231 struct nvkm_mxm *mxm; 237 struct nvkm_mxm *mxm;
232 u8 ver, len; 238 u8 ver, len;
233 u16 data; 239 u16 data;
234 int ret;
235 240
236 ret = nvkm_subdev_create_(parent, engine, oclass, 0, "MXM", "mxm", 241 if (!(mxm = *pmxm = kzalloc(sizeof(*mxm), GFP_KERNEL)))
237 length, pobject); 242 return -ENOMEM;
238 mxm = *pobject; 243
239 if (ret) 244 nvkm_subdev_ctor(&nvkm_mxm, device, index, 0, &mxm->subdev);
240 return ret;
241 245
242 data = mxm_table(bios, &ver, &len); 246 data = mxm_table(bios, &ver, &len);
243 if (!data || !(ver = nv_ro08(bios, data))) { 247 if (!data || !(ver = nvbios_rd08(bios, data))) {
244 nv_debug(mxm, "no VBIOS data, nothing to do\n"); 248 nvkm_debug(&mxm->subdev, "no VBIOS data, nothing to do\n");
245 return 0; 249 return 0;
246 } 250 }
247 251
248 nv_info(mxm, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f); 252 nvkm_info(&mxm->subdev, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
249 253
250 if (mxm_shadow(mxm, ver)) { 254 if (mxm_shadow(mxm, ver)) {
251 nv_info(mxm, "failed to locate valid SIS\n"); 255 nvkm_warn(&mxm->subdev, "failed to locate valid SIS\n");
252#if 0 256#if 0
253 /* we should, perhaps, fall back to some kind of limited 257 /* we should, perhaps, fall back to some kind of limited
254 * mode here if the x86 vbios hasn't already done the 258 * mode here if the x86 vbios hasn't already done the
@@ -261,8 +265,8 @@ nvkm_mxm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
261#endif 265#endif
262 } 266 }
263 267
264 nv_info(mxm, "MXMS Version %d.%d\n", 268 nvkm_debug(&mxm->subdev, "MXMS Version %d.%d\n",
265 mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff); 269 mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff);
266 mxms_foreach(mxm, 0, NULL, NULL); 270 mxms_foreach(mxm, 0, NULL, NULL);
267 271
268 if (nvkm_boolopt(device->cfgopt, "NvMXMDCB", true)) 272 if (nvkm_boolopt(device->cfgopt, "NvMXMDCB", true))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
index a9b1d63fed58..45a2f8e784f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
@@ -47,7 +47,7 @@ mxms_version(struct nvkm_mxm *mxm)
47 break; 47 break;
48 } 48 }
49 49
50 nv_debug(mxm, "unknown version %d.%d\n", mxms[4], mxms[5]); 50 nvkm_debug(&mxm->subdev, "unknown version %d.%d\n", mxms[4], mxms[5]);
51 return 0x0000; 51 return 0x0000;
52} 52}
53 53
@@ -71,7 +71,7 @@ mxms_checksum(struct nvkm_mxm *mxm)
71 while (size--) 71 while (size--)
72 sum += *mxms++; 72 sum += *mxms++;
73 if (sum) { 73 if (sum) {
74 nv_debug(mxm, "checksum invalid\n"); 74 nvkm_debug(&mxm->subdev, "checksum invalid\n");
75 return false; 75 return false;
76 } 76 }
77 return true; 77 return true;
@@ -82,7 +82,7 @@ mxms_valid(struct nvkm_mxm *mxm)
82{ 82{
83 u8 *mxms = mxms_data(mxm); 83 u8 *mxms = mxms_data(mxm);
84 if (*(u32 *)mxms != 0x5f4d584d) { 84 if (*(u32 *)mxms != 0x5f4d584d) {
85 nv_debug(mxm, "signature invalid\n"); 85 nvkm_debug(&mxm->subdev, "signature invalid\n");
86 return false; 86 return false;
87 } 87 }
88 88
@@ -96,6 +96,7 @@ bool
96mxms_foreach(struct nvkm_mxm *mxm, u8 types, 96mxms_foreach(struct nvkm_mxm *mxm, u8 types,
97 bool (*exec)(struct nvkm_mxm *, u8 *, void *), void *info) 97 bool (*exec)(struct nvkm_mxm *, u8 *, void *), void *info)
98{ 98{
99 struct nvkm_subdev *subdev = &mxm->subdev;
99 u8 *mxms = mxms_data(mxm); 100 u8 *mxms = mxms_data(mxm);
100 u8 *desc = mxms + mxms_headerlen(mxm); 101 u8 *desc = mxms + mxms_headerlen(mxm);
101 u8 *fini = desc + mxms_structlen(mxm) - 1; 102 u8 *fini = desc + mxms_structlen(mxm) - 1;
@@ -140,29 +141,28 @@ mxms_foreach(struct nvkm_mxm *mxm, u8 types,
140 entries = desc[1] & 0x07; 141 entries = desc[1] & 0x07;
141 break; 142 break;
142 default: 143 default:
143 nv_debug(mxm, "unknown descriptor type %d\n", type); 144 nvkm_debug(subdev, "unknown descriptor type %d\n", type);
144 return false; 145 return false;
145 } 146 }
146 147
147 if (nv_subdev(mxm)->debug >= NV_DBG_DEBUG && (exec == NULL)) { 148 if (mxm->subdev.debug >= NV_DBG_DEBUG && (exec == NULL)) {
148 static const char * mxms_desc_name[] = { 149 static const char * mxms_desc[] = {
149 "ODS", "SCCS", "TS", "IPS", 150 "ODS", "SCCS", "TS", "IPS",
150 "GSD", "VSS", "BCS", "FCS", 151 "GSD", "VSS", "BCS", "FCS",
151 }; 152 };
152 u8 *dump = desc; 153 u8 *dump = desc;
154 char data[32], *ptr;
153 int i, j; 155 int i, j;
154 156
155 nv_debug(mxm, "%4s: ", mxms_desc_name[type]); 157 for (j = headerlen - 1, ptr = data; j >= 0; j--)
156 for (j = headerlen - 1; j >= 0; j--) 158 ptr += sprintf(ptr, "%02x", dump[j]);
157 pr_cont("%02x", dump[j]);
158 pr_cont("\n");
159 dump += headerlen; 159 dump += headerlen;
160 160
161 nvkm_debug(subdev, "%4s: %s\n", mxms_desc[type], data);
161 for (i = 0; i < entries; i++, dump += recordlen) { 162 for (i = 0; i < entries; i++, dump += recordlen) {
162 nv_debug(mxm, " "); 163 for (j = recordlen - 1, ptr = data; j >= 0; j--)
163 for (j = recordlen - 1; j >= 0; j--) 164 ptr += sprintf(ptr, "%02x", dump[j]);
164 pr_cont("%02x", dump[j]); 165 nvkm_debug(subdev, " %s\n", data);
165 pr_cont("\n");
166 } 166 }
167 } 167 }
168 168
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
index 4ef804012d06..333e0c01545a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
@@ -1,6 +1,6 @@
1#ifndef __NVMXM_MXMS_H__ 1#ifndef __NVMXM_MXMS_H__
2#define __NVMXM_MXMS_H__ 2#define __NVMXM_MXMS_H__
3#include <subdev/mxm.h> 3#include "priv.h"
4 4
5struct mxms_odev { 5struct mxms_odev {
6 u8 outp_type; 6 u8 outp_type;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index f20e4ca87e17..db14fad2ddfc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -28,10 +28,6 @@
28#include <subdev/bios/dcb.h> 28#include <subdev/bios/dcb.h>
29#include <subdev/bios/mxm.h> 29#include <subdev/bios/mxm.h>
30 30
31struct nv50_mxm_priv {
32 struct nvkm_mxm base;
33};
34
35struct context { 31struct context {
36 u32 *outp; 32 u32 *outp;
37 struct mxms_odev desc; 33 struct mxms_odev desc;
@@ -53,7 +49,7 @@ mxm_match_tmds_partner(struct nvkm_mxm *mxm, u8 *data, void *info)
53static bool 49static bool
54mxm_match_dcb(struct nvkm_mxm *mxm, u8 *data, void *info) 50mxm_match_dcb(struct nvkm_mxm *mxm, u8 *data, void *info)
55{ 51{
56 struct nvkm_bios *bios = nvkm_bios(mxm); 52 struct nvkm_bios *bios = mxm->subdev.device->bios;
57 struct context *ctx = info; 53 struct context *ctx = info;
58 u64 desc = *(u64 *)data; 54 u64 desc = *(u64 *)data;
59 55
@@ -107,8 +103,8 @@ mxm_dcb_sanitise_entry(struct nvkm_bios *bios, void *data, int idx, u16 pdcb)
107 * if one isn't found, disable it. 103 * if one isn't found, disable it.
108 */ 104 */
109 if (mxms_foreach(mxm, 0x01, mxm_match_dcb, &ctx)) { 105 if (mxms_foreach(mxm, 0x01, mxm_match_dcb, &ctx)) {
110 nv_debug(mxm, "disable %d: 0x%08x 0x%08x\n", 106 nvkm_debug(&mxm->subdev, "disable %d: %08x %08x\n",
111 idx, ctx.outp[0], ctx.outp[1]); 107 idx, ctx.outp[0], ctx.outp[1]);
112 ctx.outp[0] |= 0x0000000f; 108 ctx.outp[0] |= 0x0000000f;
113 return 0; 109 return 0;
114 } 110 }
@@ -180,20 +176,22 @@ mxm_dcb_sanitise_entry(struct nvkm_bios *bios, void *data, int idx, u16 pdcb)
180static bool 176static bool
181mxm_show_unmatched(struct nvkm_mxm *mxm, u8 *data, void *info) 177mxm_show_unmatched(struct nvkm_mxm *mxm, u8 *data, void *info)
182{ 178{
179 struct nvkm_subdev *subdev = &mxm->subdev;
183 u64 desc = *(u64 *)data; 180 u64 desc = *(u64 *)data;
184 if ((desc & 0xf0) != 0xf0) 181 if ((desc & 0xf0) != 0xf0)
185 nv_info(mxm, "unmatched output device 0x%016llx\n", desc); 182 nvkm_info(subdev, "unmatched output device %016llx\n", desc);
186 return true; 183 return true;
187} 184}
188 185
189static void 186static void
190mxm_dcb_sanitise(struct nvkm_mxm *mxm) 187mxm_dcb_sanitise(struct nvkm_mxm *mxm)
191{ 188{
192 struct nvkm_bios *bios = nvkm_bios(mxm); 189 struct nvkm_subdev *subdev = &mxm->subdev;
190 struct nvkm_bios *bios = subdev->device->bios;
193 u8 ver, hdr, cnt, len; 191 u8 ver, hdr, cnt, len;
194 u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len); 192 u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
195 if (dcb == 0x0000 || ver != 0x40) { 193 if (dcb == 0x0000 || ver != 0x40) {
196 nv_debug(mxm, "unsupported DCB version\n"); 194 nvkm_debug(subdev, "unsupported DCB version\n");
197 return; 195 return;
198 } 196 }
199 197
@@ -201,31 +199,20 @@ mxm_dcb_sanitise(struct nvkm_mxm *mxm)
201 mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL); 199 mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
202} 200}
203 201
204static int 202int
205nv50_mxm_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 203nv50_mxm_new(struct nvkm_device *device, int index, struct nvkm_subdev **pmxm)
206 struct nvkm_oclass *oclass, void *data, u32 size,
207 struct nvkm_object **pobject)
208{ 204{
209 struct nv50_mxm_priv *priv; 205 struct nvkm_mxm *mxm;
210 int ret; 206 int ret;
211 207
212 ret = nvkm_mxm_create(parent, engine, oclass, &priv); 208 ret = nvkm_mxm_new_(device, index, &mxm);
213 *pobject = nv_object(priv); 209 if (mxm)
210 *pmxm = &mxm->subdev;
214 if (ret) 211 if (ret)
215 return ret; 212 return ret;
216 213
217 if (priv->base.action & MXM_SANITISE_DCB) 214 if (mxm->action & MXM_SANITISE_DCB)
218 mxm_dcb_sanitise(&priv->base); 215 mxm_dcb_sanitise(mxm);
216
219 return 0; 217 return 0;
220} 218}
221
222struct nvkm_oclass
223nv50_mxm_oclass = {
224 .handle = NV_SUBDEV(MXM, 0x50),
225 .ofuncs = &(struct nvkm_ofuncs) {
226 .ctor = nv50_mxm_ctor,
227 .dtor = _nvkm_mxm_dtor,
228 .init = _nvkm_mxm_init,
229 .fini = _nvkm_mxm_fini,
230 },
231};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
new file mode 100644
index 000000000000..7d970157aed1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
@@ -0,0 +1,15 @@
1#ifndef __NVKM_MXM_PRIV_H__
2#define __NVKM_MXM_PRIV_H__
3#define nvkm_mxm(p) container_of((p), struct nvkm_mxm, subdev)
4#include <subdev/mxm.h>
5
6#define MXM_SANITISE_DCB 0x00000001
7
8struct nvkm_mxm {
9 struct nvkm_subdev subdev;
10 u32 action;
11 u8 *mxms;
12};
13
14int nvkm_mxm_new_(struct nvkm_device *, int index, struct nvkm_mxm **);
15#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
new file mode 100644
index 000000000000..99672c3d0bad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -0,0 +1,7 @@
1nvkm-y += nvkm/subdev/pci/agp.o
2nvkm-y += nvkm/subdev/pci/base.o
3nvkm-y += nvkm/subdev/pci/nv04.o
4nvkm-y += nvkm/subdev/pci/nv40.o
5nvkm-y += nvkm/subdev/pci/nv4c.o
6nvkm-y += nvkm/subdev/pci/nv50.o
7nvkm-y += nvkm/subdev/pci/gf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
new file mode 100644
index 000000000000..814cb51cc873
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
@@ -0,0 +1,171 @@
1/*
2 * Copyright 2015 Nouveau Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "agp.h"
23#ifdef __NVKM_PCI_AGP_H__
24#include <core/option.h>
25
26struct nvkm_device_agp_quirk {
27 u16 hostbridge_vendor;
28 u16 hostbridge_device;
29 u16 chip_vendor;
30 u16 chip_device;
31 int mode;
32};
33
34static const struct nvkm_device_agp_quirk
35nvkm_device_agp_quirks[] = {
36 /* VIA Apollo PRO133x / GeForce FX 5600 Ultra - fdo#20341 */
37 { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
38 {},
39};
40
41void
42nvkm_agp_fini(struct nvkm_pci *pci)
43{
44 if (pci->agp.acquired) {
45 agp_backend_release(pci->agp.bridge);
46 pci->agp.acquired = false;
47 }
48}
49
50/* Ensure AGP controller is in a consistent state in case we need to
51 * execute the VBIOS DEVINIT scripts.
52 */
53void
54nvkm_agp_preinit(struct nvkm_pci *pci)
55{
56 struct nvkm_device *device = pci->subdev.device;
57 u32 mode = nvkm_pci_rd32(pci, 0x004c);
58 u32 save[2];
59
60 /* First of all, disable fast writes, otherwise if it's already
61 * enabled in the AGP bridge and we disable the card's AGP
62 * controller we might be locking ourselves out of it.
63 */
64 if ((mode | pci->agp.mode) & PCI_AGP_COMMAND_FW) {
65 mode = pci->agp.mode & ~PCI_AGP_COMMAND_FW;
66 agp_enable(pci->agp.bridge, mode);
67 }
68
69 /* clear busmaster bit, and disable AGP */
70 save[0] = nvkm_pci_rd32(pci, 0x0004);
71 nvkm_pci_wr32(pci, 0x0004, save[0] & ~0x00000004);
72 nvkm_pci_wr32(pci, 0x004c, 0x00000000);
73
74 /* reset PGRAPH, PFIFO and PTIMER */
75 save[1] = nvkm_mask(device, 0x000200, 0x00011100, 0x00000000);
76 nvkm_mask(device, 0x000200, 0x00011100, save[1]);
77
78 /* and restore busmaster bit (gives effect of resetting AGP) */
79 nvkm_pci_wr32(pci, 0x0004, save[0]);
80}
81
82int
83nvkm_agp_init(struct nvkm_pci *pci)
84{
85 if (!agp_backend_acquire(pci->pdev)) {
86 nvkm_error(&pci->subdev, "failed to acquire agp\n");
87 return -ENODEV;
88 }
89
90 agp_enable(pci->agp.bridge, pci->agp.mode);
91 pci->agp.acquired = true;
92 return 0;
93}
94
95void
96nvkm_agp_dtor(struct nvkm_pci *pci)
97{
98 arch_phys_wc_del(pci->agp.mtrr);
99}
100
101void
102nvkm_agp_ctor(struct nvkm_pci *pci)
103{
104 const struct nvkm_device_agp_quirk *quirk = nvkm_device_agp_quirks;
105 struct nvkm_subdev *subdev = &pci->subdev;
106 struct nvkm_device *device = subdev->device;
107 struct agp_kern_info info;
108 int mode = -1;
109
110#ifdef __powerpc__
111 /* Disable AGP by default on all PowerPC machines for now -- At
112 * least some UniNorth-2 AGP bridges are known to be broken:
113 * DMA from the host to the card works just fine, but writeback
114 * from the card to the host goes straight to memory
115 * untranslated bypassing that GATT somehow, making them quite
116 * painful to deal with...
117 */
118 mode = 0;
119#endif
120 mode = nvkm_longopt(device->cfgopt, "NvAGP", mode);
121
122 /* acquire bridge temporarily, so that we can copy its info */
123 if (!(pci->agp.bridge = agp_backend_acquire(pci->pdev))) {
124 nvkm_warn(subdev, "failed to acquire agp\n");
125 return;
126 }
127 agp_copy_info(pci->agp.bridge, &info);
128 agp_backend_release(pci->agp.bridge);
129
130 pci->agp.mode = info.mode;
131 pci->agp.base = info.aper_base;
132 pci->agp.size = info.aper_size * 1024 * 1024;
133 pci->agp.cma = info.cant_use_aperture;
134 pci->agp.mtrr = -1;
135
136 /* determine if bridge + chipset combination needs a workaround */
137 while (quirk->hostbridge_vendor) {
138 if (info.device->vendor == quirk->hostbridge_vendor &&
139 info.device->device == quirk->hostbridge_device &&
140 pci->pdev->vendor == quirk->chip_vendor &&
141 pci->pdev->device == quirk->chip_device) {
142 nvkm_info(subdev, "forcing default agp mode to %dX, "
143 "use NvAGP=<mode> to override\n",
144 quirk->mode);
145 mode = quirk->mode;
146 break;
147 }
148 quirk++;
149 }
150
151 /* apply quirk / user-specified mode */
152 if (mode >= 1) {
153 if (pci->agp.mode & 0x00000008)
154 mode /= 4; /* AGPv3 */
155 pci->agp.mode &= ~0x00000007;
156 pci->agp.mode |= (mode & 0x7);
157 } else
158 if (mode == 0) {
159 pci->agp.bridge = NULL;
160 return;
161 }
162
163 /* fast writes appear to be broken on nv18, they make the card
164 * lock up randomly.
165 */
166 if (device->chipset == 0x18)
167 pci->agp.mode &= ~PCI_AGP_COMMAND_FW;
168
169 pci->agp.mtrr = arch_phys_wc_add(pci->agp.base, pci->agp.size);
170}
171#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
new file mode 100644
index 000000000000..df2dd08363ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
@@ -0,0 +1,18 @@
1#include "priv.h"
2#if defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))
3#ifndef __NVKM_PCI_AGP_H__
4#define __NVKM_PCI_AGP_H__
5
6void nvkm_agp_ctor(struct nvkm_pci *);
7void nvkm_agp_dtor(struct nvkm_pci *);
8void nvkm_agp_preinit(struct nvkm_pci *);
9int nvkm_agp_init(struct nvkm_pci *);
10void nvkm_agp_fini(struct nvkm_pci *);
11#endif
12#else
13static inline void nvkm_agp_ctor(struct nvkm_pci *pci) {}
14static inline void nvkm_agp_dtor(struct nvkm_pci *pci) {}
15static inline void nvkm_agp_preinit(struct nvkm_pci *pci) {}
16static inline int nvkm_agp_init(struct nvkm_pci *pci) { return -ENOSYS; }
17static inline void nvkm_agp_fini(struct nvkm_pci *pci) {}
18#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
new file mode 100644
index 000000000000..d1c148e51922
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -0,0 +1,182 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25#include "agp.h"
26
27#include <core/option.h>
28#include <core/pci.h>
29#include <subdev/mc.h>
30
31u32
32nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
33{
34 return pci->func->rd32(pci, addr);
35}
36
37void
38nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
39{
40 pci->func->wr08(pci, addr, data);
41}
42
43void
44nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
45{
46 pci->func->wr32(pci, addr, data);
47}
48
49void
50nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow)
51{
52 u32 data = nvkm_pci_rd32(pci, 0x0050);
53 if (shadow)
54 data |= 0x00000001;
55 else
56 data &= ~0x00000001;
57 nvkm_pci_wr32(pci, 0x0050, data);
58}
59
60static irqreturn_t
61nvkm_pci_intr(int irq, void *arg)
62{
63 struct nvkm_pci *pci = arg;
64 struct nvkm_mc *mc = pci->subdev.device->mc;
65 bool handled = false;
66 if (likely(mc)) {
67 nvkm_mc_intr_unarm(mc);
68 if (pci->msi)
69 pci->func->msi_rearm(pci);
70 nvkm_mc_intr(mc, &handled);
71 nvkm_mc_intr_rearm(mc);
72 }
73 return handled ? IRQ_HANDLED : IRQ_NONE;
74}
75
76static int
77nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
78{
79 struct nvkm_pci *pci = nvkm_pci(subdev);
80
81 if (pci->irq >= 0) {
82 free_irq(pci->irq, pci);
83 pci->irq = -1;
84 };
85
86 if (pci->agp.bridge)
87 nvkm_agp_fini(pci);
88
89 return 0;
90}
91
92static int
93nvkm_pci_preinit(struct nvkm_subdev *subdev)
94{
95 struct nvkm_pci *pci = nvkm_pci(subdev);
96 if (pci->agp.bridge)
97 nvkm_agp_preinit(pci);
98 return 0;
99}
100
101static int
102nvkm_pci_init(struct nvkm_subdev *subdev)
103{
104 struct nvkm_pci *pci = nvkm_pci(subdev);
105 struct pci_dev *pdev = pci->pdev;
106 int ret;
107
108 if (pci->agp.bridge) {
109 ret = nvkm_agp_init(pci);
110 if (ret)
111 return ret;
112 }
113
114 ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
115 if (ret)
116 return ret;
117
118 pci->irq = pdev->irq;
119 return ret;
120}
121
122static void *
123nvkm_pci_dtor(struct nvkm_subdev *subdev)
124{
125 struct nvkm_pci *pci = nvkm_pci(subdev);
126 nvkm_agp_dtor(pci);
127 if (pci->msi)
128 pci_disable_msi(pci->pdev);
129 return nvkm_pci(subdev);
130}
131
132static const struct nvkm_subdev_func
133nvkm_pci_func = {
134 .dtor = nvkm_pci_dtor,
135 .preinit = nvkm_pci_preinit,
136 .init = nvkm_pci_init,
137 .fini = nvkm_pci_fini,
138};
139
140int
141nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
142 int index, struct nvkm_pci **ppci)
143{
144 struct nvkm_pci *pci;
145
146 if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
147 return -ENOMEM;
148 nvkm_subdev_ctor(&nvkm_pci_func, device, index, 0, &pci->subdev);
149 pci->func = func;
150 pci->pdev = device->func->pci(device)->pdev;
151 pci->irq = -1;
152
153 if (device->type == NVKM_DEVICE_AGP)
154 nvkm_agp_ctor(pci);
155
156 switch (pci->pdev->device & 0x0ff0) {
157 case 0x00f0:
158 case 0x02e0:
159 /* BR02? NFI how these would be handled yet exactly */
160 break;
161 default:
162 switch (device->chipset) {
163 case 0xaa:
164 /* reported broken, nv also disable it */
165 break;
166 default:
167 pci->msi = true;
168 break;
169 }
170 }
171
172 pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
173 if (pci->msi && func->msi_rearm) {
174 pci->msi = pci_enable_msi(pci->pdev) == 0;
175 if (pci->msi)
176 nvkm_debug(&pci->subdev, "MSI enabled\n");
177 } else {
178 pci->msi = false;
179 }
180
181 return 0;
182}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
new file mode 100644
index 000000000000..86f8226532d3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -0,0 +1,44 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25
26static void
27gf100_pci_msi_rearm(struct nvkm_pci *pci)
28{
29 nvkm_pci_wr08(pci, 0x0704, 0xff);
30}
31
32static const struct nvkm_pci_func
33gf100_pci_func = {
34 .rd32 = nv40_pci_rd32,
35 .wr08 = nv40_pci_wr08,
36 .wr32 = nv40_pci_wr32,
37 .msi_rearm = gf100_pci_msi_rearm,
38};
39
40int
41gf100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
42{
43 return nvkm_pci_new_(&gf100_pci_func, device, index, ppci);
44}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
new file mode 100644
index 000000000000..5b1ed42cb90b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25
26static u32
27nv04_pci_rd32(struct nvkm_pci *pci, u16 addr)
28{
29 struct nvkm_device *device = pci->subdev.device;
30 return nvkm_rd32(device, 0x001800 + addr);
31}
32
33static void
34nv04_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
35{
36 struct nvkm_device *device = pci->subdev.device;
37 nvkm_wr08(device, 0x001800 + addr, data);
38}
39
40static void
41nv04_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
42{
43 struct nvkm_device *device = pci->subdev.device;
44 nvkm_wr32(device, 0x001800 + addr, data);
45}
46
47static const struct nvkm_pci_func
48nv04_pci_func = {
49 .rd32 = nv04_pci_rd32,
50 .wr08 = nv04_pci_wr08,
51 .wr32 = nv04_pci_wr32,
52};
53
54int
55nv04_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
56{
57 return nvkm_pci_new_(&nv04_pci_func, device, index, ppci);
58}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
new file mode 100644
index 000000000000..090a187f165f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
@@ -0,0 +1,65 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25
26u32
27nv40_pci_rd32(struct nvkm_pci *pci, u16 addr)
28{
29 struct nvkm_device *device = pci->subdev.device;
30 return nvkm_rd32(device, 0x088000 + addr);
31}
32
33void
34nv40_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
35{
36 struct nvkm_device *device = pci->subdev.device;
37 nvkm_wr08(device, 0x088000 + addr, data);
38}
39
40void
41nv40_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
42{
43 struct nvkm_device *device = pci->subdev.device;
44 nvkm_wr32(device, 0x088000 + addr, data);
45}
46
47static void
48nv40_pci_msi_rearm(struct nvkm_pci *pci)
49{
50 nvkm_pci_wr08(pci, 0x0068, 0xff);
51}
52
53static const struct nvkm_pci_func
54nv40_pci_func = {
55 .rd32 = nv40_pci_rd32,
56 .wr08 = nv40_pci_wr08,
57 .wr32 = nv40_pci_wr32,
58 .msi_rearm = nv40_pci_msi_rearm,
59};
60
61int
62nv40_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
63{
64 return nvkm_pci_new_(&nv40_pci_func, device, index, ppci);
65}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
new file mode 100644
index 000000000000..1f1b26b5fa72
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
@@ -0,0 +1,37 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25
26static const struct nvkm_pci_func
27nv4c_pci_func = {
28 .rd32 = nv40_pci_rd32,
29 .wr08 = nv40_pci_wr08,
30 .wr32 = nv40_pci_wr32,
31};
32
33int
34nv4c_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
35{
36 return nvkm_pci_new_(&nv4c_pci_func, device, index, ppci);
37}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c
new file mode 100644
index 000000000000..3e167d4a381f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c
@@ -0,0 +1,51 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25
26#include <core/pci.h>
27
28/* MSI re-arm through the PRI appears to be broken on the original G80,
29 * so we access it via alternate PCI config space mechanisms.
30 */
31static void
32nv50_pci_msi_rearm(struct nvkm_pci *pci)
33{
34 struct nvkm_device *device = pci->subdev.device;
35 struct pci_dev *pdev = device->func->pci(device)->pdev;
36 pci_write_config_byte(pdev, 0x68, 0xff);
37}
38
39static const struct nvkm_pci_func
40nv50_pci_func = {
41 .rd32 = nv40_pci_rd32,
42 .wr08 = nv40_pci_wr08,
43 .wr32 = nv40_pci_wr32,
44 .msi_rearm = nv50_pci_msi_rearm,
45};
46
47int
48nv50_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
49{
50 return nvkm_pci_new_(&nv50_pci_func, device, index, ppci);
51}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
new file mode 100644
index 000000000000..d22c2c117106
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -0,0 +1,19 @@
1#ifndef __NVKM_PCI_PRIV_H__
2#define __NVKM_PCI_PRIV_H__
3#define nvkm_pci(p) container_of((p), struct nvkm_pci, subdev)
4#include <subdev/pci.h>
5
6int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *,
7 int index, struct nvkm_pci **);
8
9struct nvkm_pci_func {
10 u32 (*rd32)(struct nvkm_pci *, u16 addr);
11 void (*wr08)(struct nvkm_pci *, u16 addr, u8 data);
12 void (*wr32)(struct nvkm_pci *, u16 addr, u32 data);
13 void (*msi_rearm)(struct nvkm_pci *);
14};
15
16u32 nv40_pci_rd32(struct nvkm_pci *, u16);
17void nv40_pci_wr08(struct nvkm_pci *, u16, u8);
18void nv40_pci_wr32(struct nvkm_pci *, u16, u32);
19#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index 7081d6a9b95f..88b643b8664e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -2,8 +2,9 @@ nvkm-y += nvkm/subdev/pmu/base.o
2nvkm-y += nvkm/subdev/pmu/memx.o 2nvkm-y += nvkm/subdev/pmu/memx.o
3nvkm-y += nvkm/subdev/pmu/gt215.o 3nvkm-y += nvkm/subdev/pmu/gt215.o
4nvkm-y += nvkm/subdev/pmu/gf100.o 4nvkm-y += nvkm/subdev/pmu/gf100.o
5nvkm-y += nvkm/subdev/pmu/gf110.o 5nvkm-y += nvkm/subdev/pmu/gf119.o
6nvkm-y += nvkm/subdev/pmu/gk104.o 6nvkm-y += nvkm/subdev/pmu/gk104.o
7nvkm-y += nvkm/subdev/pmu/gk110.o 7nvkm-y += nvkm/subdev/pmu/gk110.o
8nvkm-y += nvkm/subdev/pmu/gk208.o 8nvkm-y += nvkm/subdev/pmu/gk208.o
9nvkm-y += nvkm/subdev/pmu/gk20a.o 9nvkm-y += nvkm/subdev/pmu/gk20a.o
10nvkm-y += nvkm/subdev/pmu/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index 054b2d2eec35..27a79c0c3888 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -28,21 +28,25 @@
28void 28void
29nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable) 29nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
30{ 30{
31 const struct nvkm_pmu_impl *impl = (void *)nv_oclass(pmu); 31 if (pmu->func->pgob)
32 if (impl->pgob) 32 pmu->func->pgob(pmu, enable);
33 impl->pgob(pmu, enable);
34} 33}
35 34
36static int 35int
37nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2], 36nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
38 u32 process, u32 message, u32 data0, u32 data1) 37 u32 process, u32 message, u32 data0, u32 data1)
39{ 38{
40 struct nvkm_subdev *subdev = nv_subdev(pmu); 39 struct nvkm_subdev *subdev = &pmu->subdev;
40 struct nvkm_device *device = subdev->device;
41 u32 addr; 41 u32 addr;
42 42
43 /* wait for a free slot in the fifo */ 43 /* wait for a free slot in the fifo */
44 addr = nv_rd32(pmu, 0x10a4a0); 44 addr = nvkm_rd32(device, 0x10a4a0);
45 if (!nv_wait_ne(pmu, 0x10a4b0, 0xffffffff, addr ^ 8)) 45 if (nvkm_msec(device, 2000,
46 u32 tmp = nvkm_rd32(device, 0x10a4b0);
47 if (tmp != (addr ^ 8))
48 break;
49 ) < 0)
46 return -EBUSY; 50 return -EBUSY;
47 51
48 /* we currently only support a single process at a time waiting 52 /* we currently only support a single process at a time waiting
@@ -57,20 +61,20 @@ nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
57 61
58 /* acquire data segment access */ 62 /* acquire data segment access */
59 do { 63 do {
60 nv_wr32(pmu, 0x10a580, 0x00000001); 64 nvkm_wr32(device, 0x10a580, 0x00000001);
61 } while (nv_rd32(pmu, 0x10a580) != 0x00000001); 65 } while (nvkm_rd32(device, 0x10a580) != 0x00000001);
62 66
63 /* write the packet */ 67 /* write the packet */
64 nv_wr32(pmu, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) + 68 nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
65 pmu->send.base)); 69 pmu->send.base));
66 nv_wr32(pmu, 0x10a1c4, process); 70 nvkm_wr32(device, 0x10a1c4, process);
67 nv_wr32(pmu, 0x10a1c4, message); 71 nvkm_wr32(device, 0x10a1c4, message);
68 nv_wr32(pmu, 0x10a1c4, data0); 72 nvkm_wr32(device, 0x10a1c4, data0);
69 nv_wr32(pmu, 0x10a1c4, data1); 73 nvkm_wr32(device, 0x10a1c4, data1);
70 nv_wr32(pmu, 0x10a4a0, (addr + 1) & 0x0f); 74 nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
71 75
72 /* release data segment access */ 76 /* release data segment access */
73 nv_wr32(pmu, 0x10a580, 0x00000000); 77 nvkm_wr32(device, 0x10a580, 0x00000000);
74 78
75 /* wait for reply, if requested */ 79 /* wait for reply, if requested */
76 if (reply) { 80 if (reply) {
@@ -87,29 +91,31 @@ static void
87nvkm_pmu_recv(struct work_struct *work) 91nvkm_pmu_recv(struct work_struct *work)
88{ 92{
89 struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work); 93 struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
94 struct nvkm_subdev *subdev = &pmu->subdev;
95 struct nvkm_device *device = subdev->device;
90 u32 process, message, data0, data1; 96 u32 process, message, data0, data1;
91 97
92 /* nothing to do if GET == PUT */ 98 /* nothing to do if GET == PUT */
93 u32 addr = nv_rd32(pmu, 0x10a4cc); 99 u32 addr = nvkm_rd32(device, 0x10a4cc);
94 if (addr == nv_rd32(pmu, 0x10a4c8)) 100 if (addr == nvkm_rd32(device, 0x10a4c8))
95 return; 101 return;
96 102
97 /* acquire data segment access */ 103 /* acquire data segment access */
98 do { 104 do {
99 nv_wr32(pmu, 0x10a580, 0x00000002); 105 nvkm_wr32(device, 0x10a580, 0x00000002);
100 } while (nv_rd32(pmu, 0x10a580) != 0x00000002); 106 } while (nvkm_rd32(device, 0x10a580) != 0x00000002);
101 107
102 /* read the packet */ 108 /* read the packet */
103 nv_wr32(pmu, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) + 109 nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
104 pmu->recv.base)); 110 pmu->recv.base));
105 process = nv_rd32(pmu, 0x10a1c4); 111 process = nvkm_rd32(device, 0x10a1c4);
106 message = nv_rd32(pmu, 0x10a1c4); 112 message = nvkm_rd32(device, 0x10a1c4);
107 data0 = nv_rd32(pmu, 0x10a1c4); 113 data0 = nvkm_rd32(device, 0x10a1c4);
108 data1 = nv_rd32(pmu, 0x10a1c4); 114 data1 = nvkm_rd32(device, 0x10a1c4);
109 nv_wr32(pmu, 0x10a4cc, (addr + 1) & 0x0f); 115 nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);
110 116
111 /* release data segment access */ 117 /* release data segment access */
112 nv_wr32(pmu, 0x10a580, 0x00000000); 118 nvkm_wr32(device, 0x10a580, 0x00000000);
113 119
114 /* wake process if it's waiting on a synchronous reply */ 120 /* wake process if it's waiting on a synchronous reply */
115 if (pmu->recv.process) { 121 if (pmu->recv.process) {
@@ -126,143 +132,149 @@ nvkm_pmu_recv(struct work_struct *work)
126 /* right now there's no other expected responses from the engine, 132 /* right now there's no other expected responses from the engine,
127 * so assume that any unexpected message is an error. 133 * so assume that any unexpected message is an error.
128 */ 134 */
129 nv_warn(pmu, "%c%c%c%c 0x%08x 0x%08x 0x%08x 0x%08x\n", 135 nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
130 (char)((process & 0x000000ff) >> 0), 136 (char)((process & 0x000000ff) >> 0),
131 (char)((process & 0x0000ff00) >> 8), 137 (char)((process & 0x0000ff00) >> 8),
132 (char)((process & 0x00ff0000) >> 16), 138 (char)((process & 0x00ff0000) >> 16),
133 (char)((process & 0xff000000) >> 24), 139 (char)((process & 0xff000000) >> 24),
134 process, message, data0, data1); 140 process, message, data0, data1);
135} 141}
136 142
137static void 143static void
138nvkm_pmu_intr(struct nvkm_subdev *subdev) 144nvkm_pmu_intr(struct nvkm_subdev *subdev)
139{ 145{
140 struct nvkm_pmu *pmu = (void *)subdev; 146 struct nvkm_pmu *pmu = nvkm_pmu(subdev);
141 u32 disp = nv_rd32(pmu, 0x10a01c); 147 struct nvkm_device *device = pmu->subdev.device;
142 u32 intr = nv_rd32(pmu, 0x10a008) & disp & ~(disp >> 16); 148 u32 disp = nvkm_rd32(device, 0x10a01c);
149 u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
143 150
144 if (intr & 0x00000020) { 151 if (intr & 0x00000020) {
145 u32 stat = nv_rd32(pmu, 0x10a16c); 152 u32 stat = nvkm_rd32(device, 0x10a16c);
146 if (stat & 0x80000000) { 153 if (stat & 0x80000000) {
147 nv_error(pmu, "UAS fault at 0x%06x addr 0x%08x\n", 154 nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
148 stat & 0x00ffffff, nv_rd32(pmu, 0x10a168)); 155 stat & 0x00ffffff,
149 nv_wr32(pmu, 0x10a16c, 0x00000000); 156 nvkm_rd32(device, 0x10a168));
157 nvkm_wr32(device, 0x10a16c, 0x00000000);
150 intr &= ~0x00000020; 158 intr &= ~0x00000020;
151 } 159 }
152 } 160 }
153 161
154 if (intr & 0x00000040) { 162 if (intr & 0x00000040) {
155 schedule_work(&pmu->recv.work); 163 schedule_work(&pmu->recv.work);
156 nv_wr32(pmu, 0x10a004, 0x00000040); 164 nvkm_wr32(device, 0x10a004, 0x00000040);
157 intr &= ~0x00000040; 165 intr &= ~0x00000040;
158 } 166 }
159 167
160 if (intr & 0x00000080) { 168 if (intr & 0x00000080) {
161 nv_info(pmu, "wr32 0x%06x 0x%08x\n", nv_rd32(pmu, 0x10a7a0), 169 nvkm_info(subdev, "wr32 %06x %08x\n",
162 nv_rd32(pmu, 0x10a7a4)); 170 nvkm_rd32(device, 0x10a7a0),
163 nv_wr32(pmu, 0x10a004, 0x00000080); 171 nvkm_rd32(device, 0x10a7a4));
172 nvkm_wr32(device, 0x10a004, 0x00000080);
164 intr &= ~0x00000080; 173 intr &= ~0x00000080;
165 } 174 }
166 175
167 if (intr) { 176 if (intr) {
168 nv_error(pmu, "intr 0x%08x\n", intr); 177 nvkm_error(subdev, "intr %08x\n", intr);
169 nv_wr32(pmu, 0x10a004, intr); 178 nvkm_wr32(device, 0x10a004, intr);
170 } 179 }
171} 180}
172 181
173int 182static int
174_nvkm_pmu_fini(struct nvkm_object *object, bool suspend) 183nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
175{ 184{
176 struct nvkm_pmu *pmu = (void *)object; 185 struct nvkm_pmu *pmu = nvkm_pmu(subdev);
186 struct nvkm_device *device = pmu->subdev.device;
177 187
178 nv_wr32(pmu, 0x10a014, 0x00000060); 188 nvkm_wr32(device, 0x10a014, 0x00000060);
179 flush_work(&pmu->recv.work); 189 flush_work(&pmu->recv.work);
180 190 return 0;
181 return nvkm_subdev_fini(&pmu->base, suspend);
182} 191}
183 192
184int 193static int
185_nvkm_pmu_init(struct nvkm_object *object) 194nvkm_pmu_init(struct nvkm_subdev *subdev)
186{ 195{
187 const struct nvkm_pmu_impl *impl = (void *)object->oclass; 196 struct nvkm_pmu *pmu = nvkm_pmu(subdev);
188 struct nvkm_pmu *pmu = (void *)object; 197 struct nvkm_device *device = pmu->subdev.device;
189 int ret, i; 198 int i;
190
191 ret = nvkm_subdev_init(&pmu->base);
192 if (ret)
193 return ret;
194
195 nv_subdev(pmu)->intr = nvkm_pmu_intr;
196 pmu->message = nvkm_pmu_send;
197 pmu->pgob = nvkm_pmu_pgob;
198 199
199 /* prevent previous ucode from running, wait for idle, reset */ 200 /* prevent previous ucode from running, wait for idle, reset */
200 nv_wr32(pmu, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */ 201 nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
201 nv_wait(pmu, 0x10a04c, 0xffffffff, 0x00000000); 202 nvkm_msec(device, 2000,
202 nv_mask(pmu, 0x000200, 0x00002000, 0x00000000); 203 if (!nvkm_rd32(device, 0x10a04c))
203 nv_mask(pmu, 0x000200, 0x00002000, 0x00002000); 204 break;
204 nv_rd32(pmu, 0x000200); 205 );
205 nv_wait(pmu, 0x10a10c, 0x00000006, 0x00000000); 206 nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
207 nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
208 nvkm_rd32(device, 0x000200);
209 nvkm_msec(device, 2000,
210 if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
211 break;
212 );
206 213
207 /* upload data segment */ 214 /* upload data segment */
208 nv_wr32(pmu, 0x10a1c0, 0x01000000); 215 nvkm_wr32(device, 0x10a1c0, 0x01000000);
209 for (i = 0; i < impl->data.size / 4; i++) 216 for (i = 0; i < pmu->func->data.size / 4; i++)
210 nv_wr32(pmu, 0x10a1c4, impl->data.data[i]); 217 nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);
211 218
212 /* upload code segment */ 219 /* upload code segment */
213 nv_wr32(pmu, 0x10a180, 0x01000000); 220 nvkm_wr32(device, 0x10a180, 0x01000000);
214 for (i = 0; i < impl->code.size / 4; i++) { 221 for (i = 0; i < pmu->func->code.size / 4; i++) {
215 if ((i & 0x3f) == 0) 222 if ((i & 0x3f) == 0)
216 nv_wr32(pmu, 0x10a188, i >> 6); 223 nvkm_wr32(device, 0x10a188, i >> 6);
217 nv_wr32(pmu, 0x10a184, impl->code.data[i]); 224 nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
218 } 225 }
219 226
220 /* start it running */ 227 /* start it running */
221 nv_wr32(pmu, 0x10a10c, 0x00000000); 228 nvkm_wr32(device, 0x10a10c, 0x00000000);
222 nv_wr32(pmu, 0x10a104, 0x00000000); 229 nvkm_wr32(device, 0x10a104, 0x00000000);
223 nv_wr32(pmu, 0x10a100, 0x00000002); 230 nvkm_wr32(device, 0x10a100, 0x00000002);
224 231
225 /* wait for valid host->pmu ring configuration */ 232 /* wait for valid host->pmu ring configuration */
226 if (!nv_wait_ne(pmu, 0x10a4d0, 0xffffffff, 0x00000000)) 233 if (nvkm_msec(device, 2000,
234 if (nvkm_rd32(device, 0x10a4d0))
235 break;
236 ) < 0)
227 return -EBUSY; 237 return -EBUSY;
228 pmu->send.base = nv_rd32(pmu, 0x10a4d0) & 0x0000ffff; 238 pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
229 pmu->send.size = nv_rd32(pmu, 0x10a4d0) >> 16; 239 pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
230 240
231 /* wait for valid pmu->host ring configuration */ 241 /* wait for valid pmu->host ring configuration */
232 if (!nv_wait_ne(pmu, 0x10a4dc, 0xffffffff, 0x00000000)) 242 if (nvkm_msec(device, 2000,
243 if (nvkm_rd32(device, 0x10a4dc))
244 break;
245 ) < 0)
233 return -EBUSY; 246 return -EBUSY;
234 pmu->recv.base = nv_rd32(pmu, 0x10a4dc) & 0x0000ffff; 247 pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
235 pmu->recv.size = nv_rd32(pmu, 0x10a4dc) >> 16; 248 pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
236 249
237 nv_wr32(pmu, 0x10a010, 0x000000e0); 250 nvkm_wr32(device, 0x10a010, 0x000000e0);
238 return 0; 251 return 0;
239} 252}
240 253
241int 254static void *
242nvkm_pmu_create_(struct nvkm_object *parent, struct nvkm_object *engine, 255nvkm_pmu_dtor(struct nvkm_subdev *subdev)
243 struct nvkm_oclass *oclass, int length, void **pobject)
244{ 256{
245 struct nvkm_pmu *pmu; 257 return nvkm_pmu(subdev);
246 int ret;
247
248 ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PMU",
249 "pmu", length, pobject);
250 pmu = *pobject;
251 if (ret)
252 return ret;
253
254 INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
255 init_waitqueue_head(&pmu->recv.wait);
256 return 0;
257} 258}
258 259
260static const struct nvkm_subdev_func
261nvkm_pmu = {
262 .dtor = nvkm_pmu_dtor,
263 .init = nvkm_pmu_init,
264 .fini = nvkm_pmu_fini,
265 .intr = nvkm_pmu_intr,
266};
267
259int 268int
260_nvkm_pmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 269nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
261 struct nvkm_oclass *oclass, void *data, u32 size, 270 int index, struct nvkm_pmu **ppmu)
262 struct nvkm_object **pobject)
263{ 271{
264 struct nvkm_pmu *pmu; 272 struct nvkm_pmu *pmu;
265 int ret = nvkm_pmu_create(parent, engine, oclass, &pmu); 273 if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
266 *pobject = nv_object(pmu); 274 return -ENOMEM;
267 return ret; 275 nvkm_subdev_ctor(&nvkm_pmu, device, index, 0, &pmu->subdev);
276 pmu->func = func;
277 INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
278 init_waitqueue_head(&pmu->recv.wait);
279 return 0;
268} 280}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4 b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4
index ae9c3f18ae01..2f28c7e26a14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4
@@ -32,7 +32,7 @@
32 32
33#include "macros.fuc" 33#include "macros.fuc"
34 34
35.section #gf110_pmu_data 35.section #gf119_pmu_data
36#define INCLUDE_PROC 36#define INCLUDE_PROC
37#include "kernel.fuc" 37#include "kernel.fuc"
38#include "arith.fuc" 38#include "arith.fuc"
@@ -56,7 +56,7 @@
56#undef INCLUDE_DATA 56#undef INCLUDE_DATA
57.align 256 57.align 256
58 58
59.section #gf110_pmu_code 59.section #gf119_pmu_code
60#define INCLUDE_CODE 60#define INCLUDE_CODE
61#include "kernel.fuc" 61#include "kernel.fuc"
62#include "arith.fuc" 62#include "arith.fuc"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
index a0c499e4543c..31552af9b06e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
@@ -1,4 +1,4 @@
1uint32_t gf110_pmu_data[] = { 1uint32_t gf119_pmu_data[] = {
2/* 0x0000: proc_kern */ 2/* 0x0000: proc_kern */
3 0x52544e49, 3 0x52544e49,
4 0x00000000, 4 0x00000000,
@@ -915,7 +915,7 @@ uint32_t gf110_pmu_data[] = {
915 0x00000000, 915 0x00000000,
916}; 916};
917 917
918uint32_t gf110_pmu_code[] = { 918uint32_t gf119_pmu_code[] = {
919 0x034d0ef5, 919 0x034d0ef5,
920/* 0x0004: rd32 */ 920/* 0x0004: rd32 */
921 0x07a007f1, 921 0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index 78a4ea0101f1..aeb8ccd891fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -24,17 +24,16 @@
24#include "priv.h" 24#include "priv.h"
25#include "fuc/gf100.fuc3.h" 25#include "fuc/gf100.fuc3.h"
26 26
27struct nvkm_oclass * 27static const struct nvkm_pmu_func
28gf100_pmu_oclass = &(struct nvkm_pmu_impl) { 28gf100_pmu = {
29 .base.handle = NV_SUBDEV(PMU, 0xc0),
30 .base.ofuncs = &(struct nvkm_ofuncs) {
31 .ctor = _nvkm_pmu_ctor,
32 .dtor = _nvkm_pmu_dtor,
33 .init = _nvkm_pmu_init,
34 .fini = _nvkm_pmu_fini,
35 },
36 .code.data = gf100_pmu_code, 29 .code.data = gf100_pmu_code,
37 .code.size = sizeof(gf100_pmu_code), 30 .code.size = sizeof(gf100_pmu_code),
38 .data.data = gf100_pmu_data, 31 .data.data = gf100_pmu_data,
39 .data.size = sizeof(gf100_pmu_data), 32 .data.size = sizeof(gf100_pmu_data),
40}.base; 33};
34
35int
36gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
37{
38 return nvkm_pmu_new_(&gf100_pmu, device, index, ppmu);
39}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
index 6b3a23839ff0..fbc88d8ecd4d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -22,19 +22,18 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25#include "fuc/gf110.fuc4.h" 25#include "fuc/gf119.fuc4.h"
26 26
27struct nvkm_oclass * 27static const struct nvkm_pmu_func
28gf110_pmu_oclass = &(struct nvkm_pmu_impl) { 28gf119_pmu = {
29 .base.handle = NV_SUBDEV(PMU, 0xd0), 29 .code.data = gf119_pmu_code,
30 .base.ofuncs = &(struct nvkm_ofuncs) { 30 .code.size = sizeof(gf119_pmu_code),
31 .ctor = _nvkm_pmu_ctor, 31 .data.data = gf119_pmu_data,
32 .dtor = _nvkm_pmu_dtor, 32 .data.size = sizeof(gf119_pmu_data),
33 .init = _nvkm_pmu_init, 33};
34 .fini = _nvkm_pmu_fini, 34
35 }, 35int
36 .code.data = gf110_pmu_code, 36gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
37 .code.size = sizeof(gf110_pmu_code), 37{
38 .data.data = gf110_pmu_data, 38 return nvkm_pmu_new_(&gf119_pmu, device, index, ppmu);
39 .data.size = sizeof(gf110_pmu_data), 39}
40}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 28fdb8ea9ed8..e33f5c03b9ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -21,47 +21,97 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#define gf110_pmu_code gk104_pmu_code 24#define gf119_pmu_code gk104_pmu_code
25#define gf110_pmu_data gk104_pmu_data 25#define gf119_pmu_data gk104_pmu_data
26#include "priv.h" 26#include "priv.h"
27#include "fuc/gf110.fuc4.h" 27#include "fuc/gf119.fuc4.h"
28
29#include <core/option.h>
30#include <subdev/timer.h>
31
32static void
33magic_(struct nvkm_device *device, u32 ctrl, int size)
34{
35 nvkm_wr32(device, 0x00c800, 0x00000000);
36 nvkm_wr32(device, 0x00c808, 0x00000000);
37 nvkm_wr32(device, 0x00c800, ctrl);
38 nvkm_msec(device, 2000,
39 if (nvkm_rd32(device, 0x00c800) & 0x40000000) {
40 while (size--)
41 nvkm_wr32(device, 0x00c804, 0x00000000);
42 break;
43 }
44 );
45 nvkm_wr32(device, 0x00c800, 0x00000000);
46}
47
48static void
49magic(struct nvkm_device *device, u32 ctrl)
50{
51 magic_(device, 0x8000a41f | ctrl, 6);
52 magic_(device, 0x80000421 | ctrl, 1);
53}
28 54
29static void 55static void
30gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable) 56gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
31{ 57{
32 nv_mask(pmu, 0x000200, 0x00001000, 0x00000000); 58 struct nvkm_device *device = pmu->subdev.device;
33 nv_rd32(pmu, 0x000200); 59
34 nv_mask(pmu, 0x000200, 0x08000000, 0x08000000); 60 nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
61 nvkm_rd32(device, 0x000200);
62 nvkm_mask(device, 0x000200, 0x08000000, 0x08000000);
35 msleep(50); 63 msleep(50);
36 64
37 nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000002); 65 nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000002);
38 nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001); 66 nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
39 nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000); 67 nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
40 68
41 nv_mask(pmu, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000); 69 nvkm_mask(device, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
42 msleep(50); 70 msleep(50);
43 71
44 nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000000); 72 nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000000);
45 nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001); 73 nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
46 nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000); 74 nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
75
76 nvkm_mask(device, 0x000200, 0x08000000, 0x00000000);
77 nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
78 nvkm_rd32(device, 0x000200);
47 79
48 nv_mask(pmu, 0x000200, 0x08000000, 0x00000000); 80 if ( nvkm_boolopt(device->cfgopt, "War00C800_0",
49 nv_mask(pmu, 0x000200, 0x00001000, 0x00001000); 81 device->quirk ? device->quirk->War00C800_0 : false)) {
50 nv_rd32(pmu, 0x000200); 82 nvkm_info(&pmu->subdev, "hw bug workaround enabled\n");
83 switch (device->chipset) {
84 case 0xe4:
85 magic(device, 0x04000000);
86 magic(device, 0x06000000);
87 magic(device, 0x0c000000);
88 magic(device, 0x0e000000);
89 break;
90 case 0xe6:
91 magic(device, 0x02000000);
92 magic(device, 0x04000000);
93 magic(device, 0x0a000000);
94 break;
95 case 0xe7:
96 magic(device, 0x02000000);
97 break;
98 default:
99 break;
100 }
101 }
51} 102}
52 103
53struct nvkm_oclass * 104static const struct nvkm_pmu_func
54gk104_pmu_oclass = &(struct nvkm_pmu_impl) { 105gk104_pmu = {
55 .base.handle = NV_SUBDEV(PMU, 0xe4),
56 .base.ofuncs = &(struct nvkm_ofuncs) {
57 .ctor = _nvkm_pmu_ctor,
58 .dtor = _nvkm_pmu_dtor,
59 .init = _nvkm_pmu_init,
60 .fini = _nvkm_pmu_fini,
61 },
62 .code.data = gk104_pmu_code, 106 .code.data = gk104_pmu_code,
63 .code.size = sizeof(gk104_pmu_code), 107 .code.size = sizeof(gk104_pmu_code),
64 .data.data = gk104_pmu_data, 108 .data.data = gk104_pmu_data,
65 .data.size = sizeof(gk104_pmu_data), 109 .data.size = sizeof(gk104_pmu_data),
66 .pgob = gk104_pmu_pgob, 110 .pgob = gk104_pmu_pgob,
67}.base; 111};
112
113int
114gk104_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
115{
116 return nvkm_pmu_new_(&gk104_pmu, device, index, ppmu);
117}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index 89bb94b0af8b..ae255247c9d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -21,16 +21,17 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#define gf110_pmu_code gk110_pmu_code 24#define gf119_pmu_code gk110_pmu_code
25#define gf110_pmu_data gk110_pmu_data 25#define gf119_pmu_data gk110_pmu_data
26#include "priv.h" 26#include "priv.h"
27#include "fuc/gf110.fuc4.h" 27#include "fuc/gf119.fuc4.h"
28 28
29#include <subdev/timer.h> 29#include <subdev/timer.h>
30 30
31void 31void
32gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable) 32gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
33{ 33{
34 struct nvkm_device *device = pmu->subdev.device;
34 static const struct { 35 static const struct {
35 u32 addr; 36 u32 addr;
36 u32 data; 37 u32 data;
@@ -54,42 +55,44 @@ gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
54 }; 55 };
55 int i; 56 int i;
56 57
57 nv_mask(pmu, 0x000200, 0x00001000, 0x00000000); 58 nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
58 nv_rd32(pmu, 0x000200); 59 nvkm_rd32(device, 0x000200);
59 nv_mask(pmu, 0x000200, 0x08000000, 0x08000000); 60 nvkm_mask(device, 0x000200, 0x08000000, 0x08000000);
60 msleep(50); 61 msleep(50);
61 62
62 nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000002); 63 nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000002);
63 nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001); 64 nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
64 nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000); 65 nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
65 66
66 nv_mask(pmu, 0x0206b4, 0x00000000, 0x00000000); 67 nvkm_mask(device, 0x0206b4, 0x00000000, 0x00000000);
67 for (i = 0; i < ARRAY_SIZE(magic); i++) { 68 for (i = 0; i < ARRAY_SIZE(magic); i++) {
68 nv_wr32(pmu, magic[i].addr, magic[i].data); 69 nvkm_wr32(device, magic[i].addr, magic[i].data);
69 nv_wait(pmu, magic[i].addr, 0x80000000, 0x00000000); 70 nvkm_msec(device, 2000,
71 if (!(nvkm_rd32(device, magic[i].addr) & 0x80000000))
72 break;
73 );
70 } 74 }
71 75
72 nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000000); 76 nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000000);
73 nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001); 77 nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
74 nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000); 78 nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
75 79
76 nv_mask(pmu, 0x000200, 0x08000000, 0x00000000); 80 nvkm_mask(device, 0x000200, 0x08000000, 0x00000000);
77 nv_mask(pmu, 0x000200, 0x00001000, 0x00001000); 81 nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
78 nv_rd32(pmu, 0x000200); 82 nvkm_rd32(device, 0x000200);
79} 83}
80 84
81struct nvkm_oclass * 85static const struct nvkm_pmu_func
82gk110_pmu_oclass = &(struct nvkm_pmu_impl) { 86gk110_pmu = {
83 .base.handle = NV_SUBDEV(PMU, 0xf0),
84 .base.ofuncs = &(struct nvkm_ofuncs) {
85 .ctor = _nvkm_pmu_ctor,
86 .dtor = _nvkm_pmu_dtor,
87 .init = _nvkm_pmu_init,
88 .fini = _nvkm_pmu_fini,
89 },
90 .code.data = gk110_pmu_code, 87 .code.data = gk110_pmu_code,
91 .code.size = sizeof(gk110_pmu_code), 88 .code.size = sizeof(gk110_pmu_code),
92 .data.data = gk110_pmu_data, 89 .data.data = gk110_pmu_data,
93 .data.size = sizeof(gk110_pmu_data), 90 .data.size = sizeof(gk110_pmu_data),
94 .pgob = gk110_pmu_pgob, 91 .pgob = gk110_pmu_pgob,
95}.base; 92};
93
94int
95gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
96{
97 return nvkm_pmu_new_(&gk110_pmu, device, index, ppmu);
98}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index b14134ef9ea5..3b4917637902 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -24,18 +24,17 @@
24#include "priv.h" 24#include "priv.h"
25#include "fuc/gk208.fuc5.h" 25#include "fuc/gk208.fuc5.h"
26 26
27struct nvkm_oclass * 27static const struct nvkm_pmu_func
28gk208_pmu_oclass = &(struct nvkm_pmu_impl) { 28gk208_pmu = {
29 .base.handle = NV_SUBDEV(PMU, 0x00),
30 .base.ofuncs = &(struct nvkm_ofuncs) {
31 .ctor = _nvkm_pmu_ctor,
32 .dtor = _nvkm_pmu_dtor,
33 .init = _nvkm_pmu_init,
34 .fini = _nvkm_pmu_fini,
35 },
36 .code.data = gk208_pmu_code, 29 .code.data = gk208_pmu_code,
37 .code.size = sizeof(gk208_pmu_code), 30 .code.size = sizeof(gk208_pmu_code),
38 .data.data = gk208_pmu_data, 31 .data.data = gk208_pmu_data,
39 .data.size = sizeof(gk208_pmu_data), 32 .data.size = sizeof(gk208_pmu_data),
40 .pgob = gk110_pmu_pgob, 33 .pgob = gk110_pmu_pgob,
41}.base; 34};
35
36int
37gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
38{
39 return nvkm_pmu_new_(&gk208_pmu, device, index, ppmu);
40}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 594f746e68f2..6689d0290a7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -19,6 +19,7 @@
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE. 20 * DEALINGS IN THE SOFTWARE.
21 */ 21 */
22#define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base.subdev)
22#include "priv.h" 23#include "priv.h"
23 24
24#include <subdev/clk.h> 25#include <subdev/clk.h>
@@ -35,7 +36,7 @@ struct gk20a_pmu_dvfs_data {
35 unsigned int avg_load; 36 unsigned int avg_load;
36}; 37};
37 38
38struct gk20a_pmu_priv { 39struct gk20a_pmu {
39 struct nvkm_pmu base; 40 struct nvkm_pmu base;
40 struct nvkm_alarm alarm; 41 struct nvkm_alarm alarm;
41 struct gk20a_pmu_dvfs_data *data; 42 struct gk20a_pmu_dvfs_data *data;
@@ -48,28 +49,28 @@ struct gk20a_pmu_dvfs_dev_status {
48}; 49};
49 50
50static int 51static int
51gk20a_pmu_dvfs_target(struct gk20a_pmu_priv *priv, int *state) 52gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)
52{ 53{
53 struct nvkm_clk *clk = nvkm_clk(priv); 54 struct nvkm_clk *clk = pmu->base.subdev.device->clk;
54 55
55 return nvkm_clk_astate(clk, *state, 0, false); 56 return nvkm_clk_astate(clk, *state, 0, false);
56} 57}
57 58
58static int 59static int
59gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu_priv *priv, int *state) 60gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)
60{ 61{
61 struct nvkm_clk *clk = nvkm_clk(priv); 62 struct nvkm_clk *clk = pmu->base.subdev.device->clk;
62 63
63 *state = clk->pstate; 64 *state = clk->pstate;
64 return 0; 65 return 0;
65} 66}
66 67
67static int 68static int
68gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu_priv *priv, 69gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
69 int *state, int load) 70 int *state, int load)
70{ 71{
71 struct gk20a_pmu_dvfs_data *data = priv->data; 72 struct gk20a_pmu_dvfs_data *data = pmu->data;
72 struct nvkm_clk *clk = nvkm_clk(priv); 73 struct nvkm_clk *clk = pmu->base.subdev.device->clk;
73 int cur_level, level; 74 int cur_level, level;
74 75
75 /* For GK20A, the performance level is directly mapped to pstate */ 76 /* For GK20A, the performance level is directly mapped to pstate */
@@ -84,7 +85,8 @@ gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu_priv *priv,
84 level = min(clk->state_nr - 1, level); 85 level = min(clk->state_nr - 1, level);
85 } 86 }
86 87
87 nv_trace(priv, "cur level = %d, new level = %d\n", cur_level, level); 88 nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",
89 cur_level, level);
88 90
89 *state = level; 91 *state = level;
90 92
@@ -95,30 +97,35 @@ gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu_priv *priv,
95} 97}
96 98
97static int 99static int
98gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu_priv *priv, 100gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
99 struct gk20a_pmu_dvfs_dev_status *status) 101 struct gk20a_pmu_dvfs_dev_status *status)
100{ 102{
101 status->busy = nv_rd32(priv, 0x10a508 + (BUSY_SLOT * 0x10)); 103 struct nvkm_device *device = pmu->base.subdev.device;
102 status->total= nv_rd32(priv, 0x10a508 + (CLK_SLOT * 0x10)); 104 status->busy = nvkm_rd32(device, 0x10a508 + (BUSY_SLOT * 0x10));
105 status->total= nvkm_rd32(device, 0x10a508 + (CLK_SLOT * 0x10));
103 return 0; 106 return 0;
104} 107}
105 108
106static void 109static void
107gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu_priv *priv) 110gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
108{ 111{
109 nv_wr32(priv, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000); 112 struct nvkm_device *device = pmu->base.subdev.device;
110 nv_wr32(priv, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000); 113 nvkm_wr32(device, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
114 nvkm_wr32(device, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
111} 115}
112 116
113static void 117static void
114gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm) 118gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
115{ 119{
116 struct gk20a_pmu_priv *priv = 120 struct gk20a_pmu *pmu =
117 container_of(alarm, struct gk20a_pmu_priv, alarm); 121 container_of(alarm, struct gk20a_pmu, alarm);
118 struct gk20a_pmu_dvfs_data *data = priv->data; 122 struct gk20a_pmu_dvfs_data *data = pmu->data;
119 struct gk20a_pmu_dvfs_dev_status status; 123 struct gk20a_pmu_dvfs_dev_status status;
120 struct nvkm_clk *clk = nvkm_clk(priv); 124 struct nvkm_subdev *subdev = &pmu->base.subdev;
121 struct nvkm_volt *volt = nvkm_volt(priv); 125 struct nvkm_device *device = subdev->device;
126 struct nvkm_clk *clk = device->clk;
127 struct nvkm_timer *tmr = device->timer;
128 struct nvkm_volt *volt = device->volt;
122 u32 utilization = 0; 129 u32 utilization = 0;
123 int state, ret; 130 int state, ret;
124 131
@@ -129,9 +136,9 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
129 if (!clk || !volt) 136 if (!clk || !volt)
130 goto resched; 137 goto resched;
131 138
132 ret = gk20a_pmu_dvfs_get_dev_status(priv, &status); 139 ret = gk20a_pmu_dvfs_get_dev_status(pmu, &status);
133 if (ret) { 140 if (ret) {
134 nv_warn(priv, "failed to get device status\n"); 141 nvkm_warn(subdev, "failed to get device status\n");
135 goto resched; 142 goto resched;
136 } 143 }
137 144
@@ -140,56 +147,52 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
140 147
141 data->avg_load = (data->p_smooth * data->avg_load) + utilization; 148 data->avg_load = (data->p_smooth * data->avg_load) + utilization;
142 data->avg_load /= data->p_smooth + 1; 149 data->avg_load /= data->p_smooth + 1;
143 nv_trace(priv, "utilization = %d %%, avg_load = %d %%\n", 150 nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n",
144 utilization, data->avg_load); 151 utilization, data->avg_load);
145 152
146 ret = gk20a_pmu_dvfs_get_cur_state(priv, &state); 153 ret = gk20a_pmu_dvfs_get_cur_state(pmu, &state);
147 if (ret) { 154 if (ret) {
148 nv_warn(priv, "failed to get current state\n"); 155 nvkm_warn(subdev, "failed to get current state\n");
149 goto resched; 156 goto resched;
150 } 157 }
151 158
152 if (gk20a_pmu_dvfs_get_target_state(priv, &state, data->avg_load)) { 159 if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
153 nv_trace(priv, "set new state to %d\n", state); 160 nvkm_trace(subdev, "set new state to %d\n", state);
154 gk20a_pmu_dvfs_target(priv, &state); 161 gk20a_pmu_dvfs_target(pmu, &state);
155 } 162 }
156 163
157resched: 164resched:
158 gk20a_pmu_dvfs_reset_dev_status(priv); 165 gk20a_pmu_dvfs_reset_dev_status(pmu);
159 nvkm_timer_alarm(priv, 100000000, alarm); 166 nvkm_timer_alarm(tmr, 100000000, alarm);
160} 167}
161 168
162static int 169static int
163gk20a_pmu_fini(struct nvkm_object *object, bool suspend) 170gk20a_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
164{ 171{
165 struct nvkm_pmu *pmu = (void *)object; 172 struct gk20a_pmu *pmu = gk20a_pmu(subdev);
166 struct gk20a_pmu_priv *priv = (void *)pmu; 173 nvkm_timer_alarm_cancel(subdev->device->timer, &pmu->alarm);
167 174 return 0;
168 nvkm_timer_alarm_cancel(priv, &priv->alarm); 175}
169 176
170 return nvkm_subdev_fini(&pmu->base, suspend); 177static void *
178gk20a_pmu_dtor(struct nvkm_subdev *subdev)
179{
180 return gk20a_pmu(subdev);
171} 181}
172 182
173static int 183static int
174gk20a_pmu_init(struct nvkm_object *object) 184gk20a_pmu_init(struct nvkm_subdev *subdev)
175{ 185{
176 struct nvkm_pmu *pmu = (void *)object; 186 struct gk20a_pmu *pmu = gk20a_pmu(subdev);
177 struct gk20a_pmu_priv *priv = (void *)pmu; 187 struct nvkm_device *device = pmu->base.subdev.device;
178 int ret;
179
180 ret = nvkm_subdev_init(&pmu->base);
181 if (ret)
182 return ret;
183
184 pmu->pgob = nvkm_pmu_pgob;
185 188
186 /* init pwr perf counter */ 189 /* init pwr perf counter */
187 nv_wr32(pmu, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001); 190 nvkm_wr32(device, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
188 nv_wr32(pmu, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002); 191 nvkm_wr32(device, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
189 nv_wr32(pmu, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003); 192 nvkm_wr32(device, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);
190 193
191 nvkm_timer_alarm(pmu, 2000000000, &priv->alarm); 194 nvkm_timer_alarm(device->timer, 2000000000, &pmu->alarm);
192 return ret; 195 return 0;
193} 196}
194 197
195static struct gk20a_pmu_dvfs_data 198static struct gk20a_pmu_dvfs_data
@@ -199,32 +202,26 @@ gk20a_dvfs_data= {
199 .p_smooth = 1, 202 .p_smooth = 1,
200}; 203};
201 204
202static int 205static const struct nvkm_subdev_func
203gk20a_pmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 206gk20a_pmu = {
204 struct nvkm_oclass *oclass, void *data, u32 size, 207 .init = gk20a_pmu_init,
205 struct nvkm_object **pobject) 208 .fini = gk20a_pmu_fini,
206{ 209 .dtor = gk20a_pmu_dtor,
207 struct gk20a_pmu_priv *priv; 210};
208 int ret;
209 211
210 ret = nvkm_pmu_create(parent, engine, oclass, &priv); 212int
211 *pobject = nv_object(priv); 213gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
212 if (ret) 214{
213 return ret; 215 static const struct nvkm_pmu_func func = {};
216 struct gk20a_pmu *pmu;
214 217
215 priv->data = &gk20a_dvfs_data; 218 if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
219 return -ENOMEM;
220 pmu->base.func = &func;
221 *ppmu = &pmu->base;
216 222
217 nvkm_alarm_init(&priv->alarm, gk20a_pmu_dvfs_work); 223 nvkm_subdev_ctor(&gk20a_pmu, device, index, 0, &pmu->base.subdev);
224 pmu->data = &gk20a_dvfs_data;
225 nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
218 return 0; 226 return 0;
219} 227}
220
221struct nvkm_oclass *
222gk20a_pmu_oclass = &(struct nvkm_pmu_impl) {
223 .base.handle = NV_SUBDEV(PMU, 0xea),
224 .base.ofuncs = &(struct nvkm_ofuncs) {
225 .ctor = gk20a_pmu_ctor,
226 .dtor = _nvkm_pmu_dtor,
227 .init = gk20a_pmu_init,
228 .fini = gk20a_pmu_fini,
229 },
230}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
new file mode 100644
index 000000000000..31b8692b4641
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -0,0 +1,41 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25#define gk208_pmu_code gm107_pmu_code
26#define gk208_pmu_data gm107_pmu_data
27#include "fuc/gk208.fuc5.h"
28
29static const struct nvkm_pmu_func
30gm107_pmu = {
31 .code.data = gm107_pmu_code,
32 .code.size = sizeof(gm107_pmu_code),
33 .data.data = gm107_pmu_data,
34 .data.size = sizeof(gm107_pmu_data),
35};
36
37int
38gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
39{
40 return nvkm_pmu_new_(&gm107_pmu, device, index, ppmu);
41}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index 30aaeb21de41..8ba7fa4ca75b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -24,26 +24,25 @@
24#include "priv.h" 24#include "priv.h"
25#include "fuc/gt215.fuc3.h" 25#include "fuc/gt215.fuc3.h"
26 26
27static int 27static void
28gt215_pmu_init(struct nvkm_object *object) 28gt215_pmu_reset(struct nvkm_pmu *pmu)
29{ 29{
30 struct nvkm_pmu *pmu = (void *)object; 30 struct nvkm_device *device = pmu->subdev.device;
31 nv_mask(pmu, 0x022210, 0x00000001, 0x00000000); 31 nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
32 nv_mask(pmu, 0x022210, 0x00000001, 0x00000001); 32 nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
33 return nvkm_pmu_init(pmu);
34} 33}
35 34
36struct nvkm_oclass * 35static const struct nvkm_pmu_func
37gt215_pmu_oclass = &(struct nvkm_pmu_impl) { 36gt215_pmu = {
38 .base.handle = NV_SUBDEV(PMU, 0xa3), 37 .reset = gt215_pmu_reset,
39 .base.ofuncs = &(struct nvkm_ofuncs) {
40 .ctor = _nvkm_pmu_ctor,
41 .dtor = _nvkm_pmu_dtor,
42 .init = gt215_pmu_init,
43 .fini = _nvkm_pmu_fini,
44 },
45 .code.data = gt215_pmu_code, 38 .code.data = gt215_pmu_code,
46 .code.size = sizeof(gt215_pmu_code), 39 .code.size = sizeof(gt215_pmu_code),
47 .data.data = gt215_pmu_data, 40 .data.data = gt215_pmu_data,
48 .data.size = sizeof(gt215_pmu_data), 41 .data.size = sizeof(gt215_pmu_data),
49}.base; 42};
43
44int
45gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
46{
47 return nvkm_pmu_new_(&gt215_pmu, device, index, ppmu);
48}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
index b75c5b885980..e6f74168238c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
@@ -2,8 +2,6 @@
2#define __NVKM_PMU_MEMX_H__ 2#define __NVKM_PMU_MEMX_H__
3#include "priv.h" 3#include "priv.h"
4 4
5#include <core/device.h>
6
7struct nvkm_memx { 5struct nvkm_memx {
8 struct nvkm_pmu *pmu; 6 struct nvkm_pmu *pmu;
9 u32 base; 7 u32 base;
@@ -18,13 +16,13 @@ struct nvkm_memx {
18static void 16static void
19memx_out(struct nvkm_memx *memx) 17memx_out(struct nvkm_memx *memx)
20{ 18{
21 struct nvkm_pmu *pmu = memx->pmu; 19 struct nvkm_device *device = memx->pmu->subdev.device;
22 int i; 20 int i;
23 21
24 if (memx->c.mthd) { 22 if (memx->c.mthd) {
25 nv_wr32(pmu, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd); 23 nvkm_wr32(device, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
26 for (i = 0; i < memx->c.size; i++) 24 for (i = 0; i < memx->c.size; i++)
27 nv_wr32(pmu, 0x10a1c4, memx->c.data[i]); 25 nvkm_wr32(device, 0x10a1c4, memx->c.data[i]);
28 memx->c.mthd = 0; 26 memx->c.mthd = 0;
29 memx->c.size = 0; 27 memx->c.size = 0;
30 } 28 }
@@ -44,12 +42,13 @@ memx_cmd(struct nvkm_memx *memx, u32 mthd, u32 size, u32 data[])
44int 42int
45nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx) 43nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
46{ 44{
45 struct nvkm_device *device = pmu->subdev.device;
47 struct nvkm_memx *memx; 46 struct nvkm_memx *memx;
48 u32 reply[2]; 47 u32 reply[2];
49 int ret; 48 int ret;
50 49
51 ret = pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_INFO, 50 ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
52 MEMX_INFO_DATA, 0); 51 MEMX_INFO_DATA, 0);
53 if (ret) 52 if (ret)
54 return ret; 53 return ret;
55 54
@@ -62,9 +61,9 @@ nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
62 61
63 /* acquire data segment access */ 62 /* acquire data segment access */
64 do { 63 do {
65 nv_wr32(pmu, 0x10a580, 0x00000003); 64 nvkm_wr32(device, 0x10a580, 0x00000003);
66 } while (nv_rd32(pmu, 0x10a580) != 0x00000003); 65 } while (nvkm_rd32(device, 0x10a580) != 0x00000003);
67 nv_wr32(pmu, 0x10a1c0, 0x01000000 | memx->base); 66 nvkm_wr32(device, 0x10a1c0, 0x01000000 | memx->base);
68 return 0; 67 return 0;
69} 68}
70 69
@@ -73,23 +72,25 @@ nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
73{ 72{
74 struct nvkm_memx *memx = *pmemx; 73 struct nvkm_memx *memx = *pmemx;
75 struct nvkm_pmu *pmu = memx->pmu; 74 struct nvkm_pmu *pmu = memx->pmu;
75 struct nvkm_subdev *subdev = &pmu->subdev;
76 struct nvkm_device *device = subdev->device;
76 u32 finish, reply[2]; 77 u32 finish, reply[2];
77 78
78 /* flush the cache... */ 79 /* flush the cache... */
79 memx_out(memx); 80 memx_out(memx);
80 81
81 /* release data segment access */ 82 /* release data segment access */
82 finish = nv_rd32(pmu, 0x10a1c0) & 0x00ffffff; 83 finish = nvkm_rd32(device, 0x10a1c0) & 0x00ffffff;
83 nv_wr32(pmu, 0x10a580, 0x00000000); 84 nvkm_wr32(device, 0x10a580, 0x00000000);
84 85
85 /* call MEMX process to execute the script, and wait for reply */ 86 /* call MEMX process to execute the script, and wait for reply */
86 if (exec) { 87 if (exec) {
87 pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC, 88 nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
88 memx->base, finish); 89 memx->base, finish);
89 } 90 }
90 91
91 nv_debug(memx->pmu, "Exec took %uns, PMU_IN %08x\n", 92 nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
92 reply[0], reply[1]); 93 reply[0], reply[1]);
93 kfree(memx); 94 kfree(memx);
94 return 0; 95 return 0;
95} 96}
@@ -97,7 +98,7 @@ nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
97void 98void
98nvkm_memx_wr32(struct nvkm_memx *memx, u32 addr, u32 data) 99nvkm_memx_wr32(struct nvkm_memx *memx, u32 addr, u32 data)
99{ 100{
100 nv_debug(memx->pmu, "R[%06x] = 0x%08x\n", addr, data); 101 nvkm_debug(&memx->pmu->subdev, "R[%06x] = %08x\n", addr, data);
101 memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data }); 102 memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
102} 103}
103 104
@@ -105,8 +106,8 @@ void
105nvkm_memx_wait(struct nvkm_memx *memx, 106nvkm_memx_wait(struct nvkm_memx *memx,
106 u32 addr, u32 mask, u32 data, u32 nsec) 107 u32 addr, u32 mask, u32 data, u32 nsec)
107{ 108{
108 nv_debug(memx->pmu, "R[%06x] & 0x%08x == 0x%08x, %d us\n", 109 nvkm_debug(&memx->pmu->subdev, "R[%06x] & %08x == %08x, %d us\n",
109 addr, mask, data, nsec); 110 addr, mask, data, nsec);
110 memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec }); 111 memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
111 memx_out(memx); /* fuc can't handle multiple */ 112 memx_out(memx); /* fuc can't handle multiple */
112} 113}
@@ -114,7 +115,7 @@ nvkm_memx_wait(struct nvkm_memx *memx,
114void 115void
115nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec) 116nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
116{ 117{
117 nv_debug(memx->pmu, " DELAY = %d ns\n", nsec); 118 nvkm_debug(&memx->pmu->subdev, " DELAY = %d ns\n", nsec);
118 memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec }); 119 memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
119 memx_out(memx); /* fuc can't handle multiple */ 120 memx_out(memx); /* fuc can't handle multiple */
120} 121}
@@ -122,16 +123,17 @@ nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
122void 123void
123nvkm_memx_wait_vblank(struct nvkm_memx *memx) 124nvkm_memx_wait_vblank(struct nvkm_memx *memx)
124{ 125{
125 struct nvkm_pmu *pmu = memx->pmu; 126 struct nvkm_subdev *subdev = &memx->pmu->subdev;
127 struct nvkm_device *device = subdev->device;
126 u32 heads, x, y, px = 0; 128 u32 heads, x, y, px = 0;
127 int i, head_sync; 129 int i, head_sync;
128 130
129 if (nv_device(pmu)->chipset < 0xd0) { 131 if (device->chipset < 0xd0) {
130 heads = nv_rd32(pmu, 0x610050); 132 heads = nvkm_rd32(device, 0x610050);
131 for (i = 0; i < 2; i++) { 133 for (i = 0; i < 2; i++) {
132 /* Heuristic: sync to head with biggest resolution */ 134 /* Heuristic: sync to head with biggest resolution */
133 if (heads & (2 << (i << 3))) { 135 if (heads & (2 << (i << 3))) {
134 x = nv_rd32(pmu, 0x610b40 + (0x540 * i)); 136 x = nvkm_rd32(device, 0x610b40 + (0x540 * i));
135 y = (x & 0xffff0000) >> 16; 137 y = (x & 0xffff0000) >> 16;
136 x &= 0x0000ffff; 138 x &= 0x0000ffff;
137 if ((x * y) > px) { 139 if ((x * y) > px) {
@@ -143,11 +145,11 @@ nvkm_memx_wait_vblank(struct nvkm_memx *memx)
143 } 145 }
144 146
145 if (px == 0) { 147 if (px == 0) {
146 nv_debug(memx->pmu, "WAIT VBLANK !NO ACTIVE HEAD\n"); 148 nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
147 return; 149 return;
148 } 150 }
149 151
150 nv_debug(memx->pmu, "WAIT VBLANK HEAD%d\n", head_sync); 152 nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
151 memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync }); 153 memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
152 memx_out(memx); /* fuc can't handle multiple */ 154 memx_out(memx); /* fuc can't handle multiple */
153} 155}
@@ -155,18 +157,19 @@ nvkm_memx_wait_vblank(struct nvkm_memx *memx)
155void 157void
156nvkm_memx_train(struct nvkm_memx *memx) 158nvkm_memx_train(struct nvkm_memx *memx)
157{ 159{
158 nv_debug(memx->pmu, " MEM TRAIN\n"); 160 nvkm_debug(&memx->pmu->subdev, " MEM TRAIN\n");
159 memx_cmd(memx, MEMX_TRAIN, 0, NULL); 161 memx_cmd(memx, MEMX_TRAIN, 0, NULL);
160} 162}
161 163
162int 164int
163nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize) 165nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
164{ 166{
167 struct nvkm_device *device = pmu->subdev.device;
165 u32 reply[2], base, size, i; 168 u32 reply[2], base, size, i;
166 int ret; 169 int ret;
167 170
168 ret = pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_INFO, 171 ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
169 MEMX_INFO_TRAIN, 0); 172 MEMX_INFO_TRAIN, 0);
170 if (ret) 173 if (ret)
171 return ret; 174 return ret;
172 175
@@ -176,10 +179,10 @@ nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
176 return -ENOMEM; 179 return -ENOMEM;
177 180
178 /* read the packet */ 181 /* read the packet */
179 nv_wr32(pmu, 0x10a1c0, 0x02000000 | base); 182 nvkm_wr32(device, 0x10a1c0, 0x02000000 | base);
180 183
181 for (i = 0; i < size; i++) 184 for (i = 0; i < size; i++)
182 res[i] = nv_rd32(pmu, 0x10a1c4); 185 res[i] = nvkm_rd32(device, 0x10a1c4);
183 186
184 return 0; 187 return 0;
185} 188}
@@ -187,14 +190,14 @@ nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
187void 190void
188nvkm_memx_block(struct nvkm_memx *memx) 191nvkm_memx_block(struct nvkm_memx *memx)
189{ 192{
190 nv_debug(memx->pmu, " HOST BLOCKED\n"); 193 nvkm_debug(&memx->pmu->subdev, " HOST BLOCKED\n");
191 memx_cmd(memx, MEMX_ENTER, 0, NULL); 194 memx_cmd(memx, MEMX_ENTER, 0, NULL);
192} 195}
193 196
194void 197void
195nvkm_memx_unblock(struct nvkm_memx *memx) 198nvkm_memx_unblock(struct nvkm_memx *memx)
196{ 199{
197 nv_debug(memx->pmu, " HOST UNBLOCKED\n"); 200 nvkm_debug(&memx->pmu->subdev, " HOST UNBLOCKED\n");
198 memx_cmd(memx, MEMX_LEAVE, 0, NULL); 201 memx_cmd(memx, MEMX_LEAVE, 0, NULL);
199} 202}
200#endif 203#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 799e7c8b88f5..f38c88fae3d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -1,38 +1,20 @@
1#ifndef __NVKM_PMU_PRIV_H__ 1#ifndef __NVKM_PMU_PRIV_H__
2#define __NVKM_PMU_PRIV_H__ 2#define __NVKM_PMU_PRIV_H__
3#define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev)
3#include <subdev/pmu.h> 4#include <subdev/pmu.h>
4#include <subdev/pmu/fuc/os.h> 5#include <subdev/pmu/fuc/os.h>
5 6
6#define nvkm_pmu_create(p, e, o, d) \ 7int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *,
7 nvkm_pmu_create_((p), (e), (o), sizeof(**d), (void **)d) 8 int index, struct nvkm_pmu **);
8#define nvkm_pmu_destroy(p) \
9 nvkm_subdev_destroy(&(p)->base)
10#define nvkm_pmu_init(p) ({ \
11 struct nvkm_pmu *_pmu = (p); \
12 _nvkm_pmu_init(nv_object(_pmu)); \
13})
14#define nvkm_pmu_fini(p,s) ({ \
15 struct nvkm_pmu *_pmu = (p); \
16 _nvkm_pmu_fini(nv_object(_pmu), (s)); \
17})
18 9
19int nvkm_pmu_create_(struct nvkm_object *, struct nvkm_object *, 10struct nvkm_pmu_func {
20 struct nvkm_oclass *, int, void **); 11 void (*reset)(struct nvkm_pmu *);
21 12
22int _nvkm_pmu_ctor(struct nvkm_object *, struct nvkm_object *,
23 struct nvkm_oclass *, void *, u32,
24 struct nvkm_object **);
25#define _nvkm_pmu_dtor _nvkm_subdev_dtor
26int _nvkm_pmu_init(struct nvkm_object *);
27int _nvkm_pmu_fini(struct nvkm_object *, bool);
28void nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable);
29
30struct nvkm_pmu_impl {
31 struct nvkm_oclass base;
32 struct { 13 struct {
33 u32 *data; 14 u32 *data;
34 u32 size; 15 u32 size;
35 } code; 16 } code;
17
36 struct { 18 struct {
37 u32 *data; 19 u32 *data;
38 u32 size; 20 u32 size;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
index 5837cf1292d9..135758ba3e28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
@@ -9,5 +9,5 @@ nvkm-y += nvkm/subdev/therm/nv40.o
9nvkm-y += nvkm/subdev/therm/nv50.o 9nvkm-y += nvkm/subdev/therm/nv50.o
10nvkm-y += nvkm/subdev/therm/g84.o 10nvkm-y += nvkm/subdev/therm/g84.o
11nvkm-y += nvkm/subdev/therm/gt215.o 11nvkm-y += nvkm/subdev/therm/gt215.o
12nvkm-y += nvkm/subdev/therm/gf110.o 12nvkm-y += nvkm/subdev/therm/gf119.o
13nvkm-y += nvkm/subdev/therm/gm107.o 13nvkm-y += nvkm/subdev/therm/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index ec327cb64a0d..949dc6101a58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -23,21 +23,26 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26#include <core/device.h> 26int
27nvkm_therm_temp_get(struct nvkm_therm *therm)
28{
29 if (therm->func->temp_get)
30 return therm->func->temp_get(therm);
31 return -ENODEV;
32}
27 33
28static int 34static int
29nvkm_therm_update_trip(struct nvkm_therm *therm) 35nvkm_therm_update_trip(struct nvkm_therm *therm)
30{ 36{
31 struct nvkm_therm_priv *priv = (void *)therm; 37 struct nvbios_therm_trip_point *trip = therm->fan->bios.trip,
32 struct nvbios_therm_trip_point *trip = priv->fan->bios.trip,
33 *cur_trip = NULL, 38 *cur_trip = NULL,
34 *last_trip = priv->last_trip; 39 *last_trip = therm->last_trip;
35 u8 temp = therm->temp_get(therm); 40 u8 temp = therm->func->temp_get(therm);
36 u16 duty, i; 41 u16 duty, i;
37 42
38 /* look for the trip point corresponding to the current temperature */ 43 /* look for the trip point corresponding to the current temperature */
39 cur_trip = NULL; 44 cur_trip = NULL;
40 for (i = 0; i < priv->fan->bios.nr_fan_trip; i++) { 45 for (i = 0; i < therm->fan->bios.nr_fan_trip; i++) {
41 if (temp >= trip[i].temp) 46 if (temp >= trip[i].temp)
42 cur_trip = &trip[i]; 47 cur_trip = &trip[i];
43 } 48 }
@@ -49,10 +54,10 @@ nvkm_therm_update_trip(struct nvkm_therm *therm)
49 54
50 if (cur_trip) { 55 if (cur_trip) {
51 duty = cur_trip->fan_duty; 56 duty = cur_trip->fan_duty;
52 priv->last_trip = cur_trip; 57 therm->last_trip = cur_trip;
53 } else { 58 } else {
54 duty = 0; 59 duty = 0;
55 priv->last_trip = NULL; 60 therm->last_trip = NULL;
56 } 61 }
57 62
58 return duty; 63 return duty;
@@ -61,51 +66,50 @@ nvkm_therm_update_trip(struct nvkm_therm *therm)
61static int 66static int
62nvkm_therm_update_linear(struct nvkm_therm *therm) 67nvkm_therm_update_linear(struct nvkm_therm *therm)
63{ 68{
64 struct nvkm_therm_priv *priv = (void *)therm; 69 u8 linear_min_temp = therm->fan->bios.linear_min_temp;
65 u8 linear_min_temp = priv->fan->bios.linear_min_temp; 70 u8 linear_max_temp = therm->fan->bios.linear_max_temp;
66 u8 linear_max_temp = priv->fan->bios.linear_max_temp; 71 u8 temp = therm->func->temp_get(therm);
67 u8 temp = therm->temp_get(therm);
68 u16 duty; 72 u16 duty;
69 73
70 /* handle the non-linear part first */ 74 /* handle the non-linear part first */
71 if (temp < linear_min_temp) 75 if (temp < linear_min_temp)
72 return priv->fan->bios.min_duty; 76 return therm->fan->bios.min_duty;
73 else if (temp > linear_max_temp) 77 else if (temp > linear_max_temp)
74 return priv->fan->bios.max_duty; 78 return therm->fan->bios.max_duty;
75 79
76 /* we are in the linear zone */ 80 /* we are in the linear zone */
77 duty = (temp - linear_min_temp); 81 duty = (temp - linear_min_temp);
78 duty *= (priv->fan->bios.max_duty - priv->fan->bios.min_duty); 82 duty *= (therm->fan->bios.max_duty - therm->fan->bios.min_duty);
79 duty /= (linear_max_temp - linear_min_temp); 83 duty /= (linear_max_temp - linear_min_temp);
80 duty += priv->fan->bios.min_duty; 84 duty += therm->fan->bios.min_duty;
81 return duty; 85 return duty;
82} 86}
83 87
84static void 88static void
85nvkm_therm_update(struct nvkm_therm *therm, int mode) 89nvkm_therm_update(struct nvkm_therm *therm, int mode)
86{ 90{
87 struct nvkm_timer *ptimer = nvkm_timer(therm); 91 struct nvkm_subdev *subdev = &therm->subdev;
88 struct nvkm_therm_priv *priv = (void *)therm; 92 struct nvkm_timer *tmr = subdev->device->timer;
89 unsigned long flags; 93 unsigned long flags;
90 bool immd = true; 94 bool immd = true;
91 bool poll = true; 95 bool poll = true;
92 int duty = -1; 96 int duty = -1;
93 97
94 spin_lock_irqsave(&priv->lock, flags); 98 spin_lock_irqsave(&therm->lock, flags);
95 if (mode < 0) 99 if (mode < 0)
96 mode = priv->mode; 100 mode = therm->mode;
97 priv->mode = mode; 101 therm->mode = mode;
98 102
99 switch (mode) { 103 switch (mode) {
100 case NVKM_THERM_CTRL_MANUAL: 104 case NVKM_THERM_CTRL_MANUAL:
101 ptimer->alarm_cancel(ptimer, &priv->alarm); 105 nvkm_timer_alarm_cancel(tmr, &therm->alarm);
102 duty = nvkm_therm_fan_get(therm); 106 duty = nvkm_therm_fan_get(therm);
103 if (duty < 0) 107 if (duty < 0)
104 duty = 100; 108 duty = 100;
105 poll = false; 109 poll = false;
106 break; 110 break;
107 case NVKM_THERM_CTRL_AUTO: 111 case NVKM_THERM_CTRL_AUTO:
108 switch(priv->fan->bios.fan_mode) { 112 switch(therm->fan->bios.fan_mode) {
109 case NVBIOS_THERM_FAN_TRIP: 113 case NVBIOS_THERM_FAN_TRIP:
110 duty = nvkm_therm_update_trip(therm); 114 duty = nvkm_therm_update_trip(therm);
111 break; 115 break;
@@ -113,8 +117,8 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
113 duty = nvkm_therm_update_linear(therm); 117 duty = nvkm_therm_update_linear(therm);
114 break; 118 break;
115 case NVBIOS_THERM_FAN_OTHER: 119 case NVBIOS_THERM_FAN_OTHER:
116 if (priv->cstate) 120 if (therm->cstate)
117 duty = priv->cstate; 121 duty = therm->cstate;
118 poll = false; 122 poll = false;
119 break; 123 break;
120 } 124 }
@@ -122,29 +126,29 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
122 break; 126 break;
123 case NVKM_THERM_CTRL_NONE: 127 case NVKM_THERM_CTRL_NONE:
124 default: 128 default:
125 ptimer->alarm_cancel(ptimer, &priv->alarm); 129 nvkm_timer_alarm_cancel(tmr, &therm->alarm);
126 poll = false; 130 poll = false;
127 } 131 }
128 132
129 if (list_empty(&priv->alarm.head) && poll) 133 if (list_empty(&therm->alarm.head) && poll)
130 ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm); 134 nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
131 spin_unlock_irqrestore(&priv->lock, flags); 135 spin_unlock_irqrestore(&therm->lock, flags);
132 136
133 if (duty >= 0) { 137 if (duty >= 0) {
134 nv_debug(therm, "FAN target request: %d%%\n", duty); 138 nvkm_debug(subdev, "FAN target request: %d%%\n", duty);
135 nvkm_therm_fan_set(therm, immd, duty); 139 nvkm_therm_fan_set(therm, immd, duty);
136 } 140 }
137} 141}
138 142
139int 143int
140nvkm_therm_cstate(struct nvkm_therm *ptherm, int fan, int dir) 144nvkm_therm_cstate(struct nvkm_therm *therm, int fan, int dir)
141{ 145{
142 struct nvkm_therm_priv *priv = (void *)ptherm; 146 struct nvkm_subdev *subdev = &therm->subdev;
143 if (!dir || (dir < 0 && fan < priv->cstate) || 147 if (!dir || (dir < 0 && fan < therm->cstate) ||
144 (dir > 0 && fan > priv->cstate)) { 148 (dir > 0 && fan > therm->cstate)) {
145 nv_debug(ptherm, "default fan speed -> %d%%\n", fan); 149 nvkm_debug(subdev, "default fan speed -> %d%%\n", fan);
146 priv->cstate = fan; 150 therm->cstate = fan;
147 nvkm_therm_update(ptherm, -1); 151 nvkm_therm_update(therm, -1);
148 } 152 }
149 return 0; 153 return 0;
150} 154}
@@ -152,16 +156,16 @@ nvkm_therm_cstate(struct nvkm_therm *ptherm, int fan, int dir)
152static void 156static void
153nvkm_therm_alarm(struct nvkm_alarm *alarm) 157nvkm_therm_alarm(struct nvkm_alarm *alarm)
154{ 158{
155 struct nvkm_therm_priv *priv = 159 struct nvkm_therm *therm =
156 container_of(alarm, struct nvkm_therm_priv, alarm); 160 container_of(alarm, struct nvkm_therm, alarm);
157 nvkm_therm_update(&priv->base, -1); 161 nvkm_therm_update(therm, -1);
158} 162}
159 163
160int 164int
161nvkm_therm_fan_mode(struct nvkm_therm *therm, int mode) 165nvkm_therm_fan_mode(struct nvkm_therm *therm, int mode)
162{ 166{
163 struct nvkm_therm_priv *priv = (void *)therm; 167 struct nvkm_subdev *subdev = &therm->subdev;
164 struct nvkm_device *device = nv_device(therm); 168 struct nvkm_device *device = subdev->device;
165 static const char *name[] = { 169 static const char *name[] = {
166 "disabled", 170 "disabled",
167 "manual", 171 "manual",
@@ -171,51 +175,49 @@ nvkm_therm_fan_mode(struct nvkm_therm *therm, int mode)
171 /* The default PPWR ucode on fermi interferes with fan management */ 175 /* The default PPWR ucode on fermi interferes with fan management */
172 if ((mode >= ARRAY_SIZE(name)) || 176 if ((mode >= ARRAY_SIZE(name)) ||
173 (mode != NVKM_THERM_CTRL_NONE && device->card_type >= NV_C0 && 177 (mode != NVKM_THERM_CTRL_NONE && device->card_type >= NV_C0 &&
174 !nvkm_subdev(device, NVDEV_SUBDEV_PMU))) 178 !device->pmu))
175 return -EINVAL; 179 return -EINVAL;
176 180
177 /* do not allow automatic fan management if the thermal sensor is 181 /* do not allow automatic fan management if the thermal sensor is
178 * not available */ 182 * not available */
179 if (mode == NVKM_THERM_CTRL_AUTO && therm->temp_get(therm) < 0) 183 if (mode == NVKM_THERM_CTRL_AUTO &&
184 therm->func->temp_get(therm) < 0)
180 return -EINVAL; 185 return -EINVAL;
181 186
182 if (priv->mode == mode) 187 if (therm->mode == mode)
183 return 0; 188 return 0;
184 189
185 nv_info(therm, "fan management: %s\n", name[mode]); 190 nvkm_debug(subdev, "fan management: %s\n", name[mode]);
186 nvkm_therm_update(therm, mode); 191 nvkm_therm_update(therm, mode);
187 return 0; 192 return 0;
188} 193}
189 194
190int 195int
191nvkm_therm_attr_get(struct nvkm_therm *therm, 196nvkm_therm_attr_get(struct nvkm_therm *therm, enum nvkm_therm_attr_type type)
192 enum nvkm_therm_attr_type type)
193{ 197{
194 struct nvkm_therm_priv *priv = (void *)therm;
195
196 switch (type) { 198 switch (type) {
197 case NVKM_THERM_ATTR_FAN_MIN_DUTY: 199 case NVKM_THERM_ATTR_FAN_MIN_DUTY:
198 return priv->fan->bios.min_duty; 200 return therm->fan->bios.min_duty;
199 case NVKM_THERM_ATTR_FAN_MAX_DUTY: 201 case NVKM_THERM_ATTR_FAN_MAX_DUTY:
200 return priv->fan->bios.max_duty; 202 return therm->fan->bios.max_duty;
201 case NVKM_THERM_ATTR_FAN_MODE: 203 case NVKM_THERM_ATTR_FAN_MODE:
202 return priv->mode; 204 return therm->mode;
203 case NVKM_THERM_ATTR_THRS_FAN_BOOST: 205 case NVKM_THERM_ATTR_THRS_FAN_BOOST:
204 return priv->bios_sensor.thrs_fan_boost.temp; 206 return therm->bios_sensor.thrs_fan_boost.temp;
205 case NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST: 207 case NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST:
206 return priv->bios_sensor.thrs_fan_boost.hysteresis; 208 return therm->bios_sensor.thrs_fan_boost.hysteresis;
207 case NVKM_THERM_ATTR_THRS_DOWN_CLK: 209 case NVKM_THERM_ATTR_THRS_DOWN_CLK:
208 return priv->bios_sensor.thrs_down_clock.temp; 210 return therm->bios_sensor.thrs_down_clock.temp;
209 case NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST: 211 case NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST:
210 return priv->bios_sensor.thrs_down_clock.hysteresis; 212 return therm->bios_sensor.thrs_down_clock.hysteresis;
211 case NVKM_THERM_ATTR_THRS_CRITICAL: 213 case NVKM_THERM_ATTR_THRS_CRITICAL:
212 return priv->bios_sensor.thrs_critical.temp; 214 return therm->bios_sensor.thrs_critical.temp;
213 case NVKM_THERM_ATTR_THRS_CRITICAL_HYST: 215 case NVKM_THERM_ATTR_THRS_CRITICAL_HYST:
214 return priv->bios_sensor.thrs_critical.hysteresis; 216 return therm->bios_sensor.thrs_critical.hysteresis;
215 case NVKM_THERM_ATTR_THRS_SHUTDOWN: 217 case NVKM_THERM_ATTR_THRS_SHUTDOWN:
216 return priv->bios_sensor.thrs_shutdown.temp; 218 return therm->bios_sensor.thrs_shutdown.temp;
217 case NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST: 219 case NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST:
218 return priv->bios_sensor.thrs_shutdown.hysteresis; 220 return therm->bios_sensor.thrs_shutdown.hysteresis;
219 } 221 }
220 222
221 return -EINVAL; 223 return -EINVAL;
@@ -225,143 +227,156 @@ int
225nvkm_therm_attr_set(struct nvkm_therm *therm, 227nvkm_therm_attr_set(struct nvkm_therm *therm,
226 enum nvkm_therm_attr_type type, int value) 228 enum nvkm_therm_attr_type type, int value)
227{ 229{
228 struct nvkm_therm_priv *priv = (void *)therm;
229
230 switch (type) { 230 switch (type) {
231 case NVKM_THERM_ATTR_FAN_MIN_DUTY: 231 case NVKM_THERM_ATTR_FAN_MIN_DUTY:
232 if (value < 0) 232 if (value < 0)
233 value = 0; 233 value = 0;
234 if (value > priv->fan->bios.max_duty) 234 if (value > therm->fan->bios.max_duty)
235 value = priv->fan->bios.max_duty; 235 value = therm->fan->bios.max_duty;
236 priv->fan->bios.min_duty = value; 236 therm->fan->bios.min_duty = value;
237 return 0; 237 return 0;
238 case NVKM_THERM_ATTR_FAN_MAX_DUTY: 238 case NVKM_THERM_ATTR_FAN_MAX_DUTY:
239 if (value < 0) 239 if (value < 0)
240 value = 0; 240 value = 0;
241 if (value < priv->fan->bios.min_duty) 241 if (value < therm->fan->bios.min_duty)
242 value = priv->fan->bios.min_duty; 242 value = therm->fan->bios.min_duty;
243 priv->fan->bios.max_duty = value; 243 therm->fan->bios.max_duty = value;
244 return 0; 244 return 0;
245 case NVKM_THERM_ATTR_FAN_MODE: 245 case NVKM_THERM_ATTR_FAN_MODE:
246 return nvkm_therm_fan_mode(therm, value); 246 return nvkm_therm_fan_mode(therm, value);
247 case NVKM_THERM_ATTR_THRS_FAN_BOOST: 247 case NVKM_THERM_ATTR_THRS_FAN_BOOST:
248 priv->bios_sensor.thrs_fan_boost.temp = value; 248 therm->bios_sensor.thrs_fan_boost.temp = value;
249 priv->sensor.program_alarms(therm); 249 therm->func->program_alarms(therm);
250 return 0; 250 return 0;
251 case NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST: 251 case NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST:
252 priv->bios_sensor.thrs_fan_boost.hysteresis = value; 252 therm->bios_sensor.thrs_fan_boost.hysteresis = value;
253 priv->sensor.program_alarms(therm); 253 therm->func->program_alarms(therm);
254 return 0; 254 return 0;
255 case NVKM_THERM_ATTR_THRS_DOWN_CLK: 255 case NVKM_THERM_ATTR_THRS_DOWN_CLK:
256 priv->bios_sensor.thrs_down_clock.temp = value; 256 therm->bios_sensor.thrs_down_clock.temp = value;
257 priv->sensor.program_alarms(therm); 257 therm->func->program_alarms(therm);
258 return 0; 258 return 0;
259 case NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST: 259 case NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST:
260 priv->bios_sensor.thrs_down_clock.hysteresis = value; 260 therm->bios_sensor.thrs_down_clock.hysteresis = value;
261 priv->sensor.program_alarms(therm); 261 therm->func->program_alarms(therm);
262 return 0; 262 return 0;
263 case NVKM_THERM_ATTR_THRS_CRITICAL: 263 case NVKM_THERM_ATTR_THRS_CRITICAL:
264 priv->bios_sensor.thrs_critical.temp = value; 264 therm->bios_sensor.thrs_critical.temp = value;
265 priv->sensor.program_alarms(therm); 265 therm->func->program_alarms(therm);
266 return 0; 266 return 0;
267 case NVKM_THERM_ATTR_THRS_CRITICAL_HYST: 267 case NVKM_THERM_ATTR_THRS_CRITICAL_HYST:
268 priv->bios_sensor.thrs_critical.hysteresis = value; 268 therm->bios_sensor.thrs_critical.hysteresis = value;
269 priv->sensor.program_alarms(therm); 269 therm->func->program_alarms(therm);
270 return 0; 270 return 0;
271 case NVKM_THERM_ATTR_THRS_SHUTDOWN: 271 case NVKM_THERM_ATTR_THRS_SHUTDOWN:
272 priv->bios_sensor.thrs_shutdown.temp = value; 272 therm->bios_sensor.thrs_shutdown.temp = value;
273 priv->sensor.program_alarms(therm); 273 therm->func->program_alarms(therm);
274 return 0; 274 return 0;
275 case NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST: 275 case NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST:
276 priv->bios_sensor.thrs_shutdown.hysteresis = value; 276 therm->bios_sensor.thrs_shutdown.hysteresis = value;
277 priv->sensor.program_alarms(therm); 277 therm->func->program_alarms(therm);
278 return 0; 278 return 0;
279 } 279 }
280 280
281 return -EINVAL; 281 return -EINVAL;
282} 282}
283 283
284int 284static void
285_nvkm_therm_init(struct nvkm_object *object) 285nvkm_therm_intr(struct nvkm_subdev *subdev)
286{ 286{
287 struct nvkm_therm *therm = (void *)object; 287 struct nvkm_therm *therm = nvkm_therm(subdev);
288 struct nvkm_therm_priv *priv = (void *)therm; 288 if (therm->func->intr)
289 int ret; 289 therm->func->intr(therm);
290
291 ret = nvkm_subdev_init(&therm->base);
292 if (ret)
293 return ret;
294
295 if (priv->suspend >= 0) {
296 /* restore the pwm value only when on manual or auto mode */
297 if (priv->suspend > 0)
298 nvkm_therm_fan_set(therm, true, priv->fan->percent);
299
300 nvkm_therm_fan_mode(therm, priv->suspend);
301 }
302 nvkm_therm_sensor_init(therm);
303 nvkm_therm_fan_init(therm);
304 return 0;
305} 290}
306 291
307int 292static int
308_nvkm_therm_fini(struct nvkm_object *object, bool suspend) 293nvkm_therm_fini(struct nvkm_subdev *subdev, bool suspend)
309{ 294{
310 struct nvkm_therm *therm = (void *)object; 295 struct nvkm_therm *therm = nvkm_therm(subdev);
311 struct nvkm_therm_priv *priv = (void *)therm; 296
297 if (therm->func->fini)
298 therm->func->fini(therm);
312 299
313 nvkm_therm_fan_fini(therm, suspend); 300 nvkm_therm_fan_fini(therm, suspend);
314 nvkm_therm_sensor_fini(therm, suspend); 301 nvkm_therm_sensor_fini(therm, suspend);
302
315 if (suspend) { 303 if (suspend) {
316 priv->suspend = priv->mode; 304 therm->suspend = therm->mode;
317 priv->mode = NVKM_THERM_CTRL_NONE; 305 therm->mode = NVKM_THERM_CTRL_NONE;
318 } 306 }
319 307
320 return nvkm_subdev_fini(&therm->base, suspend);
321}
322
323int
324nvkm_therm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
325 struct nvkm_oclass *oclass, int length, void **pobject)
326{
327 struct nvkm_therm_priv *priv;
328 int ret;
329
330 ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PTHERM",
331 "therm", length, pobject);
332 priv = *pobject;
333 if (ret)
334 return ret;
335
336 nvkm_alarm_init(&priv->alarm, nvkm_therm_alarm);
337 spin_lock_init(&priv->lock);
338 spin_lock_init(&priv->sensor.alarm_program_lock);
339
340 priv->base.fan_get = nvkm_therm_fan_user_get;
341 priv->base.fan_set = nvkm_therm_fan_user_set;
342 priv->base.fan_sense = nvkm_therm_fan_sense;
343 priv->base.attr_get = nvkm_therm_attr_get;
344 priv->base.attr_set = nvkm_therm_attr_set;
345 priv->mode = priv->suspend = -1; /* undefined */
346 return 0; 308 return 0;
347} 309}
348 310
349int 311static int
350nvkm_therm_preinit(struct nvkm_therm *therm) 312nvkm_therm_oneinit(struct nvkm_subdev *subdev)
351{ 313{
314 struct nvkm_therm *therm = nvkm_therm(subdev);
352 nvkm_therm_sensor_ctor(therm); 315 nvkm_therm_sensor_ctor(therm);
353 nvkm_therm_ic_ctor(therm); 316 nvkm_therm_ic_ctor(therm);
354 nvkm_therm_fan_ctor(therm); 317 nvkm_therm_fan_ctor(therm);
355
356 nvkm_therm_fan_mode(therm, NVKM_THERM_CTRL_AUTO); 318 nvkm_therm_fan_mode(therm, NVKM_THERM_CTRL_AUTO);
357 nvkm_therm_sensor_preinit(therm); 319 nvkm_therm_sensor_preinit(therm);
358 return 0; 320 return 0;
359} 321}
360 322
361void 323static int
362_nvkm_therm_dtor(struct nvkm_object *object) 324nvkm_therm_init(struct nvkm_subdev *subdev)
325{
326 struct nvkm_therm *therm = nvkm_therm(subdev);
327
328 therm->func->init(therm);
329
330 if (therm->suspend >= 0) {
331 /* restore the pwm value only when on manual or auto mode */
332 if (therm->suspend > 0)
333 nvkm_therm_fan_set(therm, true, therm->fan->percent);
334
335 nvkm_therm_fan_mode(therm, therm->suspend);
336 }
337
338 nvkm_therm_sensor_init(therm);
339 nvkm_therm_fan_init(therm);
340 return 0;
341}
342
343static void *
344nvkm_therm_dtor(struct nvkm_subdev *subdev)
345{
346 struct nvkm_therm *therm = nvkm_therm(subdev);
347 kfree(therm->fan);
348 return therm;
349}
350
351static const struct nvkm_subdev_func
352nvkm_therm = {
353 .dtor = nvkm_therm_dtor,
354 .oneinit = nvkm_therm_oneinit,
355 .init = nvkm_therm_init,
356 .fini = nvkm_therm_fini,
357 .intr = nvkm_therm_intr,
358};
359
360int
361nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
362 int index, struct nvkm_therm **ptherm)
363{ 363{
364 struct nvkm_therm_priv *priv = (void *)object; 364 struct nvkm_therm *therm;
365 kfree(priv->fan); 365
366 nvkm_subdev_destroy(&priv->base.base); 366 if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
367 return -ENOMEM;
368
369 nvkm_subdev_ctor(&nvkm_therm, device, index, 0, &therm->subdev);
370 therm->func = func;
371
372 nvkm_alarm_init(&therm->alarm, nvkm_therm_alarm);
373 spin_lock_init(&therm->lock);
374 spin_lock_init(&therm->sensor.alarm_program_lock);
375
376 therm->fan_get = nvkm_therm_fan_user_get;
377 therm->fan_set = nvkm_therm_fan_user_set;
378 therm->attr_get = nvkm_therm_attr_get;
379 therm->attr_set = nvkm_therm_attr_set;
380 therm->mode = therm->suspend = -1; /* undefined */
381 return 0;
367} 382}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
index 434fa745ca40..91198d79393a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
@@ -32,8 +32,8 @@ static int
32nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target) 32nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
33{ 33{
34 struct nvkm_therm *therm = fan->parent; 34 struct nvkm_therm *therm = fan->parent;
35 struct nvkm_therm_priv *priv = (void *)therm; 35 struct nvkm_subdev *subdev = &therm->subdev;
36 struct nvkm_timer *ptimer = nvkm_timer(priv); 36 struct nvkm_timer *tmr = subdev->device->timer;
37 unsigned long flags; 37 unsigned long flags;
38 int ret = 0; 38 int ret = 0;
39 int duty; 39 int duty;
@@ -45,7 +45,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
45 target = max_t(u8, target, fan->bios.min_duty); 45 target = max_t(u8, target, fan->bios.min_duty);
46 target = min_t(u8, target, fan->bios.max_duty); 46 target = min_t(u8, target, fan->bios.max_duty);
47 if (fan->percent != target) { 47 if (fan->percent != target) {
48 nv_debug(therm, "FAN target: %d\n", target); 48 nvkm_debug(subdev, "FAN target: %d\n", target);
49 fan->percent = target; 49 fan->percent = target;
50 } 50 }
51 51
@@ -70,7 +70,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
70 duty = target; 70 duty = target;
71 } 71 }
72 72
73 nv_debug(therm, "FAN update: %d\n", duty); 73 nvkm_debug(subdev, "FAN update: %d\n", duty);
74 ret = fan->set(therm, duty); 74 ret = fan->set(therm, duty);
75 if (ret) { 75 if (ret) {
76 spin_unlock_irqrestore(&fan->lock, flags); 76 spin_unlock_irqrestore(&fan->lock, flags);
@@ -95,7 +95,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
95 else 95 else
96 delay = bump_period; 96 delay = bump_period;
97 97
98 ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm); 98 nvkm_timer_alarm(tmr, delay * 1000 * 1000, &fan->alarm);
99 } 99 }
100 100
101 return ret; 101 return ret;
@@ -111,48 +111,51 @@ nvkm_fan_alarm(struct nvkm_alarm *alarm)
111int 111int
112nvkm_therm_fan_get(struct nvkm_therm *therm) 112nvkm_therm_fan_get(struct nvkm_therm *therm)
113{ 113{
114 struct nvkm_therm_priv *priv = (void *)therm; 114 return therm->fan->get(therm);
115 return priv->fan->get(therm);
116} 115}
117 116
118int 117int
119nvkm_therm_fan_set(struct nvkm_therm *therm, bool immediate, int percent) 118nvkm_therm_fan_set(struct nvkm_therm *therm, bool immediate, int percent)
120{ 119{
121 struct nvkm_therm_priv *priv = (void *)therm; 120 return nvkm_fan_update(therm->fan, immediate, percent);
122 return nvkm_fan_update(priv->fan, immediate, percent);
123} 121}
124 122
125int 123int
126nvkm_therm_fan_sense(struct nvkm_therm *therm) 124nvkm_therm_fan_sense(struct nvkm_therm *therm)
127{ 125{
128 struct nvkm_therm_priv *priv = (void *)therm; 126 struct nvkm_device *device = therm->subdev.device;
129 struct nvkm_timer *ptimer = nvkm_timer(therm); 127 struct nvkm_timer *tmr = device->timer;
130 struct nvkm_gpio *gpio = nvkm_gpio(therm); 128 struct nvkm_gpio *gpio = device->gpio;
131 u32 cycles, cur, prev; 129 u32 cycles, cur, prev;
132 u64 start, end, tach; 130 u64 start, end, tach;
133 131
134 if (priv->fan->tach.func == DCB_GPIO_UNUSED) 132 if (therm->func->fan_sense)
133 return therm->func->fan_sense(therm);
134
135 if (therm->fan->tach.func == DCB_GPIO_UNUSED)
135 return -ENODEV; 136 return -ENODEV;
136 137
137 /* Time a complete rotation and extrapolate to RPM: 138 /* Time a complete rotation and extrapolate to RPM:
138 * When the fan spins, it changes the value of GPIO FAN_SENSE. 139 * When the fan spins, it changes the value of GPIO FAN_SENSE.
139 * We get 4 changes (0 -> 1 -> 0 -> 1) per complete rotation. 140 * We get 4 changes (0 -> 1 -> 0 -> 1) per complete rotation.
140 */ 141 */
141 start = ptimer->read(ptimer); 142 start = nvkm_timer_read(tmr);
142 prev = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line); 143 prev = nvkm_gpio_get(gpio, 0, therm->fan->tach.func,
144 therm->fan->tach.line);
143 cycles = 0; 145 cycles = 0;
144 do { 146 do {
145 usleep_range(500, 1000); /* supports 0 < rpm < 7500 */ 147 usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
146 148
147 cur = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line); 149 cur = nvkm_gpio_get(gpio, 0, therm->fan->tach.func,
150 therm->fan->tach.line);
148 if (prev != cur) { 151 if (prev != cur) {
149 if (!start) 152 if (!start)
150 start = ptimer->read(ptimer); 153 start = nvkm_timer_read(tmr);
151 cycles++; 154 cycles++;
152 prev = cur; 155 prev = cur;
153 } 156 }
154 } while (cycles < 5 && ptimer->read(ptimer) - start < 250000000); 157 } while (cycles < 5 && nvkm_timer_read(tmr) - start < 250000000);
155 end = ptimer->read(ptimer); 158 end = nvkm_timer_read(tmr);
156 159
157 if (cycles == 5) { 160 if (cycles == 5) {
158 tach = (u64)60000000000ULL; 161 tach = (u64)60000000000ULL;
@@ -171,9 +174,7 @@ nvkm_therm_fan_user_get(struct nvkm_therm *therm)
171int 174int
172nvkm_therm_fan_user_set(struct nvkm_therm *therm, int percent) 175nvkm_therm_fan_user_set(struct nvkm_therm *therm, int percent)
173{ 176{
174 struct nvkm_therm_priv *priv = (void *)therm; 177 if (therm->mode != NVKM_THERM_CTRL_MANUAL)
175
176 if (priv->mode != NVKM_THERM_CTRL_MANUAL)
177 return -EINVAL; 178 return -EINVAL;
178 179
179 return nvkm_therm_fan_set(therm, true, percent); 180 return nvkm_therm_fan_set(therm, true, percent);
@@ -182,29 +183,25 @@ nvkm_therm_fan_user_set(struct nvkm_therm *therm, int percent)
182static void 183static void
183nvkm_therm_fan_set_defaults(struct nvkm_therm *therm) 184nvkm_therm_fan_set_defaults(struct nvkm_therm *therm)
184{ 185{
185 struct nvkm_therm_priv *priv = (void *)therm; 186 therm->fan->bios.pwm_freq = 0;
186 187 therm->fan->bios.min_duty = 0;
187 priv->fan->bios.pwm_freq = 0; 188 therm->fan->bios.max_duty = 100;
188 priv->fan->bios.min_duty = 0; 189 therm->fan->bios.bump_period = 500;
189 priv->fan->bios.max_duty = 100; 190 therm->fan->bios.slow_down_period = 2000;
190 priv->fan->bios.bump_period = 500; 191 therm->fan->bios.linear_min_temp = 40;
191 priv->fan->bios.slow_down_period = 2000; 192 therm->fan->bios.linear_max_temp = 85;
192 priv->fan->bios.linear_min_temp = 40;
193 priv->fan->bios.linear_max_temp = 85;
194} 193}
195 194
196static void 195static void
197nvkm_therm_fan_safety_checks(struct nvkm_therm *therm) 196nvkm_therm_fan_safety_checks(struct nvkm_therm *therm)
198{ 197{
199 struct nvkm_therm_priv *priv = (void *)therm; 198 if (therm->fan->bios.min_duty > 100)
199 therm->fan->bios.min_duty = 100;
200 if (therm->fan->bios.max_duty > 100)
201 therm->fan->bios.max_duty = 100;
200 202
201 if (priv->fan->bios.min_duty > 100) 203 if (therm->fan->bios.min_duty > therm->fan->bios.max_duty)
202 priv->fan->bios.min_duty = 100; 204 therm->fan->bios.min_duty = therm->fan->bios.max_duty;
203 if (priv->fan->bios.max_duty > 100)
204 priv->fan->bios.max_duty = 100;
205
206 if (priv->fan->bios.min_duty > priv->fan->bios.max_duty)
207 priv->fan->bios.min_duty = priv->fan->bios.max_duty;
208} 205}
209 206
210int 207int
@@ -216,29 +213,28 @@ nvkm_therm_fan_init(struct nvkm_therm *therm)
216int 213int
217nvkm_therm_fan_fini(struct nvkm_therm *therm, bool suspend) 214nvkm_therm_fan_fini(struct nvkm_therm *therm, bool suspend)
218{ 215{
219 struct nvkm_therm_priv *priv = (void *)therm; 216 struct nvkm_timer *tmr = therm->subdev.device->timer;
220 struct nvkm_timer *ptimer = nvkm_timer(therm);
221
222 if (suspend) 217 if (suspend)
223 ptimer->alarm_cancel(ptimer, &priv->fan->alarm); 218 nvkm_timer_alarm_cancel(tmr, &therm->fan->alarm);
224 return 0; 219 return 0;
225} 220}
226 221
227int 222int
228nvkm_therm_fan_ctor(struct nvkm_therm *therm) 223nvkm_therm_fan_ctor(struct nvkm_therm *therm)
229{ 224{
230 struct nvkm_therm_priv *priv = (void *)therm; 225 struct nvkm_subdev *subdev = &therm->subdev;
231 struct nvkm_gpio *gpio = nvkm_gpio(therm); 226 struct nvkm_device *device = subdev->device;
232 struct nvkm_bios *bios = nvkm_bios(therm); 227 struct nvkm_gpio *gpio = device->gpio;
228 struct nvkm_bios *bios = device->bios;
233 struct dcb_gpio_func func; 229 struct dcb_gpio_func func;
234 int ret; 230 int ret;
235 231
236 /* attempt to locate a drivable fan, and determine control method */ 232 /* attempt to locate a drivable fan, and determine control method */
237 ret = gpio->find(gpio, 0, DCB_GPIO_FAN, 0xff, &func); 233 ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
238 if (ret == 0) { 234 if (ret == 0) {
239 /* FIXME: is this really the place to perform such checks ? */ 235 /* FIXME: is this really the place to perform such checks ? */
240 if (func.line != 16 && func.log[0] & DCB_GPIO_LOG_DIR_IN) { 236 if (func.line != 16 && func.log[0] & DCB_GPIO_LOG_DIR_IN) {
241 nv_debug(therm, "GPIO_FAN is in input mode\n"); 237 nvkm_debug(subdev, "GPIO_FAN is in input mode\n");
242 ret = -EINVAL; 238 ret = -EINVAL;
243 } else { 239 } else {
244 ret = nvkm_fanpwm_create(therm, &func); 240 ret = nvkm_fanpwm_create(therm, &func);
@@ -254,28 +250,29 @@ nvkm_therm_fan_ctor(struct nvkm_therm *therm)
254 return ret; 250 return ret;
255 } 251 }
256 252
257 nv_info(therm, "FAN control: %s\n", priv->fan->type); 253 nvkm_debug(subdev, "FAN control: %s\n", therm->fan->type);
258 254
259 /* read the current speed, it is useful when resuming */ 255 /* read the current speed, it is useful when resuming */
260 priv->fan->percent = nvkm_therm_fan_get(therm); 256 therm->fan->percent = nvkm_therm_fan_get(therm);
261 257
262 /* attempt to detect a tachometer connection */ 258 /* attempt to detect a tachometer connection */
263 ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach); 259 ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff,
260 &therm->fan->tach);
264 if (ret) 261 if (ret)
265 priv->fan->tach.func = DCB_GPIO_UNUSED; 262 therm->fan->tach.func = DCB_GPIO_UNUSED;
266 263
267 /* initialise fan bump/slow update handling */ 264 /* initialise fan bump/slow update handling */
268 priv->fan->parent = therm; 265 therm->fan->parent = therm;
269 nvkm_alarm_init(&priv->fan->alarm, nvkm_fan_alarm); 266 nvkm_alarm_init(&therm->fan->alarm, nvkm_fan_alarm);
270 spin_lock_init(&priv->fan->lock); 267 spin_lock_init(&therm->fan->lock);
271 268
272 /* other random init... */ 269 /* other random init... */
273 nvkm_therm_fan_set_defaults(therm); 270 nvkm_therm_fan_set_defaults(therm);
274 nvbios_perf_fan_parse(bios, &priv->fan->perf); 271 nvbios_perf_fan_parse(bios, &therm->fan->perf);
275 if (!nvbios_fan_parse(bios, &priv->fan->bios)) { 272 if (!nvbios_fan_parse(bios, &therm->fan->bios)) {
276 nv_debug(therm, "parsing the fan table failed\n"); 273 nvkm_debug(subdev, "parsing the fan table failed\n");
277 if (nvbios_therm_fan_parse(bios, &priv->fan->bios)) 274 if (nvbios_therm_fan_parse(bios, &therm->fan->bios))
278 nv_error(therm, "parsing both fan tables failed\n"); 275 nvkm_error(subdev, "parsing both fan tables failed\n");
279 } 276 }
280 nvkm_therm_fan_safety_checks(therm); 277 nvkm_therm_fan_safety_checks(therm);
281 return 0; 278 return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
index 534e5970ec9c..8ae300f911b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
@@ -38,11 +38,10 @@ nvkm_fannil_set(struct nvkm_therm *therm, int percent)
38int 38int
39nvkm_fannil_create(struct nvkm_therm *therm) 39nvkm_fannil_create(struct nvkm_therm *therm)
40{ 40{
41 struct nvkm_therm_priv *tpriv = (void *)therm;
42 struct nvkm_fan *priv; 41 struct nvkm_fan *priv;
43 42
44 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 43 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
45 tpriv->fan = priv; 44 therm->fan = priv;
46 if (!priv) 45 if (!priv)
47 return -ENOMEM; 46 return -ENOMEM;
48 47
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
index bde5ceaeb70a..340f37a299dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
@@ -24,13 +24,12 @@
24 */ 24 */
25#include "priv.h" 25#include "priv.h"
26 26
27#include <core/device.h>
28#include <core/option.h> 27#include <core/option.h>
29#include <subdev/bios.h> 28#include <subdev/bios.h>
30#include <subdev/bios/fan.h> 29#include <subdev/bios/fan.h>
31#include <subdev/gpio.h> 30#include <subdev/gpio.h>
32 31
33struct nvkm_fanpwm_priv { 32struct nvkm_fanpwm {
34 struct nvkm_fan base; 33 struct nvkm_fan base;
35 struct dcb_gpio_func func; 34 struct dcb_gpio_func func;
36}; 35};
@@ -38,76 +37,74 @@ struct nvkm_fanpwm_priv {
38static int 37static int
39nvkm_fanpwm_get(struct nvkm_therm *therm) 38nvkm_fanpwm_get(struct nvkm_therm *therm)
40{ 39{
41 struct nvkm_therm_priv *tpriv = (void *)therm; 40 struct nvkm_fanpwm *fan = (void *)therm->fan;
42 struct nvkm_fanpwm_priv *priv = (void *)tpriv->fan; 41 struct nvkm_device *device = therm->subdev.device;
43 struct nvkm_gpio *gpio = nvkm_gpio(therm); 42 struct nvkm_gpio *gpio = device->gpio;
44 int card_type = nv_device(therm)->card_type; 43 int card_type = device->card_type;
45 u32 divs, duty; 44 u32 divs, duty;
46 int ret; 45 int ret;
47 46
48 ret = therm->pwm_get(therm, priv->func.line, &divs, &duty); 47 ret = therm->func->pwm_get(therm, fan->func.line, &divs, &duty);
49 if (ret == 0 && divs) { 48 if (ret == 0 && divs) {
50 divs = max(divs, duty); 49 divs = max(divs, duty);
51 if (card_type <= NV_40 || (priv->func.log[0] & 1)) 50 if (card_type <= NV_40 || (fan->func.log[0] & 1))
52 duty = divs - duty; 51 duty = divs - duty;
53 return (duty * 100) / divs; 52 return (duty * 100) / divs;
54 } 53 }
55 54
56 return gpio->get(gpio, 0, priv->func.func, priv->func.line) * 100; 55 return nvkm_gpio_get(gpio, 0, fan->func.func, fan->func.line) * 100;
57} 56}
58 57
59static int 58static int
60nvkm_fanpwm_set(struct nvkm_therm *therm, int percent) 59nvkm_fanpwm_set(struct nvkm_therm *therm, int percent)
61{ 60{
62 struct nvkm_therm_priv *tpriv = (void *)therm; 61 struct nvkm_fanpwm *fan = (void *)therm->fan;
63 struct nvkm_fanpwm_priv *priv = (void *)tpriv->fan; 62 int card_type = therm->subdev.device->card_type;
64 int card_type = nv_device(therm)->card_type;
65 u32 divs, duty; 63 u32 divs, duty;
66 int ret; 64 int ret;
67 65
68 divs = priv->base.perf.pwm_divisor; 66 divs = fan->base.perf.pwm_divisor;
69 if (priv->base.bios.pwm_freq) { 67 if (fan->base.bios.pwm_freq) {
70 divs = 1; 68 divs = 1;
71 if (therm->pwm_clock) 69 if (therm->func->pwm_clock)
72 divs = therm->pwm_clock(therm, priv->func.line); 70 divs = therm->func->pwm_clock(therm, fan->func.line);
73 divs /= priv->base.bios.pwm_freq; 71 divs /= fan->base.bios.pwm_freq;
74 } 72 }
75 73
76 duty = ((divs * percent) + 99) / 100; 74 duty = ((divs * percent) + 99) / 100;
77 if (card_type <= NV_40 || (priv->func.log[0] & 1)) 75 if (card_type <= NV_40 || (fan->func.log[0] & 1))
78 duty = divs - duty; 76 duty = divs - duty;
79 77
80 ret = therm->pwm_set(therm, priv->func.line, divs, duty); 78 ret = therm->func->pwm_set(therm, fan->func.line, divs, duty);
81 if (ret == 0) 79 if (ret == 0)
82 ret = therm->pwm_ctrl(therm, priv->func.line, true); 80 ret = therm->func->pwm_ctrl(therm, fan->func.line, true);
83 return ret; 81 return ret;
84} 82}
85 83
86int 84int
87nvkm_fanpwm_create(struct nvkm_therm *therm, struct dcb_gpio_func *func) 85nvkm_fanpwm_create(struct nvkm_therm *therm, struct dcb_gpio_func *func)
88{ 86{
89 struct nvkm_device *device = nv_device(therm); 87 struct nvkm_device *device = therm->subdev.device;
90 struct nvkm_therm_priv *tpriv = (void *)therm; 88 struct nvkm_bios *bios = device->bios;
91 struct nvkm_bios *bios = nvkm_bios(therm); 89 struct nvkm_fanpwm *fan;
92 struct nvkm_fanpwm_priv *priv; 90 struct nvbios_therm_fan info = {};
93 struct nvbios_therm_fan fan;
94 u32 divs, duty; 91 u32 divs, duty;
95 92
96 nvbios_fan_parse(bios, &fan); 93 nvbios_fan_parse(bios, &info);
97 94
98 if (!nvkm_boolopt(device->cfgopt, "NvFanPWM", func->param) || 95 if (!nvkm_boolopt(device->cfgopt, "NvFanPWM", func->param) ||
99 !therm->pwm_ctrl || fan.type == NVBIOS_THERM_FAN_TOGGLE || 96 !therm->func->pwm_ctrl || info.type == NVBIOS_THERM_FAN_TOGGLE ||
100 therm->pwm_get(therm, func->line, &divs, &duty) == -ENODEV) 97 therm->func->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
101 return -ENODEV; 98 return -ENODEV;
102 99
103 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 100 fan = kzalloc(sizeof(*fan), GFP_KERNEL);
104 tpriv->fan = &priv->base; 101 therm->fan = &fan->base;
105 if (!priv) 102 if (!fan)
106 return -ENOMEM; 103 return -ENOMEM;
107 104
108 priv->base.type = "PWM"; 105 fan->base.type = "PWM";
109 priv->base.get = nvkm_fanpwm_get; 106 fan->base.get = nvkm_fanpwm_get;
110 priv->base.set = nvkm_fanpwm_set; 107 fan->base.set = nvkm_fanpwm_set;
111 priv->func = *func; 108 fan->func = *func;
112 return 0; 109 return 0;
113} 110}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
index 4ce041e81371..59701b7a6597 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
@@ -26,7 +26,7 @@
26#include <subdev/gpio.h> 26#include <subdev/gpio.h>
27#include <subdev/timer.h> 27#include <subdev/timer.h>
28 28
29struct nvkm_fantog_priv { 29struct nvkm_fantog {
30 struct nvkm_fan base; 30 struct nvkm_fan base;
31 struct nvkm_alarm alarm; 31 struct nvkm_alarm alarm;
32 spinlock_t lock; 32 spinlock_t lock;
@@ -36,83 +36,81 @@ struct nvkm_fantog_priv {
36}; 36};
37 37
38static void 38static void
39nvkm_fantog_update(struct nvkm_fantog_priv *priv, int percent) 39nvkm_fantog_update(struct nvkm_fantog *fan, int percent)
40{ 40{
41 struct nvkm_therm_priv *tpriv = (void *)priv->base.parent; 41 struct nvkm_therm *therm = fan->base.parent;
42 struct nvkm_timer *ptimer = nvkm_timer(tpriv); 42 struct nvkm_device *device = therm->subdev.device;
43 struct nvkm_gpio *gpio = nvkm_gpio(tpriv); 43 struct nvkm_timer *tmr = device->timer;
44 struct nvkm_gpio *gpio = device->gpio;
44 unsigned long flags; 45 unsigned long flags;
45 int duty; 46 int duty;
46 47
47 spin_lock_irqsave(&priv->lock, flags); 48 spin_lock_irqsave(&fan->lock, flags);
48 if (percent < 0) 49 if (percent < 0)
49 percent = priv->percent; 50 percent = fan->percent;
50 priv->percent = percent; 51 fan->percent = percent;
51 52
52 duty = !gpio->get(gpio, 0, DCB_GPIO_FAN, 0xff); 53 duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
53 gpio->set(gpio, 0, DCB_GPIO_FAN, 0xff, duty); 54 nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
54 55
55 if (list_empty(&priv->alarm.head) && percent != (duty * 100)) { 56 if (list_empty(&fan->alarm.head) && percent != (duty * 100)) {
56 u64 next_change = (percent * priv->period_us) / 100; 57 u64 next_change = (percent * fan->period_us) / 100;
57 if (!duty) 58 if (!duty)
58 next_change = priv->period_us - next_change; 59 next_change = fan->period_us - next_change;
59 ptimer->alarm(ptimer, next_change * 1000, &priv->alarm); 60 nvkm_timer_alarm(tmr, next_change * 1000, &fan->alarm);
60 } 61 }
61 spin_unlock_irqrestore(&priv->lock, flags); 62 spin_unlock_irqrestore(&fan->lock, flags);
62} 63}
63 64
64static void 65static void
65nvkm_fantog_alarm(struct nvkm_alarm *alarm) 66nvkm_fantog_alarm(struct nvkm_alarm *alarm)
66{ 67{
67 struct nvkm_fantog_priv *priv = 68 struct nvkm_fantog *fan =
68 container_of(alarm, struct nvkm_fantog_priv, alarm); 69 container_of(alarm, struct nvkm_fantog, alarm);
69 nvkm_fantog_update(priv, -1); 70 nvkm_fantog_update(fan, -1);
70} 71}
71 72
72static int 73static int
73nvkm_fantog_get(struct nvkm_therm *therm) 74nvkm_fantog_get(struct nvkm_therm *therm)
74{ 75{
75 struct nvkm_therm_priv *tpriv = (void *)therm; 76 struct nvkm_fantog *fan = (void *)therm->fan;
76 struct nvkm_fantog_priv *priv = (void *)tpriv->fan; 77 return fan->percent;
77 return priv->percent;
78} 78}
79 79
80static int 80static int
81nvkm_fantog_set(struct nvkm_therm *therm, int percent) 81nvkm_fantog_set(struct nvkm_therm *therm, int percent)
82{ 82{
83 struct nvkm_therm_priv *tpriv = (void *)therm; 83 struct nvkm_fantog *fan = (void *)therm->fan;
84 struct nvkm_fantog_priv *priv = (void *)tpriv->fan; 84 if (therm->func->pwm_ctrl)
85 if (therm->pwm_ctrl) 85 therm->func->pwm_ctrl(therm, fan->func.line, false);
86 therm->pwm_ctrl(therm, priv->func.line, false); 86 nvkm_fantog_update(fan, percent);
87 nvkm_fantog_update(priv, percent);
88 return 0; 87 return 0;
89} 88}
90 89
91int 90int
92nvkm_fantog_create(struct nvkm_therm *therm, struct dcb_gpio_func *func) 91nvkm_fantog_create(struct nvkm_therm *therm, struct dcb_gpio_func *func)
93{ 92{
94 struct nvkm_therm_priv *tpriv = (void *)therm; 93 struct nvkm_fantog *fan;
95 struct nvkm_fantog_priv *priv;
96 int ret; 94 int ret;
97 95
98 if (therm->pwm_ctrl) { 96 if (therm->func->pwm_ctrl) {
99 ret = therm->pwm_ctrl(therm, func->line, false); 97 ret = therm->func->pwm_ctrl(therm, func->line, false);
100 if (ret) 98 if (ret)
101 return ret; 99 return ret;
102 } 100 }
103 101
104 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 102 fan = kzalloc(sizeof(*fan), GFP_KERNEL);
105 tpriv->fan = &priv->base; 103 therm->fan = &fan->base;
106 if (!priv) 104 if (!fan)
107 return -ENOMEM; 105 return -ENOMEM;
108 106
109 priv->base.type = "toggle"; 107 fan->base.type = "toggle";
110 priv->base.get = nvkm_fantog_get; 108 fan->base.get = nvkm_fantog_get;
111 priv->base.set = nvkm_fantog_set; 109 fan->base.set = nvkm_fantog_set;
112 nvkm_alarm_init(&priv->alarm, nvkm_fantog_alarm); 110 nvkm_alarm_init(&fan->alarm, nvkm_fantog_alarm);
113 priv->period_us = 100000; /* 10Hz */ 111 fan->period_us = 100000; /* 10Hz */
114 priv->percent = 100; 112 fan->percent = 100;
115 priv->func = *func; 113 fan->func = *func;
116 spin_lock_init(&priv->lock); 114 spin_lock_init(&fan->lock);
117 return 0; 115 return 0;
118} 116}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
index 85b5d0c18c0b..86e81930d8ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
@@ -26,17 +26,13 @@
26 26
27#include <subdev/fuse.h> 27#include <subdev/fuse.h>
28 28
29struct g84_therm_priv {
30 struct nvkm_therm_priv base;
31};
32
33int 29int
34g84_temp_get(struct nvkm_therm *therm) 30g84_temp_get(struct nvkm_therm *therm)
35{ 31{
36 struct nvkm_fuse *fuse = nvkm_fuse(therm); 32 struct nvkm_device *device = therm->subdev.device;
37 33
38 if (nv_ro32(fuse, 0x1a8) == 1) 34 if (nvkm_fuse_read(device->fuse, 0x1a8) == 1)
39 return nv_rd32(therm, 0x20400); 35 return nvkm_rd32(device, 0x20400);
40 else 36 else
41 return -ENODEV; 37 return -ENODEV;
42} 38}
@@ -44,12 +40,12 @@ g84_temp_get(struct nvkm_therm *therm)
44void 40void
45g84_sensor_setup(struct nvkm_therm *therm) 41g84_sensor_setup(struct nvkm_therm *therm)
46{ 42{
47 struct nvkm_fuse *fuse = nvkm_fuse(therm); 43 struct nvkm_device *device = therm->subdev.device;
48 44
49 /* enable temperature reading for cards with insane defaults */ 45 /* enable temperature reading for cards with insane defaults */
50 if (nv_ro32(fuse, 0x1a8) == 1) { 46 if (nvkm_fuse_read(device->fuse, 0x1a8) == 1) {
51 nv_mask(therm, 0x20008, 0x80008000, 0x80000000); 47 nvkm_mask(device, 0x20008, 0x80008000, 0x80000000);
52 nv_mask(therm, 0x2000c, 0x80000003, 0x00000000); 48 nvkm_mask(device, 0x2000c, 0x80000003, 0x00000000);
53 mdelay(20); /* wait for the temperature to stabilize */ 49 mdelay(20); /* wait for the temperature to stabilize */
54 } 50 }
55} 51}
@@ -57,36 +53,40 @@ g84_sensor_setup(struct nvkm_therm *therm)
57static void 53static void
58g84_therm_program_alarms(struct nvkm_therm *therm) 54g84_therm_program_alarms(struct nvkm_therm *therm)
59{ 55{
60 struct nvkm_therm_priv *priv = (void *)therm; 56 struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
61 struct nvbios_therm_sensor *sensor = &priv->bios_sensor; 57 struct nvkm_subdev *subdev = &therm->subdev;
58 struct nvkm_device *device = subdev->device;
62 unsigned long flags; 59 unsigned long flags;
63 60
64 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags); 61 spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);
65 62
66 /* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */ 63 /* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
67 nv_wr32(therm, 0x20000, 0x000003ff); 64 nvkm_wr32(device, 0x20000, 0x000003ff);
68 65
69 /* shutdown: The computer should be shutdown when reached */ 66 /* shutdown: The computer should be shutdown when reached */
70 nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis); 67 nvkm_wr32(device, 0x20484, sensor->thrs_shutdown.hysteresis);
71 nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp); 68 nvkm_wr32(device, 0x20480, sensor->thrs_shutdown.temp);
72 69
73 /* THRS_1 : fan boost*/ 70 /* THRS_1 : fan boost*/
74 nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp); 71 nvkm_wr32(device, 0x204c4, sensor->thrs_fan_boost.temp);
75 72
76 /* THRS_2 : critical */ 73 /* THRS_2 : critical */
77 nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp); 74 nvkm_wr32(device, 0x204c0, sensor->thrs_critical.temp);
78 75
79 /* THRS_4 : down clock */ 76 /* THRS_4 : down clock */
80 nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp); 77 nvkm_wr32(device, 0x20414, sensor->thrs_down_clock.temp);
81 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); 78 spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
82 79
83 nv_debug(therm, 80 nvkm_debug(subdev,
84 "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n", 81 "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
85 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis, 82 sensor->thrs_fan_boost.temp,
86 sensor->thrs_down_clock.temp, 83 sensor->thrs_fan_boost.hysteresis,
87 sensor->thrs_down_clock.hysteresis, 84 sensor->thrs_down_clock.temp,
88 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis, 85 sensor->thrs_down_clock.hysteresis,
89 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis); 86 sensor->thrs_critical.temp,
87 sensor->thrs_critical.hysteresis,
88 sensor->thrs_shutdown.temp,
89 sensor->thrs_shutdown.hysteresis);
90 90
91} 91}
92 92
@@ -97,24 +97,25 @@ g84_therm_threshold_hyst_emulation(struct nvkm_therm *therm,
97 const struct nvbios_therm_threshold *thrs, 97 const struct nvbios_therm_threshold *thrs,
98 enum nvkm_therm_thrs thrs_name) 98 enum nvkm_therm_thrs thrs_name)
99{ 99{
100 struct nvkm_device *device = therm->subdev.device;
100 enum nvkm_therm_thrs_direction direction; 101 enum nvkm_therm_thrs_direction direction;
101 enum nvkm_therm_thrs_state prev_state, new_state; 102 enum nvkm_therm_thrs_state prev_state, new_state;
102 int temp, cur; 103 int temp, cur;
103 104
104 prev_state = nvkm_therm_sensor_get_threshold_state(therm, thrs_name); 105 prev_state = nvkm_therm_sensor_get_threshold_state(therm, thrs_name);
105 temp = nv_rd32(therm, thrs_reg); 106 temp = nvkm_rd32(device, thrs_reg);
106 107
107 /* program the next threshold */ 108 /* program the next threshold */
108 if (temp == thrs->temp) { 109 if (temp == thrs->temp) {
109 nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis); 110 nvkm_wr32(device, thrs_reg, thrs->temp - thrs->hysteresis);
110 new_state = NVKM_THERM_THRS_HIGHER; 111 new_state = NVKM_THERM_THRS_HIGHER;
111 } else { 112 } else {
112 nv_wr32(therm, thrs_reg, thrs->temp); 113 nvkm_wr32(device, thrs_reg, thrs->temp);
113 new_state = NVKM_THERM_THRS_LOWER; 114 new_state = NVKM_THERM_THRS_LOWER;
114 } 115 }
115 116
116 /* fix the state (in case someone reprogrammed the alarms) */ 117 /* fix the state (in case someone reprogrammed the alarms) */
117 cur = therm->temp_get(therm); 118 cur = therm->func->temp_get(therm);
118 if (new_state == NVKM_THERM_THRS_LOWER && cur > thrs->temp) 119 if (new_state == NVKM_THERM_THRS_LOWER && cur > thrs->temp)
119 new_state = NVKM_THERM_THRS_HIGHER; 120 new_state = NVKM_THERM_THRS_HIGHER;
120 else if (new_state == NVKM_THERM_THRS_HIGHER && 121 else if (new_state == NVKM_THERM_THRS_HIGHER &&
@@ -135,17 +136,17 @@ g84_therm_threshold_hyst_emulation(struct nvkm_therm *therm,
135} 136}
136 137
137static void 138static void
138g84_therm_intr(struct nvkm_subdev *subdev) 139g84_therm_intr(struct nvkm_therm *therm)
139{ 140{
140 struct nvkm_therm *therm = nvkm_therm(subdev); 141 struct nvkm_subdev *subdev = &therm->subdev;
141 struct nvkm_therm_priv *priv = (void *)therm; 142 struct nvkm_device *device = subdev->device;
142 struct nvbios_therm_sensor *sensor = &priv->bios_sensor; 143 struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
143 unsigned long flags; 144 unsigned long flags;
144 uint32_t intr; 145 uint32_t intr;
145 146
146 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags); 147 spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);
147 148
148 intr = nv_rd32(therm, 0x20100) & 0x3ff; 149 intr = nvkm_rd32(device, 0x20100) & 0x3ff;
149 150
150 /* THRS_4: downclock */ 151 /* THRS_4: downclock */
151 if (intr & 0x002) { 152 if (intr & 0x002) {
@@ -180,87 +181,66 @@ g84_therm_intr(struct nvkm_subdev *subdev)
180 } 181 }
181 182
182 if (intr) 183 if (intr)
183 nv_error(therm, "unhandled intr 0x%08x\n", intr); 184 nvkm_error(subdev, "intr %08x\n", intr);
184 185
185 /* ACK everything */ 186 /* ACK everything */
186 nv_wr32(therm, 0x20100, 0xffffffff); 187 nvkm_wr32(device, 0x20100, 0xffffffff);
187 nv_wr32(therm, 0x1100, 0x10000); /* PBUS */ 188 nvkm_wr32(device, 0x1100, 0x10000); /* PBUS */
188 189
189 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); 190 spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
190} 191}
191 192
192static int 193void
193g84_therm_init(struct nvkm_object *object) 194g84_therm_fini(struct nvkm_therm *therm)
194{ 195{
195 struct g84_therm_priv *priv = (void *)object; 196 struct nvkm_device *device = therm->subdev.device;
196 int ret;
197 197
198 ret = nvkm_therm_init(&priv->base.base); 198 /* Disable PTherm IRQs */
199 if (ret) 199 nvkm_wr32(device, 0x20000, 0x00000000);
200 return ret;
201 200
202 g84_sensor_setup(&priv->base.base); 201 /* ACK all PTherm IRQs */
203 return 0; 202 nvkm_wr32(device, 0x20100, 0xffffffff);
203 nvkm_wr32(device, 0x1100, 0x10000); /* PBUS */
204} 204}
205 205
206static int 206static void
207g84_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 207g84_therm_init(struct nvkm_therm *therm)
208 struct nvkm_oclass *oclass, void *data, u32 size,
209 struct nvkm_object **pobject)
210{ 208{
211 struct g84_therm_priv *priv; 209 g84_sensor_setup(therm);
212 int ret;
213
214 ret = nvkm_therm_create(parent, engine, oclass, &priv);
215 *pobject = nv_object(priv);
216 if (ret)
217 return ret;
218
219 priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
220 priv->base.base.pwm_get = nv50_fan_pwm_get;
221 priv->base.base.pwm_set = nv50_fan_pwm_set;
222 priv->base.base.pwm_clock = nv50_fan_pwm_clock;
223 priv->base.base.temp_get = g84_temp_get;
224 priv->base.sensor.program_alarms = g84_therm_program_alarms;
225 nv_subdev(priv)->intr = g84_therm_intr;
226
227 /* init the thresholds */
228 nvkm_therm_sensor_set_threshold_state(&priv->base.base,
229 NVKM_THERM_THRS_SHUTDOWN,
230 NVKM_THERM_THRS_LOWER);
231 nvkm_therm_sensor_set_threshold_state(&priv->base.base,
232 NVKM_THERM_THRS_FANBOOST,
233 NVKM_THERM_THRS_LOWER);
234 nvkm_therm_sensor_set_threshold_state(&priv->base.base,
235 NVKM_THERM_THRS_CRITICAL,
236 NVKM_THERM_THRS_LOWER);
237 nvkm_therm_sensor_set_threshold_state(&priv->base.base,
238 NVKM_THERM_THRS_DOWNCLOCK,
239 NVKM_THERM_THRS_LOWER);
240
241 return nvkm_therm_preinit(&priv->base.base);
242} 210}
243 211
212static const struct nvkm_therm_func
213g84_therm = {
214 .init = g84_therm_init,
215 .fini = g84_therm_fini,
216 .intr = g84_therm_intr,
217 .pwm_ctrl = nv50_fan_pwm_ctrl,
218 .pwm_get = nv50_fan_pwm_get,
219 .pwm_set = nv50_fan_pwm_set,
220 .pwm_clock = nv50_fan_pwm_clock,
221 .temp_get = g84_temp_get,
222 .program_alarms = g84_therm_program_alarms,
223};
224
244int 225int
245g84_therm_fini(struct nvkm_object *object, bool suspend) 226g84_therm_new(struct nvkm_device *device, int index, struct nvkm_therm **ptherm)
246{ 227{
247 /* Disable PTherm IRQs */ 228 struct nvkm_therm *therm;
248 nv_wr32(object, 0x20000, 0x00000000); 229 int ret;
249 230
250 /* ACK all PTherm IRQs */ 231 ret = nvkm_therm_new_(&g84_therm, device, index, &therm);
251 nv_wr32(object, 0x20100, 0xffffffff); 232 *ptherm = therm;
252 nv_wr32(object, 0x1100, 0x10000); /* PBUS */ 233 if (ret)
234 return ret;
253 235
254 return _nvkm_therm_fini(object, suspend); 236 /* init the thresholds */
237 nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_SHUTDOWN,
238 NVKM_THERM_THRS_LOWER);
239 nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_FANBOOST,
240 NVKM_THERM_THRS_LOWER);
241 nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_CRITICAL,
242 NVKM_THERM_THRS_LOWER);
243 nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_DOWNCLOCK,
244 NVKM_THERM_THRS_LOWER);
245 return 0;
255} 246}
256
257struct nvkm_oclass
258g84_therm_oclass = {
259 .handle = NV_SUBDEV(THERM, 0x84),
260 .ofuncs = &(struct nvkm_ofuncs) {
261 .ctor = g84_therm_ctor,
262 .dtor = _nvkm_therm_dtor,
263 .init = g84_therm_init,
264 .fini = g84_therm_fini,
265 },
266};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c
deleted file mode 100644
index 46b7e656a752..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c
+++ /dev/null
@@ -1,174 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26#include <core/device.h>
27
28struct gf110_therm_priv {
29 struct nvkm_therm_priv base;
30};
31
32static int
33pwm_info(struct nvkm_therm *therm, int line)
34{
35 u32 gpio = nv_rd32(therm, 0x00d610 + (line * 0x04));
36
37 switch (gpio & 0x000000c0) {
38 case 0x00000000: /* normal mode, possibly pwm forced off by us */
39 case 0x00000040: /* nvio special */
40 switch (gpio & 0x0000001f) {
41 case 0x00: return 2;
42 case 0x19: return 1;
43 case 0x1c: return 0;
44 case 0x1e: return 2;
45 default:
46 break;
47 }
48 default:
49 break;
50 }
51
52 nv_error(therm, "GPIO %d unknown PWM: 0x%08x\n", line, gpio);
53 return -ENODEV;
54}
55
56static int
57gf110_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
58{
59 u32 data = enable ? 0x00000040 : 0x00000000;
60 int indx = pwm_info(therm, line);
61 if (indx < 0)
62 return indx;
63 else if (indx < 2)
64 nv_mask(therm, 0x00d610 + (line * 0x04), 0x000000c0, data);
65 /* nothing to do for indx == 2, it seems hardwired to PTHERM */
66 return 0;
67}
68
69static int
70gf110_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
71{
72 int indx = pwm_info(therm, line);
73 if (indx < 0)
74 return indx;
75 else if (indx < 2) {
76 if (nv_rd32(therm, 0x00d610 + (line * 0x04)) & 0x00000040) {
77 *divs = nv_rd32(therm, 0x00e114 + (indx * 8));
78 *duty = nv_rd32(therm, 0x00e118 + (indx * 8));
79 return 0;
80 }
81 } else if (indx == 2) {
82 *divs = nv_rd32(therm, 0x0200d8) & 0x1fff;
83 *duty = nv_rd32(therm, 0x0200dc) & 0x1fff;
84 return 0;
85 }
86
87 return -EINVAL;
88}
89
90static int
91gf110_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
92{
93 int indx = pwm_info(therm, line);
94 if (indx < 0)
95 return indx;
96 else if (indx < 2) {
97 nv_wr32(therm, 0x00e114 + (indx * 8), divs);
98 nv_wr32(therm, 0x00e118 + (indx * 8), duty | 0x80000000);
99 } else if (indx == 2) {
100 nv_mask(therm, 0x0200d8, 0x1fff, divs); /* keep the high bits */
101 nv_wr32(therm, 0x0200dc, duty | 0x40000000);
102 }
103 return 0;
104}
105
106static int
107gf110_fan_pwm_clock(struct nvkm_therm *therm, int line)
108{
109 int indx = pwm_info(therm, line);
110 if (indx < 0)
111 return 0;
112 else if (indx < 2)
113 return (nv_device(therm)->crystal * 1000) / 20;
114 else
115 return nv_device(therm)->crystal * 1000 / 10;
116}
117
118int
119gf110_therm_init(struct nvkm_object *object)
120{
121 struct gf110_therm_priv *priv = (void *)object;
122 int ret;
123
124 ret = nvkm_therm_init(&priv->base.base);
125 if (ret)
126 return ret;
127
128 /* enable fan tach, count revolutions per-second */
129 nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
130 if (priv->base.fan->tach.func != DCB_GPIO_UNUSED) {
131 nv_mask(priv, 0x00d79c, 0x000000ff, priv->base.fan->tach.line);
132 nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
133 nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
134 }
135 nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
136
137 return 0;
138}
139
140static int
141gf110_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
142 struct nvkm_oclass *oclass, void *data, u32 size,
143 struct nvkm_object **pobject)
144{
145 struct gf110_therm_priv *priv;
146 int ret;
147
148 ret = nvkm_therm_create(parent, engine, oclass, &priv);
149 *pobject = nv_object(priv);
150 if (ret)
151 return ret;
152
153 g84_sensor_setup(&priv->base.base);
154
155 priv->base.base.pwm_ctrl = gf110_fan_pwm_ctrl;
156 priv->base.base.pwm_get = gf110_fan_pwm_get;
157 priv->base.base.pwm_set = gf110_fan_pwm_set;
158 priv->base.base.pwm_clock = gf110_fan_pwm_clock;
159 priv->base.base.temp_get = g84_temp_get;
160 priv->base.base.fan_sense = gt215_therm_fan_sense;
161 priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
162 return nvkm_therm_preinit(&priv->base.base);
163}
164
165struct nvkm_oclass
166gf110_therm_oclass = {
167 .handle = NV_SUBDEV(THERM, 0xd0),
168 .ofuncs = &(struct nvkm_ofuncs) {
169 .ctor = gf110_therm_ctor,
170 .dtor = _nvkm_therm_dtor,
171 .init = gf110_therm_init,
172 .fini = g84_therm_fini,
173 },
174};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
new file mode 100644
index 000000000000..06dcfd6ee966
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
@@ -0,0 +1,153 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
/* Decode the PWM controller index for a fan GPIO line on GF119.
 *
 * Reads the GPIO config register at 0x00d610 + line*4 and maps the
 * function select bits (0x1f) to a PWM source index:
 *   0 or 1 - the two PDISP PWM channels,
 *   2      - the PTHERM-hardwired PWM.
 * Returns -ENODEV (after logging) for any unrecognised configuration.
 */
26static int
27pwm_info(struct nvkm_therm *therm, int line)
28{
29 struct nvkm_subdev *subdev = &therm->subdev;
30 struct nvkm_device *device = subdev->device;
31 u32 gpio = nvkm_rd32(device, 0x00d610 + (line * 0x04));
32
33 switch (gpio & 0x000000c0) {
34 case 0x00000000: /* normal mode, possibly pwm forced off by us */
35 case 0x00000040: /* nvio special */
36 switch (gpio & 0x0000001f) {
37 case 0x00: return 2;
38 case 0x19: return 1;
39 case 0x1c: return 0;
40 case 0x1e: return 2;
41 default:
42 break;
43 }
 /* intentional fallthrough: unknown function select falls out to
  * the error path below */
44 default:
45 break;
46 }
47
48 nvkm_error(subdev, "GPIO %d unknown PWM: %08x\n", line, gpio);
49 return -ENODEV;
50}
51
/* Enable/disable PWM output on a fan GPIO line. Only the two PDISP PWM
 * indices (0/1) have a control bit to flip; index 2 needs no action.
 * Propagates pwm_info()'s error code for unknown lines.
 */
52static int
53gf119_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
54{
55 struct nvkm_device *device = therm->subdev.device;
56 u32 data = enable ? 0x00000040 : 0x00000000;
57 int indx = pwm_info(therm, line);
58 if (indx < 0)
59 return indx;
60 else if (indx < 2)
61 nvkm_mask(device, 0x00d610 + (line * 0x04), 0x000000c0, data);
62 /* nothing to do for indx == 2, it seems hardwired to PTHERM */
63 return 0;
64}
65
/* Read back the current PWM divider and duty for a fan GPIO line.
 * For PDISP indices (0/1) the values are only valid while the GPIO is in
 * PWM mode (bit 6 of its config register); otherwise, and for any state
 * not covered below, returns -EINVAL. Index 2 reads the PTHERM PWM
 * registers, masked to their 13-bit fields.
 */
66static int
67gf119_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
68{
69 struct nvkm_device *device = therm->subdev.device;
70 int indx = pwm_info(therm, line);
71 if (indx < 0)
72 return indx;
73 else if (indx < 2) {
74 if (nvkm_rd32(device, 0x00d610 + (line * 0x04)) & 0x00000040) {
75 *divs = nvkm_rd32(device, 0x00e114 + (indx * 8));
76 *duty = nvkm_rd32(device, 0x00e118 + (indx * 8));
77 return 0;
78 }
79 } else if (indx == 2) {
80 *divs = nvkm_rd32(device, 0x0200d8) & 0x1fff;
81 *duty = nvkm_rd32(device, 0x0200dc) & 0x1fff;
82 return 0;
83 }
84
85 return -EINVAL;
86}
87
/* Program the PWM divider and duty for a fan GPIO line. The high bit
 * written with the duty value (0x80000000 for PDISP, 0x40000000 for
 * PTHERM) latches the new setting. Propagates pwm_info() errors.
 */
88static int
89gf119_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
90{
91 struct nvkm_device *device = therm->subdev.device;
92 int indx = pwm_info(therm, line);
93 if (indx < 0)
94 return indx;
95 else if (indx < 2) {
96 nvkm_wr32(device, 0x00e114 + (indx * 8), divs);
97 nvkm_wr32(device, 0x00e118 + (indx * 8), duty | 0x80000000);
98 } else if (indx == 2) {
99 nvkm_mask(device, 0x0200d8, 0x1fff, divs); /* keep the high bits */
100 nvkm_wr32(device, 0x0200dc, duty | 0x40000000);
101 }
102 return 0;
103}
104
/* Report the PWM source clock (Hz) for a fan GPIO line: crystal/20 for
 * the PDISP PWMs, crystal/10 for the PTHERM PWM (crystal is in kHz,
 * hence the *1000).
 * NOTE(review): returns 0 rather than the negative error from
 * pwm_info() on an unknown line — presumably callers treat 0 as
 * "no clock"; confirm against users of pwm_clock.
 */
105static int
106gf119_fan_pwm_clock(struct nvkm_therm *therm, int line)
107{
108 struct nvkm_device *device = therm->subdev.device;
109 int indx = pwm_info(therm, line);
110 if (indx < 0)
111 return 0;
112 else if (indx < 2)
113 return (device->crystal * 1000) / 20;
114 else
115 return device->crystal * 1000 / 10;
116}
117
/* GF119 therm hardware init (new-style, takes nvkm_therm directly):
 * runs the shared G84 sensor setup, then programs the fan tachometer
 * block so fan revolutions are counted per second. Mirrors the old
 * gf110_therm_init() logic without the object-model boilerplate.
 */
118void
119gf119_therm_init(struct nvkm_therm *therm)
120{
121 struct nvkm_device *device = therm->subdev.device;
122
123 g84_sensor_setup(therm);
124
125 /* enable fan tach, count revolutions per-second */
126 nvkm_mask(device, 0x00e720, 0x00000003, 0x00000002);
127 if (therm->fan->tach.func != DCB_GPIO_UNUSED) {
 /* select the DCB-specified tach GPIO line and set the 1-second
  * count window from the crystal frequency (kHz -> Hz) */
128 nvkm_mask(device, 0x00d79c, 0x000000ff, therm->fan->tach.line);
129 nvkm_wr32(device, 0x00e724, device->crystal * 1000);
130 nvkm_mask(device, 0x00e720, 0x00000001, 0x00000001);
131 }
132 nvkm_mask(device, 0x00e720, 0x00000002, 0x00000000);
133}
134
/* GF119 therm function table: GF119-specific PWM fan control, with
 * temperature readout, fini and fan-sense reused from G84/GT215, and
 * polling-based alarm programming. */
135static const struct nvkm_therm_func
136gf119_therm = {
137 .init = gf119_therm_init,
138 .fini = g84_therm_fini,
139 .pwm_ctrl = gf119_fan_pwm_ctrl,
140 .pwm_get = gf119_fan_pwm_get,
141 .pwm_set = gf119_fan_pwm_set,
142 .pwm_clock = gf119_fan_pwm_clock,
143 .temp_get = g84_temp_get,
144 .fan_sense = gt215_therm_fan_sense,
145 .program_alarms = nvkm_therm_program_alarms_polling,
146};
147
/* Allocate a GF119 therm subdev instance bound to the function table
 * above; thin wrapper over the common nvkm_therm_new_() constructor. */
148int
149gf119_therm_new(struct nvkm_device *device, int index,
150 struct nvkm_therm **ptherm)
151{
152 return nvkm_therm_new_(&gf119_therm, device, index, ptherm);
153}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
index 2fd110f09878..86848ece4d89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
@@ -23,12 +23,6 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26#include <core/device.h>
27
28struct gm107_therm_priv {
29 struct nvkm_therm_priv base;
30};
31
32static int 26static int
33gm107_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable) 27gm107_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
34{ 28{
@@ -39,55 +33,43 @@ gm107_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
39static int 33static int
40gm107_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty) 34gm107_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
41{ 35{
42 *divs = nv_rd32(therm, 0x10eb20) & 0x1fff; 36 struct nvkm_device *device = therm->subdev.device;
43 *duty = nv_rd32(therm, 0x10eb24) & 0x1fff; 37 *divs = nvkm_rd32(device, 0x10eb20) & 0x1fff;
38 *duty = nvkm_rd32(device, 0x10eb24) & 0x1fff;
44 return 0; 39 return 0;
45} 40}
46 41
47static int 42static int
48gm107_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty) 43gm107_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
49{ 44{
50 nv_mask(therm, 0x10eb10, 0x1fff, divs); /* keep the high bits */ 45 struct nvkm_device *device = therm->subdev.device;
51 nv_wr32(therm, 0x10eb14, duty | 0x80000000); 46 nvkm_mask(device, 0x10eb10, 0x1fff, divs); /* keep the high bits */
47 nvkm_wr32(device, 0x10eb14, duty | 0x80000000);
52 return 0; 48 return 0;
53} 49}
54 50
55static int 51static int
56gm107_fan_pwm_clock(struct nvkm_therm *therm, int line) 52gm107_fan_pwm_clock(struct nvkm_therm *therm, int line)
57{ 53{
58 return nv_device(therm)->crystal * 1000; 54 return therm->subdev.device->crystal * 1000;
59} 55}
60 56
61static int 57static const struct nvkm_therm_func
62gm107_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 58gm107_therm = {
63 struct nvkm_oclass *oclass, void *data, u32 size, 59 .init = gf119_therm_init,
64 struct nvkm_object **pobject) 60 .fini = g84_therm_fini,
65{ 61 .pwm_ctrl = gm107_fan_pwm_ctrl,
66 struct gm107_therm_priv *priv; 62 .pwm_get = gm107_fan_pwm_get,
67 int ret; 63 .pwm_set = gm107_fan_pwm_set,
68 64 .pwm_clock = gm107_fan_pwm_clock,
69 ret = nvkm_therm_create(parent, engine, oclass, &priv); 65 .temp_get = g84_temp_get,
70 *pobject = nv_object(priv); 66 .fan_sense = gt215_therm_fan_sense,
71 if (ret) 67 .program_alarms = nvkm_therm_program_alarms_polling,
72 return ret; 68};
73 69
74 priv->base.base.pwm_ctrl = gm107_fan_pwm_ctrl; 70int
75 priv->base.base.pwm_get = gm107_fan_pwm_get; 71gm107_therm_new(struct nvkm_device *device, int index,
76 priv->base.base.pwm_set = gm107_fan_pwm_set; 72 struct nvkm_therm **ptherm)
77 priv->base.base.pwm_clock = gm107_fan_pwm_clock; 73{
78 priv->base.base.temp_get = g84_temp_get; 74 return nvkm_therm_new_(&gm107_therm, device, index, ptherm);
79 priv->base.base.fan_sense = gt215_therm_fan_sense;
80 priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
81 return nvkm_therm_preinit(&priv->base.base);
82} 75}
83
84struct nvkm_oclass
85gm107_therm_oclass = {
86 .handle = NV_SUBDEV(THERM, 0x117),
87 .ofuncs = &(struct nvkm_ofuncs) {
88 .ctor = gm107_therm_ctor,
89 .dtor = _nvkm_therm_dtor,
90 .init = gf110_therm_init,
91 .fini = g84_therm_fini,
92 },
93};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
index e99be20332f2..c08097f2aff5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
@@ -23,78 +23,53 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26#include <core/device.h>
27#include <subdev/gpio.h> 26#include <subdev/gpio.h>
28 27
29struct gt215_therm_priv {
30 struct nvkm_therm_priv base;
31};
32
33int 28int
34gt215_therm_fan_sense(struct nvkm_therm *therm) 29gt215_therm_fan_sense(struct nvkm_therm *therm)
35{ 30{
36 u32 tach = nv_rd32(therm, 0x00e728) & 0x0000ffff; 31 struct nvkm_device *device = therm->subdev.device;
37 u32 ctrl = nv_rd32(therm, 0x00e720); 32 u32 tach = nvkm_rd32(device, 0x00e728) & 0x0000ffff;
33 u32 ctrl = nvkm_rd32(device, 0x00e720);
38 if (ctrl & 0x00000001) 34 if (ctrl & 0x00000001)
39 return tach * 60 / 2; 35 return tach * 60 / 2;
40 return -ENODEV; 36 return -ENODEV;
41} 37}
42 38
43static int 39static void
44gt215_therm_init(struct nvkm_object *object) 40gt215_therm_init(struct nvkm_therm *therm)
45{ 41{
46 struct gt215_therm_priv *priv = (void *)object; 42 struct nvkm_device *device = therm->subdev.device;
47 struct dcb_gpio_func *tach = &priv->base.fan->tach; 43 struct dcb_gpio_func *tach = &therm->fan->tach;
48 int ret;
49
50 ret = nvkm_therm_init(&priv->base.base);
51 if (ret)
52 return ret;
53 44
54 g84_sensor_setup(&priv->base.base); 45 g84_sensor_setup(therm);
55 46
56 /* enable fan tach, count revolutions per-second */ 47 /* enable fan tach, count revolutions per-second */
57 nv_mask(priv, 0x00e720, 0x00000003, 0x00000002); 48 nvkm_mask(device, 0x00e720, 0x00000003, 0x00000002);
58 if (tach->func != DCB_GPIO_UNUSED) { 49 if (tach->func != DCB_GPIO_UNUSED) {
59 nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000); 50 nvkm_wr32(device, 0x00e724, device->crystal * 1000);
60 nv_mask(priv, 0x00e720, 0x001f0000, tach->line << 16); 51 nvkm_mask(device, 0x00e720, 0x001f0000, tach->line << 16);
61 nv_mask(priv, 0x00e720, 0x00000001, 0x00000001); 52 nvkm_mask(device, 0x00e720, 0x00000001, 0x00000001);
62 } 53 }
63 nv_mask(priv, 0x00e720, 0x00000002, 0x00000000); 54 nvkm_mask(device, 0x00e720, 0x00000002, 0x00000000);
64
65 return 0;
66} 55}
67 56
68static int 57static const struct nvkm_therm_func
69gt215_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 58gt215_therm = {
70 struct nvkm_oclass *oclass, void *data, u32 size, 59 .init = gt215_therm_init,
71 struct nvkm_object **pobject) 60 .fini = g84_therm_fini,
72{ 61 .pwm_ctrl = nv50_fan_pwm_ctrl,
73 struct gt215_therm_priv *priv; 62 .pwm_get = nv50_fan_pwm_get,
74 int ret; 63 .pwm_set = nv50_fan_pwm_set,
75 64 .pwm_clock = nv50_fan_pwm_clock,
76 ret = nvkm_therm_create(parent, engine, oclass, &priv); 65 .temp_get = g84_temp_get,
77 *pobject = nv_object(priv); 66 .fan_sense = gt215_therm_fan_sense,
78 if (ret) 67 .program_alarms = nvkm_therm_program_alarms_polling,
79 return ret; 68};
80 69
81 priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl; 70int
82 priv->base.base.pwm_get = nv50_fan_pwm_get; 71gt215_therm_new(struct nvkm_device *device, int index,
83 priv->base.base.pwm_set = nv50_fan_pwm_set; 72 struct nvkm_therm **ptherm)
84 priv->base.base.pwm_clock = nv50_fan_pwm_clock; 73{
85 priv->base.base.temp_get = g84_temp_get; 74 return nvkm_therm_new_(&gt215_therm, device, index, ptherm);
86 priv->base.base.fan_sense = gt215_therm_fan_sense;
87 priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
88 return nvkm_therm_preinit(&priv->base.base);
89} 75}
90
91struct nvkm_oclass
92gt215_therm_oclass = {
93 .handle = NV_SUBDEV(THERM, 0xa3),
94 .ofuncs = &(struct nvkm_ofuncs) {
95 .ctor = gt215_therm_ctor,
96 .dtor = _nvkm_therm_dtor,
97 .init = gt215_therm_init,
98 .fini = g84_therm_fini,
99 },
100};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
index 09fc4605e853..6e0ddc1bb583 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
@@ -27,16 +27,16 @@
27#include <subdev/i2c.h> 27#include <subdev/i2c.h>
28 28
29static bool 29static bool
30probe_monitoring_device(struct nvkm_i2c_port *i2c, 30probe_monitoring_device(struct nvkm_i2c_bus *bus,
31 struct i2c_board_info *info, void *data) 31 struct i2c_board_info *info, void *data)
32{ 32{
33 struct nvkm_therm_priv *priv = data; 33 struct nvkm_therm *therm = data;
34 struct nvbios_therm_sensor *sensor = &priv->bios_sensor; 34 struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
35 struct i2c_client *client; 35 struct i2c_client *client;
36 36
37 request_module("%s%s", I2C_MODULE_PREFIX, info->type); 37 request_module("%s%s", I2C_MODULE_PREFIX, info->type);
38 38
39 client = i2c_new_device(&i2c->adapter, info); 39 client = i2c_new_device(&bus->i2c, info);
40 if (!client) 40 if (!client)
41 return false; 41 return false;
42 42
@@ -46,15 +46,15 @@ probe_monitoring_device(struct nvkm_i2c_port *i2c,
46 return false; 46 return false;
47 } 47 }
48 48
49 nv_info(priv, 49 nvkm_debug(&therm->subdev,
50 "Found an %s at address 0x%x (controlled by lm_sensors, " 50 "Found an %s at address 0x%x (controlled by lm_sensors, "
51 "temp offset %+i C)\n", 51 "temp offset %+i C)\n",
52 info->type, info->addr, sensor->offset_constant); 52 info->type, info->addr, sensor->offset_constant);
53 priv->ic = client; 53 therm->ic = client;
54 return true; 54 return true;
55} 55}
56 56
57static struct nvkm_i2c_board_info 57static struct nvkm_i2c_bus_probe
58nv_board_infos[] = { 58nv_board_infos[] = {
59 { { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 0 }, 59 { { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 0 },
60 { { I2C_BOARD_INFO("w83781d", 0x2d) }, 0 }, 60 { { I2C_BOARD_INFO("w83781d", 0x2d) }, 0 },
@@ -82,38 +82,43 @@ nv_board_infos[] = {
82void 82void
83nvkm_therm_ic_ctor(struct nvkm_therm *therm) 83nvkm_therm_ic_ctor(struct nvkm_therm *therm)
84{ 84{
85 struct nvkm_therm_priv *priv = (void *)therm; 85 struct nvkm_device *device = therm->subdev.device;
86 struct nvkm_bios *bios = nvkm_bios(therm); 86 struct nvkm_bios *bios = device->bios;
87 struct nvkm_i2c *i2c = nvkm_i2c(therm); 87 struct nvkm_i2c *i2c = device->i2c;
88 struct nvkm_i2c_bus *bus;
88 struct nvbios_extdev_func extdev_entry; 89 struct nvbios_extdev_func extdev_entry;
89 90
91 bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
92 if (!bus)
93 return;
94
90 if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) { 95 if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
91 struct nvkm_i2c_board_info board[] = { 96 struct nvkm_i2c_bus_probe board[] = {
92 { { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) }, 0}, 97 { { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) }, 0},
93 { } 98 { }
94 }; 99 };
95 100
96 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 101 nvkm_i2c_bus_probe(bus, "monitoring device", board,
97 board, probe_monitoring_device, therm); 102 probe_monitoring_device, therm);
98 if (priv->ic) 103 if (therm->ic)
99 return; 104 return;
100 } 105 }
101 106
102 if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) { 107 if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) {
103 struct nvkm_i2c_board_info board[] = { 108 struct nvkm_i2c_bus_probe board[] = {
104 { { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) }, 20 }, 109 { { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) }, 20 },
105 { } 110 { }
106 }; 111 };
107 112
108 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 113 nvkm_i2c_bus_probe(bus, "monitoring device", board,
109 board, probe_monitoring_device, therm); 114 probe_monitoring_device, therm);
110 if (priv->ic) 115 if (therm->ic)
111 return; 116 return;
112 } 117 }
113 118
114 /* The vbios doesn't provide the address of an exisiting monitoring 119 /* The vbios doesn't provide the address of an exisiting monitoring
115 device. Let's try our static list. 120 device. Let's try our static list.
116 */ 121 */
117 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 122 nvkm_i2c_bus_probe(bus, "monitoring device", nv_board_infos,
118 nv_board_infos, probe_monitoring_device, therm); 123 probe_monitoring_device, therm);
119} 124}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
index 8496fffd4688..6326fdc5a48d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
@@ -24,26 +24,17 @@
24 */ 24 */
25#include "priv.h" 25#include "priv.h"
26 26
27#include <core/device.h>
28
29struct nv40_therm_priv {
30 struct nvkm_therm_priv base;
31};
32
33enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 }; 27enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 };
34 28
35static enum nv40_sensor_style 29static enum nv40_sensor_style
36nv40_sensor_style(struct nvkm_therm *therm) 30nv40_sensor_style(struct nvkm_therm *therm)
37{ 31{
38 struct nvkm_device *device = nv_device(therm); 32 switch (therm->subdev.device->chipset) {
39
40 switch (device->chipset) {
41 case 0x43: 33 case 0x43:
42 case 0x44: 34 case 0x44:
43 case 0x4a: 35 case 0x4a:
44 case 0x47: 36 case 0x47:
45 return OLD_STYLE; 37 return OLD_STYLE;
46
47 case 0x46: 38 case 0x46:
48 case 0x49: 39 case 0x49:
49 case 0x4b: 40 case 0x4b:
@@ -61,18 +52,19 @@ nv40_sensor_style(struct nvkm_therm *therm)
61static int 52static int
62nv40_sensor_setup(struct nvkm_therm *therm) 53nv40_sensor_setup(struct nvkm_therm *therm)
63{ 54{
55 struct nvkm_device *device = therm->subdev.device;
64 enum nv40_sensor_style style = nv40_sensor_style(therm); 56 enum nv40_sensor_style style = nv40_sensor_style(therm);
65 57
66 /* enable ADC readout and disable the ALARM threshold */ 58 /* enable ADC readout and disable the ALARM threshold */
67 if (style == NEW_STYLE) { 59 if (style == NEW_STYLE) {
68 nv_mask(therm, 0x15b8, 0x80000000, 0); 60 nvkm_mask(device, 0x15b8, 0x80000000, 0);
69 nv_wr32(therm, 0x15b0, 0x80003fff); 61 nvkm_wr32(device, 0x15b0, 0x80003fff);
70 mdelay(20); /* wait for the temperature to stabilize */ 62 mdelay(20); /* wait for the temperature to stabilize */
71 return nv_rd32(therm, 0x15b4) & 0x3fff; 63 return nvkm_rd32(device, 0x15b4) & 0x3fff;
72 } else if (style == OLD_STYLE) { 64 } else if (style == OLD_STYLE) {
73 nv_wr32(therm, 0x15b0, 0xff); 65 nvkm_wr32(device, 0x15b0, 0xff);
74 mdelay(20); /* wait for the temperature to stabilize */ 66 mdelay(20); /* wait for the temperature to stabilize */
75 return nv_rd32(therm, 0x15b4) & 0xff; 67 return nvkm_rd32(device, 0x15b4) & 0xff;
76 } else 68 } else
77 return -ENODEV; 69 return -ENODEV;
78} 70}
@@ -80,17 +72,17 @@ nv40_sensor_setup(struct nvkm_therm *therm)
80static int 72static int
81nv40_temp_get(struct nvkm_therm *therm) 73nv40_temp_get(struct nvkm_therm *therm)
82{ 74{
83 struct nvkm_therm_priv *priv = (void *)therm; 75 struct nvkm_device *device = therm->subdev.device;
84 struct nvbios_therm_sensor *sensor = &priv->bios_sensor; 76 struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
85 enum nv40_sensor_style style = nv40_sensor_style(therm); 77 enum nv40_sensor_style style = nv40_sensor_style(therm);
86 int core_temp; 78 int core_temp;
87 79
88 if (style == NEW_STYLE) { 80 if (style == NEW_STYLE) {
89 nv_wr32(therm, 0x15b0, 0x80003fff); 81 nvkm_wr32(device, 0x15b0, 0x80003fff);
90 core_temp = nv_rd32(therm, 0x15b4) & 0x3fff; 82 core_temp = nvkm_rd32(device, 0x15b4) & 0x3fff;
91 } else if (style == OLD_STYLE) { 83 } else if (style == OLD_STYLE) {
92 nv_wr32(therm, 0x15b0, 0xff); 84 nvkm_wr32(device, 0x15b0, 0xff);
93 core_temp = nv_rd32(therm, 0x15b4) & 0xff; 85 core_temp = nvkm_rd32(device, 0x15b4) & 0xff;
94 } else 86 } else
95 return -ENODEV; 87 return -ENODEV;
96 88
@@ -113,11 +105,13 @@ nv40_temp_get(struct nvkm_therm *therm)
113static int 105static int
114nv40_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable) 106nv40_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
115{ 107{
108 struct nvkm_subdev *subdev = &therm->subdev;
109 struct nvkm_device *device = subdev->device;
116 u32 mask = enable ? 0x80000000 : 0x0000000; 110 u32 mask = enable ? 0x80000000 : 0x0000000;
117 if (line == 2) nv_mask(therm, 0x0010f0, 0x80000000, mask); 111 if (line == 2) nvkm_mask(device, 0x0010f0, 0x80000000, mask);
118 else if (line == 9) nv_mask(therm, 0x0015f4, 0x80000000, mask); 112 else if (line == 9) nvkm_mask(device, 0x0015f4, 0x80000000, mask);
119 else { 113 else {
120 nv_error(therm, "unknown pwm ctrl for gpio %d\n", line); 114 nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
121 return -ENODEV; 115 return -ENODEV;
122 } 116 }
123 return 0; 117 return 0;
@@ -126,8 +120,10 @@ nv40_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
126static int 120static int
127nv40_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty) 121nv40_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
128{ 122{
123 struct nvkm_subdev *subdev = &therm->subdev;
124 struct nvkm_device *device = subdev->device;
129 if (line == 2) { 125 if (line == 2) {
130 u32 reg = nv_rd32(therm, 0x0010f0); 126 u32 reg = nvkm_rd32(device, 0x0010f0);
131 if (reg & 0x80000000) { 127 if (reg & 0x80000000) {
132 *duty = (reg & 0x7fff0000) >> 16; 128 *duty = (reg & 0x7fff0000) >> 16;
133 *divs = (reg & 0x00007fff); 129 *divs = (reg & 0x00007fff);
@@ -135,14 +131,14 @@ nv40_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
135 } 131 }
136 } else 132 } else
137 if (line == 9) { 133 if (line == 9) {
138 u32 reg = nv_rd32(therm, 0x0015f4); 134 u32 reg = nvkm_rd32(device, 0x0015f4);
139 if (reg & 0x80000000) { 135 if (reg & 0x80000000) {
140 *divs = nv_rd32(therm, 0x0015f8); 136 *divs = nvkm_rd32(device, 0x0015f8);
141 *duty = (reg & 0x7fffffff); 137 *duty = (reg & 0x7fffffff);
142 return 0; 138 return 0;
143 } 139 }
144 } else { 140 } else {
145 nv_error(therm, "unknown pwm ctrl for gpio %d\n", line); 141 nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
146 return -ENODEV; 142 return -ENODEV;
147 } 143 }
148 144
@@ -152,14 +148,16 @@ nv40_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
152static int 148static int
153nv40_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty) 149nv40_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
154{ 150{
151 struct nvkm_subdev *subdev = &therm->subdev;
152 struct nvkm_device *device = subdev->device;
155 if (line == 2) { 153 if (line == 2) {
156 nv_mask(therm, 0x0010f0, 0x7fff7fff, (duty << 16) | divs); 154 nvkm_mask(device, 0x0010f0, 0x7fff7fff, (duty << 16) | divs);
157 } else 155 } else
158 if (line == 9) { 156 if (line == 9) {
159 nv_wr32(therm, 0x0015f8, divs); 157 nvkm_wr32(device, 0x0015f8, divs);
160 nv_mask(therm, 0x0015f4, 0x7fffffff, duty); 158 nvkm_mask(device, 0x0015f4, 0x7fffffff, duty);
161 } else { 159 } else {
162 nv_error(therm, "unknown pwm ctrl for gpio %d\n", line); 160 nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
163 return -ENODEV; 161 return -ENODEV;
164 } 162 }
165 163
@@ -167,59 +165,40 @@ nv40_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
167} 165}
168 166
169void 167void
170nv40_therm_intr(struct nvkm_subdev *subdev) 168nv40_therm_intr(struct nvkm_therm *therm)
171{ 169{
172 struct nvkm_therm *therm = nvkm_therm(subdev); 170 struct nvkm_subdev *subdev = &therm->subdev;
173 uint32_t stat = nv_rd32(therm, 0x1100); 171 struct nvkm_device *device = subdev->device;
172 uint32_t stat = nvkm_rd32(device, 0x1100);
174 173
175 /* traitement */ 174 /* traitement */
176 175
177 /* ack all IRQs */ 176 /* ack all IRQs */
178 nv_wr32(therm, 0x1100, 0x70000); 177 nvkm_wr32(device, 0x1100, 0x70000);
179 178
180 nv_error(therm, "THERM received an IRQ: stat = %x\n", stat); 179 nvkm_error(subdev, "THERM received an IRQ: stat = %x\n", stat);
181} 180}
182 181
183static int 182static void
184nv40_therm_ctor(struct nvkm_object *parent, 183nv40_therm_init(struct nvkm_therm *therm)
185 struct nvkm_object *engine,
186 struct nvkm_oclass *oclass, void *data, u32 size,
187 struct nvkm_object **pobject)
188{ 184{
189 struct nv40_therm_priv *priv;
190 int ret;
191
192 ret = nvkm_therm_create(parent, engine, oclass, &priv);
193 *pobject = nv_object(priv);
194 if (ret)
195 return ret;
196
197 priv->base.base.pwm_ctrl = nv40_fan_pwm_ctrl;
198 priv->base.base.pwm_get = nv40_fan_pwm_get;
199 priv->base.base.pwm_set = nv40_fan_pwm_set;
200 priv->base.base.temp_get = nv40_temp_get;
201 priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
202 nv_subdev(priv)->intr = nv40_therm_intr;
203 return nvkm_therm_preinit(&priv->base.base);
204}
205
206static int
207nv40_therm_init(struct nvkm_object *object)
208{
209 struct nvkm_therm *therm = (void *)object;
210
211 nv40_sensor_setup(therm); 185 nv40_sensor_setup(therm);
212
213 return _nvkm_therm_init(object);
214} 186}
215 187
216struct nvkm_oclass 188static const struct nvkm_therm_func
217nv40_therm_oclass = { 189nv40_therm = {
218 .handle = NV_SUBDEV(THERM, 0x40), 190 .init = nv40_therm_init,
219 .ofuncs = &(struct nvkm_ofuncs) { 191 .intr = nv40_therm_intr,
220 .ctor = nv40_therm_ctor, 192 .pwm_ctrl = nv40_fan_pwm_ctrl,
221 .dtor = _nvkm_therm_dtor, 193 .pwm_get = nv40_fan_pwm_get,
222 .init = nv40_therm_init, 194 .pwm_set = nv40_fan_pwm_set,
223 .fini = _nvkm_therm_fini, 195 .temp_get = nv40_temp_get,
224 }, 196 .program_alarms = nvkm_therm_program_alarms_polling,
225}; 197};
198
199int
200nv40_therm_new(struct nvkm_device *device, int index,
201 struct nvkm_therm **ptherm)
202{
203 return nvkm_therm_new_(&nv40_therm, device, index, ptherm);
204}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
index 1ef59e8922d4..9b57b433d4cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
@@ -24,15 +24,11 @@
24 */ 24 */
25#include "priv.h" 25#include "priv.h"
26 26
27#include <core/device.h>
28
29struct nv50_therm_priv {
30 struct nvkm_therm_priv base;
31};
32
33static int 27static int
34pwm_info(struct nvkm_therm *therm, int *line, int *ctrl, int *indx) 28pwm_info(struct nvkm_therm *therm, int *line, int *ctrl, int *indx)
35{ 29{
30 struct nvkm_subdev *subdev = &therm->subdev;
31
36 if (*line == 0x04) { 32 if (*line == 0x04) {
37 *ctrl = 0x00e100; 33 *ctrl = 0x00e100;
38 *line = 4; 34 *line = 4;
@@ -48,7 +44,7 @@ pwm_info(struct nvkm_therm *therm, int *line, int *ctrl, int *indx)
48 *line = 0; 44 *line = 0;
49 *indx = 0; 45 *indx = 0;
50 } else { 46 } else {
51 nv_error(therm, "unknown pwm ctrl for gpio %d\n", *line); 47 nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", *line);
52 return -ENODEV; 48 return -ENODEV;
53 } 49 }
54 50
@@ -58,23 +54,25 @@ pwm_info(struct nvkm_therm *therm, int *line, int *ctrl, int *indx)
58int 54int
59nv50_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable) 55nv50_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
60{ 56{
57 struct nvkm_device *device = therm->subdev.device;
61 u32 data = enable ? 0x00000001 : 0x00000000; 58 u32 data = enable ? 0x00000001 : 0x00000000;
62 int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id); 59 int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
63 if (ret == 0) 60 if (ret == 0)
64 nv_mask(therm, ctrl, 0x00010001 << line, data << line); 61 nvkm_mask(device, ctrl, 0x00010001 << line, data << line);
65 return ret; 62 return ret;
66} 63}
67 64
68int 65int
69nv50_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty) 66nv50_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
70{ 67{
68 struct nvkm_device *device = therm->subdev.device;
71 int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id); 69 int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
72 if (ret) 70 if (ret)
73 return ret; 71 return ret;
74 72
75 if (nv_rd32(therm, ctrl) & (1 << line)) { 73 if (nvkm_rd32(device, ctrl) & (1 << line)) {
76 *divs = nv_rd32(therm, 0x00e114 + (id * 8)); 74 *divs = nvkm_rd32(device, 0x00e114 + (id * 8));
77 *duty = nv_rd32(therm, 0x00e118 + (id * 8)); 75 *duty = nvkm_rd32(device, 0x00e118 + (id * 8));
78 return 0; 76 return 0;
79 } 77 }
80 78
@@ -84,36 +82,36 @@ nv50_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
84int 82int
85nv50_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty) 83nv50_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
86{ 84{
85 struct nvkm_device *device = therm->subdev.device;
87 int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id); 86 int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
88 if (ret) 87 if (ret)
89 return ret; 88 return ret;
90 89
91 nv_wr32(therm, 0x00e114 + (id * 8), divs); 90 nvkm_wr32(device, 0x00e114 + (id * 8), divs);
92 nv_wr32(therm, 0x00e118 + (id * 8), duty | 0x80000000); 91 nvkm_wr32(device, 0x00e118 + (id * 8), duty | 0x80000000);
93 return 0; 92 return 0;
94} 93}
95 94
96int 95int
97nv50_fan_pwm_clock(struct nvkm_therm *therm, int line) 96nv50_fan_pwm_clock(struct nvkm_therm *therm, int line)
98{ 97{
99 int chipset = nv_device(therm)->chipset; 98 struct nvkm_device *device = therm->subdev.device;
100 int crystal = nv_device(therm)->crystal;
101 int pwm_clock; 99 int pwm_clock;
102 100
103 /* determine the PWM source clock */ 101 /* determine the PWM source clock */
104 if (chipset > 0x50 && chipset < 0x94) { 102 if (device->chipset > 0x50 && device->chipset < 0x94) {
105 u8 pwm_div = nv_rd32(therm, 0x410c); 103 u8 pwm_div = nvkm_rd32(device, 0x410c);
106 if (nv_rd32(therm, 0xc040) & 0x800000) { 104 if (nvkm_rd32(device, 0xc040) & 0x800000) {
107 /* Use the HOST clock (100 MHz) 105 /* Use the HOST clock (100 MHz)
108 * Where does this constant(2.4) comes from? */ 106 * Where does this constant(2.4) comes from? */
109 pwm_clock = (100000000 >> pwm_div) * 10 / 24; 107 pwm_clock = (100000000 >> pwm_div) * 10 / 24;
110 } else { 108 } else {
111 /* Where does this constant(20) comes from? */ 109 /* Where does this constant(20) comes from? */
112 pwm_clock = (crystal * 1000) >> pwm_div; 110 pwm_clock = (device->crystal * 1000) >> pwm_div;
113 pwm_clock /= 20; 111 pwm_clock /= 20;
114 } 112 }
115 } else { 113 } else {
116 pwm_clock = (crystal * 1000) / 20; 114 pwm_clock = (device->crystal * 1000) / 20;
117 } 115 }
118 116
119 return pwm_clock; 117 return pwm_clock;
@@ -122,18 +120,19 @@ nv50_fan_pwm_clock(struct nvkm_therm *therm, int line)
122static void 120static void
123nv50_sensor_setup(struct nvkm_therm *therm) 121nv50_sensor_setup(struct nvkm_therm *therm)
124{ 122{
125 nv_mask(therm, 0x20010, 0x40000000, 0x0); 123 struct nvkm_device *device = therm->subdev.device;
124 nvkm_mask(device, 0x20010, 0x40000000, 0x0);
126 mdelay(20); /* wait for the temperature to stabilize */ 125 mdelay(20); /* wait for the temperature to stabilize */
127} 126}
128 127
129static int 128static int
130nv50_temp_get(struct nvkm_therm *therm) 129nv50_temp_get(struct nvkm_therm *therm)
131{ 130{
132 struct nvkm_therm_priv *priv = (void *)therm; 131 struct nvkm_device *device = therm->subdev.device;
133 struct nvbios_therm_sensor *sensor = &priv->bios_sensor; 132 struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
134 int core_temp; 133 int core_temp;
135 134
136 core_temp = nv_rd32(therm, 0x20014) & 0x3fff; 135 core_temp = nvkm_rd32(device, 0x20014) & 0x3fff;
137 136
138 /* if the slope or the offset is unset, do no use the sensor */ 137 /* if the slope or the offset is unset, do no use the sensor */
139 if (!sensor->slope_div || !sensor->slope_mult || 138 if (!sensor->slope_div || !sensor->slope_mult ||
@@ -151,48 +150,27 @@ nv50_temp_get(struct nvkm_therm *therm)
151 return core_temp; 150 return core_temp;
152} 151}
153 152
154static int 153static void
155nv50_therm_ctor(struct nvkm_object *parent, 154nv50_therm_init(struct nvkm_therm *therm)
156 struct nvkm_object *engine,
157 struct nvkm_oclass *oclass, void *data, u32 size,
158 struct nvkm_object **pobject)
159{
160 struct nv50_therm_priv *priv;
161 int ret;
162
163 ret = nvkm_therm_create(parent, engine, oclass, &priv);
164 *pobject = nv_object(priv);
165 if (ret)
166 return ret;
167
168 priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
169 priv->base.base.pwm_get = nv50_fan_pwm_get;
170 priv->base.base.pwm_set = nv50_fan_pwm_set;
171 priv->base.base.pwm_clock = nv50_fan_pwm_clock;
172 priv->base.base.temp_get = nv50_temp_get;
173 priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
174 nv_subdev(priv)->intr = nv40_therm_intr;
175
176 return nvkm_therm_preinit(&priv->base.base);
177}
178
179static int
180nv50_therm_init(struct nvkm_object *object)
181{ 155{
182 struct nvkm_therm *therm = (void *)object;
183
184 nv50_sensor_setup(therm); 156 nv50_sensor_setup(therm);
185
186 return _nvkm_therm_init(object);
187} 157}
188 158
189struct nvkm_oclass 159static const struct nvkm_therm_func
190nv50_therm_oclass = { 160nv50_therm = {
191 .handle = NV_SUBDEV(THERM, 0x50), 161 .init = nv50_therm_init,
192 .ofuncs = &(struct nvkm_ofuncs) { 162 .intr = nv40_therm_intr,
193 .ctor = nv50_therm_ctor, 163 .pwm_ctrl = nv50_fan_pwm_ctrl,
194 .dtor = _nvkm_therm_dtor, 164 .pwm_get = nv50_fan_pwm_get,
195 .init = nv50_therm_init, 165 .pwm_set = nv50_fan_pwm_set,
196 .fini = _nvkm_therm_fini, 166 .pwm_clock = nv50_fan_pwm_clock,
197 }, 167 .temp_get = nv50_temp_get,
168 .program_alarms = nvkm_therm_program_alarms_polling,
198}; 169};
170
171int
172nv50_therm_new(struct nvkm_device *device, int index,
173 struct nvkm_therm **ptherm)
174{
175 return nvkm_therm_new_(&nv50_therm, device, index, ptherm);
176}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
index 916a149efe6e..235a5d8daff6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
@@ -1,5 +1,6 @@
1#ifndef __NVTHERM_PRIV_H__ 1#ifndef __NVTHERM_PRIV_H__
2#define __NVTHERM_PRIV_H__ 2#define __NVTHERM_PRIV_H__
3#define nvkm_therm(p) container_of((p), struct nvkm_therm, subdev)
3/* 4/*
4 * Copyright 2012 The Nouveau community 5 * Copyright 2012 The Nouveau community
5 * 6 *
@@ -28,8 +29,9 @@
28#include <subdev/bios/extdev.h> 29#include <subdev/bios/extdev.h>
29#include <subdev/bios/gpio.h> 30#include <subdev/bios/gpio.h>
30#include <subdev/bios/perf.h> 31#include <subdev/bios/perf.h>
31#include <subdev/bios/therm.h> 32
32#include <subdev/timer.h> 33int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *,
34 int index, struct nvkm_therm **);
33 35
34struct nvkm_fan { 36struct nvkm_fan {
35 struct nvkm_therm *parent; 37 struct nvkm_therm *parent;
@@ -48,59 +50,6 @@ struct nvkm_fan {
48 struct dcb_gpio_func tach; 50 struct dcb_gpio_func tach;
49}; 51};
50 52
51enum nvkm_therm_thrs_direction {
52 NVKM_THERM_THRS_FALLING = 0,
53 NVKM_THERM_THRS_RISING = 1
54};
55
56enum nvkm_therm_thrs_state {
57 NVKM_THERM_THRS_LOWER = 0,
58 NVKM_THERM_THRS_HIGHER = 1
59};
60
61enum nvkm_therm_thrs {
62 NVKM_THERM_THRS_FANBOOST = 0,
63 NVKM_THERM_THRS_DOWNCLOCK = 1,
64 NVKM_THERM_THRS_CRITICAL = 2,
65 NVKM_THERM_THRS_SHUTDOWN = 3,
66 NVKM_THERM_THRS_NR
67};
68
69struct nvkm_therm_priv {
70 struct nvkm_therm base;
71
72 /* automatic thermal management */
73 struct nvkm_alarm alarm;
74 spinlock_t lock;
75 struct nvbios_therm_trip_point *last_trip;
76 int mode;
77 int cstate;
78 int suspend;
79
80 /* bios */
81 struct nvbios_therm_sensor bios_sensor;
82
83 /* fan priv */
84 struct nvkm_fan *fan;
85
86 /* alarms priv */
87 struct {
88 spinlock_t alarm_program_lock;
89 struct nvkm_alarm therm_poll_alarm;
90 enum nvkm_therm_thrs_state alarm_state[NVKM_THERM_THRS_NR];
91 void (*program_alarms)(struct nvkm_therm *);
92 } sensor;
93
94 /* what should be done if the card overheats */
95 struct {
96 void (*downclock)(struct nvkm_therm *, bool active);
97 void (*pause)(struct nvkm_therm *, bool active);
98 } emergency;
99
100 /* ic */
101 struct i2c_client *ic;
102};
103
104int nvkm_therm_fan_mode(struct nvkm_therm *, int mode); 53int nvkm_therm_fan_mode(struct nvkm_therm *, int mode);
105int nvkm_therm_attr_get(struct nvkm_therm *, enum nvkm_therm_attr_type); 54int nvkm_therm_attr_get(struct nvkm_therm *, enum nvkm_therm_attr_type);
106int nvkm_therm_attr_set(struct nvkm_therm *, enum nvkm_therm_attr_type, int); 55int nvkm_therm_attr_set(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
@@ -117,8 +66,6 @@ int nvkm_therm_fan_set(struct nvkm_therm *, bool now, int percent);
117int nvkm_therm_fan_user_get(struct nvkm_therm *); 66int nvkm_therm_fan_user_get(struct nvkm_therm *);
118int nvkm_therm_fan_user_set(struct nvkm_therm *, int percent); 67int nvkm_therm_fan_user_set(struct nvkm_therm *, int percent);
119 68
120int nvkm_therm_fan_sense(struct nvkm_therm *);
121
122int nvkm_therm_preinit(struct nvkm_therm *); 69int nvkm_therm_preinit(struct nvkm_therm *);
123 70
124int nvkm_therm_sensor_init(struct nvkm_therm *); 71int nvkm_therm_sensor_init(struct nvkm_therm *);
@@ -134,18 +81,37 @@ void nvkm_therm_sensor_event(struct nvkm_therm *, enum nvkm_therm_thrs,
134 enum nvkm_therm_thrs_direction); 81 enum nvkm_therm_thrs_direction);
135void nvkm_therm_program_alarms_polling(struct nvkm_therm *); 82void nvkm_therm_program_alarms_polling(struct nvkm_therm *);
136 83
137void nv40_therm_intr(struct nvkm_subdev *); 84struct nvkm_therm_func {
85 void (*init)(struct nvkm_therm *);
86 void (*fini)(struct nvkm_therm *);
87 void (*intr)(struct nvkm_therm *);
88
89 int (*pwm_ctrl)(struct nvkm_therm *, int line, bool);
90 int (*pwm_get)(struct nvkm_therm *, int line, u32 *, u32 *);
91 int (*pwm_set)(struct nvkm_therm *, int line, u32, u32);
92 int (*pwm_clock)(struct nvkm_therm *, int line);
93
94 int (*temp_get)(struct nvkm_therm *);
95
96 int (*fan_sense)(struct nvkm_therm *);
97
98 void (*program_alarms)(struct nvkm_therm *);
99};
100
101void nv40_therm_intr(struct nvkm_therm *);
102
138int nv50_fan_pwm_ctrl(struct nvkm_therm *, int, bool); 103int nv50_fan_pwm_ctrl(struct nvkm_therm *, int, bool);
139int nv50_fan_pwm_get(struct nvkm_therm *, int, u32 *, u32 *); 104int nv50_fan_pwm_get(struct nvkm_therm *, int, u32 *, u32 *);
140int nv50_fan_pwm_set(struct nvkm_therm *, int, u32, u32); 105int nv50_fan_pwm_set(struct nvkm_therm *, int, u32, u32);
141int nv50_fan_pwm_clock(struct nvkm_therm *, int); 106int nv50_fan_pwm_clock(struct nvkm_therm *, int);
107
142int g84_temp_get(struct nvkm_therm *); 108int g84_temp_get(struct nvkm_therm *);
143void g84_sensor_setup(struct nvkm_therm *); 109void g84_sensor_setup(struct nvkm_therm *);
144int g84_therm_fini(struct nvkm_object *, bool suspend); 110void g84_therm_fini(struct nvkm_therm *);
145 111
146int gt215_therm_fan_sense(struct nvkm_therm *); 112int gt215_therm_fan_sense(struct nvkm_therm *);
147 113
148int gf110_therm_init(struct nvkm_object *); 114void gf119_therm_init(struct nvkm_therm *);
149 115
150int nvkm_fanpwm_create(struct nvkm_therm *, struct dcb_gpio_func *); 116int nvkm_fanpwm_create(struct nvkm_therm *, struct dcb_gpio_func *);
151int nvkm_fantog_create(struct nvkm_therm *, struct dcb_gpio_func *); 117int nvkm_fantog_create(struct nvkm_therm *, struct dcb_gpio_func *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
index aa13744f3854..b9703c02d8ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
@@ -26,29 +26,25 @@
26static void 26static void
27nvkm_therm_temp_set_defaults(struct nvkm_therm *therm) 27nvkm_therm_temp_set_defaults(struct nvkm_therm *therm)
28{ 28{
29 struct nvkm_therm_priv *priv = (void *)therm; 29 therm->bios_sensor.offset_constant = 0;
30 30
31 priv->bios_sensor.offset_constant = 0; 31 therm->bios_sensor.thrs_fan_boost.temp = 90;
32 therm->bios_sensor.thrs_fan_boost.hysteresis = 3;
32 33
33 priv->bios_sensor.thrs_fan_boost.temp = 90; 34 therm->bios_sensor.thrs_down_clock.temp = 95;
34 priv->bios_sensor.thrs_fan_boost.hysteresis = 3; 35 therm->bios_sensor.thrs_down_clock.hysteresis = 3;
35 36
36 priv->bios_sensor.thrs_down_clock.temp = 95; 37 therm->bios_sensor.thrs_critical.temp = 105;
37 priv->bios_sensor.thrs_down_clock.hysteresis = 3; 38 therm->bios_sensor.thrs_critical.hysteresis = 5;
38 39
39 priv->bios_sensor.thrs_critical.temp = 105; 40 therm->bios_sensor.thrs_shutdown.temp = 135;
40 priv->bios_sensor.thrs_critical.hysteresis = 5; 41 therm->bios_sensor.thrs_shutdown.hysteresis = 5; /*not that it matters */
41
42 priv->bios_sensor.thrs_shutdown.temp = 135;
43 priv->bios_sensor.thrs_shutdown.hysteresis = 5; /*not that it matters */
44} 42}
45 43
46
47static void 44static void
48nvkm_therm_temp_safety_checks(struct nvkm_therm *therm) 45nvkm_therm_temp_safety_checks(struct nvkm_therm *therm)
49{ 46{
50 struct nvkm_therm_priv *priv = (void *)therm; 47 struct nvbios_therm_sensor *s = &therm->bios_sensor;
51 struct nvbios_therm_sensor *s = &priv->bios_sensor;
52 48
53 /* enforce a minimum hysteresis on thresholds */ 49 /* enforce a minimum hysteresis on thresholds */
54 s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2); 50 s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2);
@@ -63,8 +59,7 @@ nvkm_therm_sensor_set_threshold_state(struct nvkm_therm *therm,
63 enum nvkm_therm_thrs thrs, 59 enum nvkm_therm_thrs thrs,
64 enum nvkm_therm_thrs_state st) 60 enum nvkm_therm_thrs_state st)
65{ 61{
66 struct nvkm_therm_priv *priv = (void *)therm; 62 therm->sensor.alarm_state[thrs] = st;
67 priv->sensor.alarm_state[thrs] = st;
68} 63}
69 64
70/* must be called with alarm_program_lock taken ! */ 65/* must be called with alarm_program_lock taken ! */
@@ -72,8 +67,7 @@ enum nvkm_therm_thrs_state
72nvkm_therm_sensor_get_threshold_state(struct nvkm_therm *therm, 67nvkm_therm_sensor_get_threshold_state(struct nvkm_therm *therm,
73 enum nvkm_therm_thrs thrs) 68 enum nvkm_therm_thrs thrs)
74{ 69{
75 struct nvkm_therm_priv *priv = (void *)therm; 70 return therm->sensor.alarm_state[thrs];
76 return priv->sensor.alarm_state[thrs];
77} 71}
78 72
79static void 73static void
@@ -87,22 +81,23 @@ void
87nvkm_therm_sensor_event(struct nvkm_therm *therm, enum nvkm_therm_thrs thrs, 81nvkm_therm_sensor_event(struct nvkm_therm *therm, enum nvkm_therm_thrs thrs,
88 enum nvkm_therm_thrs_direction dir) 82 enum nvkm_therm_thrs_direction dir)
89{ 83{
90 struct nvkm_therm_priv *priv = (void *)therm; 84 struct nvkm_subdev *subdev = &therm->subdev;
91 bool active; 85 bool active;
92 const char *thresolds[] = { 86 const char *thresolds[] = {
93 "fanboost", "downclock", "critical", "shutdown" 87 "fanboost", "downclock", "critical", "shutdown"
94 }; 88 };
95 int temperature = therm->temp_get(therm); 89 int temperature = therm->func->temp_get(therm);
96 90
97 if (thrs < 0 || thrs > 3) 91 if (thrs < 0 || thrs > 3)
98 return; 92 return;
99 93
100 if (dir == NVKM_THERM_THRS_FALLING) 94 if (dir == NVKM_THERM_THRS_FALLING)
101 nv_info(therm, "temperature (%i C) went below the '%s' threshold\n", 95 nvkm_info(subdev,
102 temperature, thresolds[thrs]); 96 "temperature (%i C) went below the '%s' threshold\n",
97 temperature, thresolds[thrs]);
103 else 98 else
104 nv_info(therm, "temperature (%i C) hit the '%s' threshold\n", 99 nvkm_info(subdev, "temperature (%i C) hit the '%s' threshold\n",
105 temperature, thresolds[thrs]); 100 temperature, thresolds[thrs]);
106 101
107 active = (dir == NVKM_THERM_THRS_RISING); 102 active = (dir == NVKM_THERM_THRS_RISING);
108 switch (thrs) { 103 switch (thrs) {
@@ -113,12 +108,12 @@ nvkm_therm_sensor_event(struct nvkm_therm *therm, enum nvkm_therm_thrs thrs,
113 } 108 }
114 break; 109 break;
115 case NVKM_THERM_THRS_DOWNCLOCK: 110 case NVKM_THERM_THRS_DOWNCLOCK:
116 if (priv->emergency.downclock) 111 if (therm->emergency.downclock)
117 priv->emergency.downclock(therm, active); 112 therm->emergency.downclock(therm, active);
118 break; 113 break;
119 case NVKM_THERM_THRS_CRITICAL: 114 case NVKM_THERM_THRS_CRITICAL:
120 if (priv->emergency.pause) 115 if (therm->emergency.pause)
121 priv->emergency.pause(therm, active); 116 therm->emergency.pause(therm, active);
122 break; 117 break;
123 case NVKM_THERM_THRS_SHUTDOWN: 118 case NVKM_THERM_THRS_SHUTDOWN:
124 if (active) { 119 if (active) {
@@ -145,7 +140,7 @@ nvkm_therm_threshold_hyst_polling(struct nvkm_therm *therm,
145{ 140{
146 enum nvkm_therm_thrs_direction direction; 141 enum nvkm_therm_thrs_direction direction;
147 enum nvkm_therm_thrs_state prev_state, new_state; 142 enum nvkm_therm_thrs_state prev_state, new_state;
148 int temp = therm->temp_get(therm); 143 int temp = therm->func->temp_get(therm);
149 144
150 prev_state = nvkm_therm_sensor_get_threshold_state(therm, thrs_name); 145 prev_state = nvkm_therm_sensor_get_threshold_state(therm, thrs_name);
151 146
@@ -166,19 +161,19 @@ nvkm_therm_threshold_hyst_polling(struct nvkm_therm *therm,
166static void 161static void
167alarm_timer_callback(struct nvkm_alarm *alarm) 162alarm_timer_callback(struct nvkm_alarm *alarm)
168{ 163{
169 struct nvkm_therm_priv *priv = 164 struct nvkm_therm *therm =
170 container_of(alarm, struct nvkm_therm_priv, sensor.therm_poll_alarm); 165 container_of(alarm, struct nvkm_therm, sensor.therm_poll_alarm);
171 struct nvbios_therm_sensor *sensor = &priv->bios_sensor; 166 struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
172 struct nvkm_timer *ptimer = nvkm_timer(priv); 167 struct nvkm_timer *tmr = therm->subdev.device->timer;
173 struct nvkm_therm *therm = &priv->base;
174 unsigned long flags; 168 unsigned long flags;
175 169
176 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags); 170 spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);
177 171
178 nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost, 172 nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
179 NVKM_THERM_THRS_FANBOOST); 173 NVKM_THERM_THRS_FANBOOST);
180 174
181 nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_down_clock, 175 nvkm_therm_threshold_hyst_polling(therm,
176 &sensor->thrs_down_clock,
182 NVKM_THERM_THRS_DOWNCLOCK); 177 NVKM_THERM_THRS_DOWNCLOCK);
183 178
184 nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_critical, 179 nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_critical,
@@ -187,46 +182,45 @@ alarm_timer_callback(struct nvkm_alarm *alarm)
187 nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown, 182 nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
188 NVKM_THERM_THRS_SHUTDOWN); 183 NVKM_THERM_THRS_SHUTDOWN);
189 184
190 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); 185 spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
191 186
192 /* schedule the next poll in one second */ 187 /* schedule the next poll in one second */
193 if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head)) 188 if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
194 ptimer->alarm(ptimer, 1000000000ULL, alarm); 189 nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
195} 190}
196 191
197void 192void
198nvkm_therm_program_alarms_polling(struct nvkm_therm *therm) 193nvkm_therm_program_alarms_polling(struct nvkm_therm *therm)
199{ 194{
200 struct nvkm_therm_priv *priv = (void *)therm; 195 struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
201 struct nvbios_therm_sensor *sensor = &priv->bios_sensor; 196
202 197 nvkm_debug(&therm->subdev,
203 nv_debug(therm, 198 "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
204 "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n", 199 sensor->thrs_fan_boost.temp,
205 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis, 200 sensor->thrs_fan_boost.hysteresis,
206 sensor->thrs_down_clock.temp, 201 sensor->thrs_down_clock.temp,
207 sensor->thrs_down_clock.hysteresis, 202 sensor->thrs_down_clock.hysteresis,
208 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis, 203 sensor->thrs_critical.temp,
209 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis); 204 sensor->thrs_critical.hysteresis,
210 205 sensor->thrs_shutdown.temp,
211 alarm_timer_callback(&priv->sensor.therm_poll_alarm); 206 sensor->thrs_shutdown.hysteresis);
207
208 alarm_timer_callback(&therm->sensor.therm_poll_alarm);
212} 209}
213 210
214int 211int
215nvkm_therm_sensor_init(struct nvkm_therm *therm) 212nvkm_therm_sensor_init(struct nvkm_therm *therm)
216{ 213{
217 struct nvkm_therm_priv *priv = (void *)therm; 214 therm->func->program_alarms(therm);
218 priv->sensor.program_alarms(therm);
219 return 0; 215 return 0;
220} 216}
221 217
222int 218int
223nvkm_therm_sensor_fini(struct nvkm_therm *therm, bool suspend) 219nvkm_therm_sensor_fini(struct nvkm_therm *therm, bool suspend)
224{ 220{
225 struct nvkm_therm_priv *priv = (void *)therm; 221 struct nvkm_timer *tmr = therm->subdev.device->timer;
226 struct nvkm_timer *ptimer = nvkm_timer(therm);
227
228 if (suspend) 222 if (suspend)
229 ptimer->alarm_cancel(ptimer, &priv->sensor.therm_poll_alarm); 223 nvkm_timer_alarm_cancel(tmr, &therm->sensor.therm_poll_alarm);
230 return 0; 224 return 0;
231} 225}
232 226
@@ -235,24 +229,24 @@ nvkm_therm_sensor_preinit(struct nvkm_therm *therm)
235{ 229{
236 const char *sensor_avail = "yes"; 230 const char *sensor_avail = "yes";
237 231
238 if (therm->temp_get(therm) < 0) 232 if (therm->func->temp_get(therm) < 0)
239 sensor_avail = "no"; 233 sensor_avail = "no";
240 234
241 nv_info(therm, "internal sensor: %s\n", sensor_avail); 235 nvkm_debug(&therm->subdev, "internal sensor: %s\n", sensor_avail);
242} 236}
243 237
244int 238int
245nvkm_therm_sensor_ctor(struct nvkm_therm *therm) 239nvkm_therm_sensor_ctor(struct nvkm_therm *therm)
246{ 240{
247 struct nvkm_therm_priv *priv = (void *)therm; 241 struct nvkm_subdev *subdev = &therm->subdev;
248 struct nvkm_bios *bios = nvkm_bios(therm); 242 struct nvkm_bios *bios = subdev->device->bios;
249 243
250 nvkm_alarm_init(&priv->sensor.therm_poll_alarm, alarm_timer_callback); 244 nvkm_alarm_init(&therm->sensor.therm_poll_alarm, alarm_timer_callback);
251 245
252 nvkm_therm_temp_set_defaults(therm); 246 nvkm_therm_temp_set_defaults(therm);
253 if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE, 247 if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE,
254 &priv->bios_sensor)) 248 &therm->bios_sensor))
255 nv_error(therm, "nvbios_therm_sensor_parse failed\n"); 249 nvkm_error(subdev, "nvbios_therm_sensor_parse failed\n");
256 nvkm_therm_temp_safety_checks(therm); 250 nvkm_therm_temp_safety_checks(therm);
257 251
258 return 0; 252 return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
index d1d38b4ba30a..e436f0ffe3f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
@@ -1,3 +1,5 @@
1nvkm-y += nvkm/subdev/timer/base.o 1nvkm-y += nvkm/subdev/timer/base.o
2nvkm-y += nvkm/subdev/timer/nv04.o 2nvkm-y += nvkm/subdev/timer/nv04.o
3nvkm-y += nvkm/subdev/timer/nv40.o
4nvkm-y += nvkm/subdev/timer/nv41.o
3nvkm-y += nvkm/subdev/timer/gk20a.o 5nvkm-y += nvkm/subdev/timer/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index d894061ced52..d4dae1f12d62 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -21,73 +21,131 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <subdev/timer.h> 24#include "priv.h"
25 25
26bool 26u64
27nvkm_timer_wait_eq(void *obj, u64 nsec, u32 addr, u32 mask, u32 data) 27nvkm_timer_read(struct nvkm_timer *tmr)
28{ 28{
29 struct nvkm_timer *ptimer = nvkm_timer(obj); 29 return tmr->func->read(tmr);
30 u64 time0; 30}
31 31
32 time0 = ptimer->read(ptimer); 32void
33 do { 33nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
34 if (nv_iclass(obj, NV_SUBDEV_CLASS)) { 34{
35 if ((nv_rd32(obj, addr) & mask) == data) 35 struct nvkm_alarm *alarm, *atemp;
36 return true; 36 unsigned long flags;
37 } else { 37 LIST_HEAD(exec);
38 if ((nv_ro32(obj, addr) & mask) == data) 38
39 return true; 39 /* move any due alarms off the pending list */
40 } 40 spin_lock_irqsave(&tmr->lock, flags);
41 } while (ptimer->read(ptimer) - time0 < nsec); 41 list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
42 if (alarm->timestamp <= nvkm_timer_read(tmr))
43 list_move_tail(&alarm->head, &exec);
44 }
42 45
43 return false; 46 /* reschedule interrupt for next alarm time */
47 if (!list_empty(&tmr->alarms)) {
48 alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head);
49 tmr->func->alarm_init(tmr, alarm->timestamp);
50 } else {
51 tmr->func->alarm_fini(tmr);
52 }
53 spin_unlock_irqrestore(&tmr->lock, flags);
54
55 /* execute any pending alarm handlers */
56 list_for_each_entry_safe(alarm, atemp, &exec, head) {
57 list_del_init(&alarm->head);
58 alarm->func(alarm);
59 }
44} 60}
45 61
46bool 62void
47nvkm_timer_wait_ne(void *obj, u64 nsec, u32 addr, u32 mask, u32 data) 63nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm)
48{ 64{
49 struct nvkm_timer *ptimer = nvkm_timer(obj); 65 struct nvkm_alarm *list;
50 u64 time0; 66 unsigned long flags;
51 67
52 time0 = ptimer->read(ptimer); 68 alarm->timestamp = nvkm_timer_read(tmr) + nsec;
53 do { 69
54 if (nv_iclass(obj, NV_SUBDEV_CLASS)) { 70 /* append new alarm to list, in soonest-alarm-first order */
55 if ((nv_rd32(obj, addr) & mask) != data) 71 spin_lock_irqsave(&tmr->lock, flags);
56 return true; 72 if (!nsec) {
57 } else { 73 if (!list_empty(&alarm->head))
58 if ((nv_ro32(obj, addr) & mask) != data) 74 list_del(&alarm->head);
59 return true; 75 } else {
76 list_for_each_entry(list, &tmr->alarms, head) {
77 if (list->timestamp > alarm->timestamp)
78 break;
60 } 79 }
61 } while (ptimer->read(ptimer) - time0 < nsec); 80 list_add_tail(&alarm->head, &list->head);
81 }
82 spin_unlock_irqrestore(&tmr->lock, flags);
62 83
63 return false; 84 /* process pending alarms */
85 nvkm_timer_alarm_trigger(tmr);
64} 86}
65 87
66bool 88void
67nvkm_timer_wait_cb(void *obj, u64 nsec, bool (*func)(void *), void *data) 89nvkm_timer_alarm_cancel(struct nvkm_timer *tmr, struct nvkm_alarm *alarm)
68{ 90{
69 struct nvkm_timer *ptimer = nvkm_timer(obj); 91 unsigned long flags;
70 u64 time0; 92 spin_lock_irqsave(&tmr->lock, flags);
93 list_del_init(&alarm->head);
94 spin_unlock_irqrestore(&tmr->lock, flags);
95}
71 96
72 time0 = ptimer->read(ptimer); 97static void
73 do { 98nvkm_timer_intr(struct nvkm_subdev *subdev)
74 if (func(data) == true) 99{
75 return true; 100 struct nvkm_timer *tmr = nvkm_timer(subdev);
76 } while (ptimer->read(ptimer) - time0 < nsec); 101 tmr->func->intr(tmr);
102}
77 103
78 return false; 104static int
105nvkm_timer_fini(struct nvkm_subdev *subdev, bool suspend)
106{
107 struct nvkm_timer *tmr = nvkm_timer(subdev);
108 tmr->func->alarm_fini(tmr);
109 return 0;
79} 110}
80 111
81void 112static int
82nvkm_timer_alarm(void *obj, u32 nsec, struct nvkm_alarm *alarm) 113nvkm_timer_init(struct nvkm_subdev *subdev)
83{ 114{
84 struct nvkm_timer *ptimer = nvkm_timer(obj); 115 struct nvkm_timer *tmr = nvkm_timer(subdev);
85 ptimer->alarm(ptimer, nsec, alarm); 116 if (tmr->func->init)
117 tmr->func->init(tmr);
118 tmr->func->time(tmr, ktime_to_ns(ktime_get()));
119 nvkm_timer_alarm_trigger(tmr);
120 return 0;
86} 121}
87 122
88void 123static void *
89nvkm_timer_alarm_cancel(void *obj, struct nvkm_alarm *alarm) 124nvkm_timer_dtor(struct nvkm_subdev *subdev)
90{ 125{
91 struct nvkm_timer *ptimer = nvkm_timer(obj); 126 return nvkm_timer(subdev);
92 ptimer->alarm_cancel(ptimer, alarm); 127}
128
129static const struct nvkm_subdev_func
130nvkm_timer = {
131 .dtor = nvkm_timer_dtor,
132 .init = nvkm_timer_init,
133 .fini = nvkm_timer_fini,
134 .intr = nvkm_timer_intr,
135};
136
137int
138nvkm_timer_new_(const struct nvkm_timer_func *func, struct nvkm_device *device,
139 int index, struct nvkm_timer **ptmr)
140{
141 struct nvkm_timer *tmr;
142
143 if (!(tmr = *ptmr = kzalloc(sizeof(*tmr), GFP_KERNEL)))
144 return -ENOMEM;
145
146 nvkm_subdev_ctor(&nvkm_timer, device, index, 0, &tmr->subdev);
147 tmr->func = func;
148 INIT_LIST_HEAD(&tmr->alarms);
149 spin_lock_init(&tmr->lock);
150 return 0;
93} 151}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
index 80e38063dd9b..9ed5f64912d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
@@ -21,36 +21,19 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "priv.h"
25 25
26static int 26static const struct nvkm_timer_func
27gk20a_timer_init(struct nvkm_object *object) 27gk20a_timer = {
28{ 28 .intr = nv04_timer_intr,
29 struct nv04_timer_priv *priv = (void *)object; 29 .read = nv04_timer_read,
30 u32 hi = upper_32_bits(priv->suspend_time); 30 .time = nv04_timer_time,
31 u32 lo = lower_32_bits(priv->suspend_time); 31 .alarm_init = nv04_timer_alarm_init,
32 int ret; 32 .alarm_fini = nv04_timer_alarm_fini,
33 33};
34 ret = nvkm_timer_init(&priv->base);
35 if (ret)
36 return ret;
37
38 nv_debug(priv, "time low : 0x%08x\n", lo);
39 nv_debug(priv, "time high : 0x%08x\n", hi);
40 34
41 /* restore the time before suspend */ 35int
42 nv_wr32(priv, NV04_PTIMER_TIME_1, hi); 36gk20a_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
43 nv_wr32(priv, NV04_PTIMER_TIME_0, lo); 37{
44 return 0; 38 return nvkm_timer_new_(&gk20a_timer, device, index, ptmr);
45} 39}
46
47struct nvkm_oclass
48gk20a_timer_oclass = {
49 .handle = NV_SUBDEV(TIMER, 0xff),
50 .ofuncs = &(struct nvkm_ofuncs) {
51 .ctor = nv04_timer_ctor,
52 .dtor = nv04_timer_dtor,
53 .init = gk20a_timer_init,
54 .fini = nv04_timer_fini,
55 }
56};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
index 6b7facbe59a2..7b9ce87f0617 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
@@ -21,165 +21,92 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "nv04.h" 24#include "priv.h"
25#include "regsnv04.h"
25 26
26#include <core/device.h> 27void
27 28nv04_timer_time(struct nvkm_timer *tmr, u64 time)
28static u64
29nv04_timer_read(struct nvkm_timer *ptimer)
30{ 29{
31 struct nv04_timer_priv *priv = (void *)ptimer; 30 struct nvkm_subdev *subdev = &tmr->subdev;
32 u32 hi, lo; 31 struct nvkm_device *device = subdev->device;
32 u32 hi = upper_32_bits(time);
33 u32 lo = lower_32_bits(time);
33 34
34 do { 35 nvkm_debug(subdev, "time low : %08x\n", lo);
35 hi = nv_rd32(priv, NV04_PTIMER_TIME_1); 36 nvkm_debug(subdev, "time high : %08x\n", hi);
36 lo = nv_rd32(priv, NV04_PTIMER_TIME_0);
37 } while (hi != nv_rd32(priv, NV04_PTIMER_TIME_1));
38 37
39 return ((u64)hi << 32 | lo); 38 nvkm_wr32(device, NV04_PTIMER_TIME_1, hi);
39 nvkm_wr32(device, NV04_PTIMER_TIME_0, lo);
40} 40}
41 41
42static void 42u64
43nv04_timer_alarm_trigger(struct nvkm_timer *ptimer) 43nv04_timer_read(struct nvkm_timer *tmr)
44{ 44{
45 struct nv04_timer_priv *priv = (void *)ptimer; 45 struct nvkm_device *device = tmr->subdev.device;
46 struct nvkm_alarm *alarm, *atemp; 46 u32 hi, lo;
47 unsigned long flags;
48 LIST_HEAD(exec);
49
50 /* move any due alarms off the pending list */
51 spin_lock_irqsave(&priv->lock, flags);
52 list_for_each_entry_safe(alarm, atemp, &priv->alarms, head) {
53 if (alarm->timestamp <= ptimer->read(ptimer))
54 list_move_tail(&alarm->head, &exec);
55 }
56 47
57 /* reschedule interrupt for next alarm time */ 48 do {
58 if (!list_empty(&priv->alarms)) { 49 hi = nvkm_rd32(device, NV04_PTIMER_TIME_1);
59 alarm = list_first_entry(&priv->alarms, typeof(*alarm), head); 50 lo = nvkm_rd32(device, NV04_PTIMER_TIME_0);
60 nv_wr32(priv, NV04_PTIMER_ALARM_0, alarm->timestamp); 51 } while (hi != nvkm_rd32(device, NV04_PTIMER_TIME_1));
61 nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000001);
62 } else {
63 nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
64 }
65 spin_unlock_irqrestore(&priv->lock, flags);
66 52
67 /* execute any pending alarm handlers */ 53 return ((u64)hi << 32 | lo);
68 list_for_each_entry_safe(alarm, atemp, &exec, head) {
69 list_del_init(&alarm->head);
70 alarm->func(alarm);
71 }
72} 54}
73 55
74static void 56void
75nv04_timer_alarm(struct nvkm_timer *ptimer, u64 time, struct nvkm_alarm *alarm) 57nv04_timer_alarm_fini(struct nvkm_timer *tmr)
76{ 58{
77 struct nv04_timer_priv *priv = (void *)ptimer; 59 struct nvkm_device *device = tmr->subdev.device;
78 struct nvkm_alarm *list; 60 nvkm_wr32(device, NV04_PTIMER_INTR_EN_0, 0x00000000);
79 unsigned long flags;
80
81 alarm->timestamp = ptimer->read(ptimer) + time;
82
83 /* append new alarm to list, in soonest-alarm-first order */
84 spin_lock_irqsave(&priv->lock, flags);
85 if (!time) {
86 if (!list_empty(&alarm->head))
87 list_del(&alarm->head);
88 } else {
89 list_for_each_entry(list, &priv->alarms, head) {
90 if (list->timestamp > alarm->timestamp)
91 break;
92 }
93 list_add_tail(&alarm->head, &list->head);
94 }
95 spin_unlock_irqrestore(&priv->lock, flags);
96
97 /* process pending alarms */
98 nv04_timer_alarm_trigger(ptimer);
99} 61}
100 62
101static void 63void
102nv04_timer_alarm_cancel(struct nvkm_timer *ptimer, struct nvkm_alarm *alarm) 64nv04_timer_alarm_init(struct nvkm_timer *tmr, u32 time)
103{ 65{
104 struct nv04_timer_priv *priv = (void *)ptimer; 66 struct nvkm_device *device = tmr->subdev.device;
105 unsigned long flags; 67 nvkm_wr32(device, NV04_PTIMER_ALARM_0, time);
106 spin_lock_irqsave(&priv->lock, flags); 68 nvkm_wr32(device, NV04_PTIMER_INTR_EN_0, 0x00000001);
107 list_del_init(&alarm->head);
108 spin_unlock_irqrestore(&priv->lock, flags);
109} 69}
110 70
111static void 71void
112nv04_timer_intr(struct nvkm_subdev *subdev) 72nv04_timer_intr(struct nvkm_timer *tmr)
113{ 73{
114 struct nv04_timer_priv *priv = (void *)subdev; 74 struct nvkm_subdev *subdev = &tmr->subdev;
115 u32 stat = nv_rd32(priv, NV04_PTIMER_INTR_0); 75 struct nvkm_device *device = subdev->device;
76 u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
116 77
117 if (stat & 0x00000001) { 78 if (stat & 0x00000001) {
118 nv04_timer_alarm_trigger(&priv->base); 79 nvkm_timer_alarm_trigger(tmr);
119 nv_wr32(priv, NV04_PTIMER_INTR_0, 0x00000001); 80 nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
120 stat &= ~0x00000001; 81 stat &= ~0x00000001;
121 } 82 }
122 83
123 if (stat) { 84 if (stat) {
124 nv_error(priv, "unknown stat 0x%08x\n", stat); 85 nvkm_error(subdev, "intr %08x\n", stat);
125 nv_wr32(priv, NV04_PTIMER_INTR_0, stat); 86 nvkm_wr32(device, NV04_PTIMER_INTR_0, stat);
126 } 87 }
127} 88}
128 89
129int 90static void
130nv04_timer_fini(struct nvkm_object *object, bool suspend) 91nv04_timer_init(struct nvkm_timer *tmr)
131{
132 struct nv04_timer_priv *priv = (void *)object;
133 if (suspend)
134 priv->suspend_time = nv04_timer_read(&priv->base);
135 nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
136 return nvkm_timer_fini(&priv->base, suspend);
137}
138
139static int
140nv04_timer_init(struct nvkm_object *object)
141{ 92{
142 struct nvkm_device *device = nv_device(object); 93 struct nvkm_subdev *subdev = &tmr->subdev;
143 struct nv04_timer_priv *priv = (void *)object; 94 struct nvkm_device *device = subdev->device;
144 u32 m = 1, f, n, d, lo, hi; 95 u32 f = 0; /*XXX: nvclk */
145 int ret; 96 u32 n, d;
146
147 ret = nvkm_timer_init(&priv->base);
148 if (ret)
149 return ret;
150 97
151 /* aim for 31.25MHz, which gives us nanosecond timestamps */ 98 /* aim for 31.25MHz, which gives us nanosecond timestamps */
152 d = 1000000 / 32; 99 d = 1000000 / 32;
153 100 n = f;
154 /* determine base clock for timer source */ 101
155#if 0 /*XXX*/ 102 if (!f) {
156 if (device->chipset < 0x40) { 103 n = nvkm_rd32(device, NV04_PTIMER_NUMERATOR);
157 n = nvkm_hw_get_clock(device, PLL_CORE); 104 d = nvkm_rd32(device, NV04_PTIMER_DENOMINATOR);
158 } else 105 if (!n || !d) {
159#endif 106 n = 1;
160 if (device->chipset <= 0x40) { 107 d = 1;
161 /*XXX: figure this out */
162 f = -1;
163 n = 0;
164 } else {
165 f = device->crystal;
166 n = f;
167 while (n < (d * 2)) {
168 n += (n / m);
169 m++;
170 }
171
172 nv_wr32(priv, 0x009220, m - 1);
173 }
174
175 if (!n) {
176 nv_warn(priv, "unknown input clock freq\n");
177 if (!nv_rd32(priv, NV04_PTIMER_NUMERATOR) ||
178 !nv_rd32(priv, NV04_PTIMER_DENOMINATOR)) {
179 nv_wr32(priv, NV04_PTIMER_NUMERATOR, 1);
180 nv_wr32(priv, NV04_PTIMER_DENOMINATOR, 1);
181 } 108 }
182 return 0; 109 nvkm_warn(subdev, "unknown input clock freq\n");
183 } 110 }
184 111
185 /* reduce ratio to acceptable values */ 112 /* reduce ratio to acceptable values */
@@ -198,65 +125,27 @@ nv04_timer_init(struct nvkm_object *object)
198 d >>= 1; 125 d >>= 1;
199 } 126 }
200 127
201 /* restore the time before suspend */ 128 nvkm_debug(subdev, "input frequency : %dHz\n", f);
202 lo = priv->suspend_time; 129 nvkm_debug(subdev, "numerator : %08x\n", n);
203 hi = (priv->suspend_time >> 32); 130 nvkm_debug(subdev, "denominator : %08x\n", d);
131 nvkm_debug(subdev, "timer frequency : %dHz\n", f * d / n);
204 132
205 nv_debug(priv, "input frequency : %dHz\n", f); 133 nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
206 nv_debug(priv, "input multiplier: %d\n", m); 134 nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
207 nv_debug(priv, "numerator : 0x%08x\n", n);
208 nv_debug(priv, "denominator : 0x%08x\n", d);
209 nv_debug(priv, "timer frequency : %dHz\n", (f * m) * d / n);
210 nv_debug(priv, "time low : 0x%08x\n", lo);
211 nv_debug(priv, "time high : 0x%08x\n", hi);
212
213 nv_wr32(priv, NV04_PTIMER_NUMERATOR, n);
214 nv_wr32(priv, NV04_PTIMER_DENOMINATOR, d);
215 nv_wr32(priv, NV04_PTIMER_INTR_0, 0xffffffff);
216 nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
217 nv_wr32(priv, NV04_PTIMER_TIME_1, hi);
218 nv_wr32(priv, NV04_PTIMER_TIME_0, lo);
219 return 0;
220} 135}
221 136
222void 137static const struct nvkm_timer_func
223nv04_timer_dtor(struct nvkm_object *object) 138nv04_timer = {
224{ 139 .init = nv04_timer_init,
225 struct nv04_timer_priv *priv = (void *)object; 140 .intr = nv04_timer_intr,
226 return nvkm_timer_destroy(&priv->base); 141 .read = nv04_timer_read,
227} 142 .time = nv04_timer_time,
143 .alarm_init = nv04_timer_alarm_init,
144 .alarm_fini = nv04_timer_alarm_fini,
145};
228 146
229int 147int
230nv04_timer_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 148nv04_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
231 struct nvkm_oclass *oclass, void *data, u32 size,
232 struct nvkm_object **pobject)
233{ 149{
234 struct nv04_timer_priv *priv; 150 return nvkm_timer_new_(&nv04_timer, device, index, ptmr);
235 int ret;
236
237 ret = nvkm_timer_create(parent, engine, oclass, &priv);
238 *pobject = nv_object(priv);
239 if (ret)
240 return ret;
241
242 priv->base.base.intr = nv04_timer_intr;
243 priv->base.read = nv04_timer_read;
244 priv->base.alarm = nv04_timer_alarm;
245 priv->base.alarm_cancel = nv04_timer_alarm_cancel;
246 priv->suspend_time = 0;
247
248 INIT_LIST_HEAD(&priv->alarms);
249 spin_lock_init(&priv->lock);
250 return 0;
251} 151}
252
253struct nvkm_oclass
254nv04_timer_oclass = {
255 .handle = NV_SUBDEV(TIMER, 0x04),
256 .ofuncs = &(struct nvkm_ofuncs) {
257 .ctor = nv04_timer_ctor,
258 .dtor = nv04_timer_dtor,
259 .init = nv04_timer_init,
260 .fini = nv04_timer_fini,
261 }
262};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h
deleted file mode 100644
index 89996a9826b1..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef __NVKM_TIMER_NV04_H__
2#define __NVKM_TIMER_NV04_H__
3#include "priv.h"
4
5#define NV04_PTIMER_INTR_0 0x009100
6#define NV04_PTIMER_INTR_EN_0 0x009140
7#define NV04_PTIMER_NUMERATOR 0x009200
8#define NV04_PTIMER_DENOMINATOR 0x009210
9#define NV04_PTIMER_TIME_0 0x009400
10#define NV04_PTIMER_TIME_1 0x009410
11#define NV04_PTIMER_ALARM_0 0x009420
12
13struct nv04_timer_priv {
14 struct nvkm_timer base;
15 struct list_head alarms;
16 spinlock_t lock;
17 u64 suspend_time;
18};
19
20int nv04_timer_ctor(struct nvkm_object *, struct nvkm_object *,
21 struct nvkm_oclass *, void *, u32,
22 struct nvkm_object **);
23void nv04_timer_dtor(struct nvkm_object *);
24int nv04_timer_fini(struct nvkm_object *, bool);
25#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
new file mode 100644
index 000000000000..bb99a152f26e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
@@ -0,0 +1,88 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25#include "regsnv04.h"
26
27static void
28nv40_timer_init(struct nvkm_timer *tmr)
29{
30 struct nvkm_subdev *subdev = &tmr->subdev;
31 struct nvkm_device *device = subdev->device;
32 u32 f = 0; /*XXX: figure this out */
33 u32 n, d;
34
35 /* aim for 31.25MHz, which gives us nanosecond timestamps */
36 d = 1000000 / 32;
37 n = f;
38
39 if (!f) {
40 n = nvkm_rd32(device, NV04_PTIMER_NUMERATOR);
41 d = nvkm_rd32(device, NV04_PTIMER_DENOMINATOR);
42 if (!n || !d) {
43 n = 1;
44 d = 1;
45 }
46 nvkm_warn(subdev, "unknown input clock freq\n");
47 }
48
49 /* reduce ratio to acceptable values */
50 while (((n % 5) == 0) && ((d % 5) == 0)) {
51 n /= 5;
52 d /= 5;
53 }
54
55 while (((n % 2) == 0) && ((d % 2) == 0)) {
56 n /= 2;
57 d /= 2;
58 }
59
60 while (n > 0xffff || d > 0xffff) {
61 n >>= 1;
62 d >>= 1;
63 }
64
65 nvkm_debug(subdev, "input frequency : %dHz\n", f);
66 nvkm_debug(subdev, "numerator : %08x\n", n);
67 nvkm_debug(subdev, "denominator : %08x\n", d);
68 nvkm_debug(subdev, "timer frequency : %dHz\n", f * d / n);
69
70 nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
71 nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
72}
73
74static const struct nvkm_timer_func
75nv40_timer = {
76 .init = nv40_timer_init,
77 .intr = nv04_timer_intr,
78 .read = nv04_timer_read,
79 .time = nv04_timer_time,
80 .alarm_init = nv04_timer_alarm_init,
81 .alarm_fini = nv04_timer_alarm_fini,
82};
83
84int
85nv40_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
86{
87 return nvkm_timer_new_(&nv40_timer, device, index, ptmr);
88}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
new file mode 100644
index 000000000000..3cf9ec1b1b57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
@@ -0,0 +1,85 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25#include "regsnv04.h"
26
27static void
28nv41_timer_init(struct nvkm_timer *tmr)
29{
30 struct nvkm_subdev *subdev = &tmr->subdev;
31 struct nvkm_device *device = subdev->device;
32 u32 f = device->crystal;
33 u32 m = 1, n, d;
34
35 /* aim for 31.25MHz, which gives us nanosecond timestamps */
36 d = 1000000 / 32;
37 n = f;
38
39 while (n < (d * 2)) {
40 n += (n / m);
41 m++;
42 }
43
44 /* reduce ratio to acceptable values */
45 while (((n % 5) == 0) && ((d % 5) == 0)) {
46 n /= 5;
47 d /= 5;
48 }
49
50 while (((n % 2) == 0) && ((d % 2) == 0)) {
51 n /= 2;
52 d /= 2;
53 }
54
55 while (n > 0xffff || d > 0xffff) {
56 n >>= 1;
57 d >>= 1;
58 }
59
60 nvkm_debug(subdev, "input frequency : %dHz\n", f);
61 nvkm_debug(subdev, "input multiplier: %d\n", m);
62 nvkm_debug(subdev, "numerator : %08x\n", n);
63 nvkm_debug(subdev, "denominator : %08x\n", d);
64 nvkm_debug(subdev, "timer frequency : %dHz\n", (f * m) * d / n);
65
66 nvkm_wr32(device, 0x009220, m - 1);
67 nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
68 nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
69}
70
71static const struct nvkm_timer_func
72nv41_timer = {
73 .init = nv41_timer_init,
74 .intr = nv04_timer_intr,
75 .read = nv04_timer_read,
76 .time = nv04_timer_time,
77 .alarm_init = nv04_timer_alarm_init,
78 .alarm_fini = nv04_timer_alarm_fini,
79};
80
81int
82nv41_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
83{
84 return nvkm_timer_new_(&nv41_timer, device, index, ptmr);
85}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
index 08e29a3da188..f820ca2aeda4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
@@ -1,4 +1,26 @@
1#ifndef __NVKM_TIMER_PRIV_H__ 1#ifndef __NVKM_TIMER_PRIV_H__
2#define __NVKM_TIMER_PRIV_H__ 2#define __NVKM_TIMER_PRIV_H__
3#define nvkm_timer(p) container_of((p), struct nvkm_timer, subdev)
3#include <subdev/timer.h> 4#include <subdev/timer.h>
5
6int nvkm_timer_new_(const struct nvkm_timer_func *, struct nvkm_device *,
7 int index, struct nvkm_timer **);
8
9struct nvkm_timer_func {
10 void (*init)(struct nvkm_timer *);
11 void (*intr)(struct nvkm_timer *);
12 u64 (*read)(struct nvkm_timer *);
13 void (*time)(struct nvkm_timer *, u64 time);
14 void (*alarm_init)(struct nvkm_timer *, u32 time);
15 void (*alarm_fini)(struct nvkm_timer *);
16};
17
18void nvkm_timer_alarm_trigger(struct nvkm_timer *);
19
20void nv04_timer_fini(struct nvkm_timer *);
21void nv04_timer_intr(struct nvkm_timer *);
22void nv04_timer_time(struct nvkm_timer *, u64);
23u64 nv04_timer_read(struct nvkm_timer *);
24void nv04_timer_alarm_init(struct nvkm_timer *, u32);
25void nv04_timer_alarm_fini(struct nvkm_timer *);
4#endif 26#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
new file mode 100644
index 000000000000..10bef85b485e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
@@ -0,0 +1,7 @@
1#define NV04_PTIMER_INTR_0 0x009100
2#define NV04_PTIMER_INTR_EN_0 0x009140
3#define NV04_PTIMER_NUMERATOR 0x009200
4#define NV04_PTIMER_DENOMINATOR 0x009210
5#define NV04_PTIMER_TIME_0 0x009400
6#define NV04_PTIMER_TIME_1 0x009410
7#define NV04_PTIMER_ALARM_0 0x009420
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 39f15803f2d4..4752dbd33923 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -21,49 +21,45 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <subdev/volt.h> 24#include "priv.h"
25
25#include <subdev/bios.h> 26#include <subdev/bios.h>
26#include <subdev/bios/vmap.h> 27#include <subdev/bios/vmap.h>
27#include <subdev/bios/volt.h> 28#include <subdev/bios/volt.h>
28 29
29static int 30int
30nvkm_volt_get(struct nvkm_volt *volt) 31nvkm_volt_get(struct nvkm_volt *volt)
31{ 32{
32 if (volt->vid_get) { 33 int ret = volt->func->vid_get(volt), i;
33 int ret = volt->vid_get(volt), i; 34 if (ret >= 0) {
34 if (ret >= 0) { 35 for (i = 0; i < volt->vid_nr; i++) {
35 for (i = 0; i < volt->vid_nr; i++) { 36 if (volt->vid[i].vid == ret)
36 if (volt->vid[i].vid == ret) 37 return volt->vid[i].uv;
37 return volt->vid[i].uv;
38 }
39 ret = -EINVAL;
40 } 38 }
41 return ret; 39 ret = -EINVAL;
42 } 40 }
43 return -ENODEV; 41 return ret;
44} 42}
45 43
46static int 44static int
47nvkm_volt_set(struct nvkm_volt *volt, u32 uv) 45nvkm_volt_set(struct nvkm_volt *volt, u32 uv)
48{ 46{
49 if (volt->vid_set) { 47 struct nvkm_subdev *subdev = &volt->subdev;
50 int i, ret = -EINVAL; 48 int i, ret = -EINVAL;
51 for (i = 0; i < volt->vid_nr; i++) { 49 for (i = 0; i < volt->vid_nr; i++) {
52 if (volt->vid[i].uv == uv) { 50 if (volt->vid[i].uv == uv) {
53 ret = volt->vid_set(volt, volt->vid[i].vid); 51 ret = volt->func->vid_set(volt, volt->vid[i].vid);
54 nv_debug(volt, "set %duv: %d\n", uv, ret); 52 nvkm_debug(subdev, "set %duv: %d\n", uv, ret);
55 break; 53 break;
56 }
57 } 54 }
58 return ret;
59 } 55 }
60 return -ENODEV; 56 return ret;
61} 57}
62 58
63static int 59static int
64nvkm_volt_map(struct nvkm_volt *volt, u8 id) 60nvkm_volt_map(struct nvkm_volt *volt, u8 id)
65{ 61{
66 struct nvkm_bios *bios = nvkm_bios(volt); 62 struct nvkm_bios *bios = volt->subdev.device->bios;
67 struct nvbios_vmap_entry info; 63 struct nvbios_vmap_entry info;
68 u8 ver, len; 64 u8 ver, len;
69 u16 vmap; 65 u16 vmap;
@@ -82,10 +78,15 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id)
82 return id ? id * 10000 : -ENODEV; 78 return id ? id * 10000 : -ENODEV;
83} 79}
84 80
85static int 81int
86nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition) 82nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
87{ 83{
88 int ret = nvkm_volt_map(volt, id); 84 int ret;
85
86 if (volt->func->set_id)
87 return volt->func->set_id(volt, id, condition);
88
89 ret = nvkm_volt_map(volt, id);
89 if (ret >= 0) { 90 if (ret >= 0) {
90 int prev = nvkm_volt_get(volt); 91 int prev = nvkm_volt_get(volt);
91 if (!condition || prev < 0 || 92 if (!condition || prev < 0 ||
@@ -134,51 +135,41 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
134 } 135 }
135} 136}
136 137
137int 138static int
138_nvkm_volt_init(struct nvkm_object *object) 139nvkm_volt_init(struct nvkm_subdev *subdev)
139{ 140{
140 struct nvkm_volt *volt = (void *)object; 141 struct nvkm_volt *volt = nvkm_volt(subdev);
141 int ret; 142 int ret = nvkm_volt_get(volt);
142
143 ret = nvkm_subdev_init(&volt->base);
144 if (ret)
145 return ret;
146
147 ret = volt->get(volt);
148 if (ret < 0) { 143 if (ret < 0) {
149 if (ret != -ENODEV) 144 if (ret != -ENODEV)
150 nv_debug(volt, "current voltage unknown\n"); 145 nvkm_debug(subdev, "current voltage unknown\n");
151 return 0; 146 return 0;
152 } 147 }
153 148 nvkm_debug(subdev, "current voltage: %duv\n", ret);
154 nv_info(volt, "GPU voltage: %duv\n", ret);
155 return 0; 149 return 0;
156} 150}
157 151
158void 152static void *
159_nvkm_volt_dtor(struct nvkm_object *object) 153nvkm_volt_dtor(struct nvkm_subdev *subdev)
160{ 154{
161 struct nvkm_volt *volt = (void *)object; 155 return nvkm_volt(subdev);
162 nvkm_subdev_destroy(&volt->base);
163} 156}
164 157
165int 158static const struct nvkm_subdev_func
166nvkm_volt_create_(struct nvkm_object *parent, struct nvkm_object *engine, 159nvkm_volt = {
167 struct nvkm_oclass *oclass, int length, void **pobject) 160 .dtor = nvkm_volt_dtor,
168{ 161 .init = nvkm_volt_init,
169 struct nvkm_bios *bios = nvkm_bios(parent); 162};
170 struct nvkm_volt *volt;
171 int ret, i;
172 163
173 ret = nvkm_subdev_create_(parent, engine, oclass, 0, "VOLT", 164void
174 "voltage", length, pobject); 165nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
175 volt = *pobject; 166 int index, struct nvkm_volt *volt)
176 if (ret) 167{
177 return ret; 168 struct nvkm_bios *bios = device->bios;
169 int i;
178 170
179 volt->get = nvkm_volt_get; 171 nvkm_subdev_ctor(&nvkm_volt, device, index, 0, &volt->subdev);
180 volt->set = nvkm_volt_set; 172 volt->func = func;
181 volt->set_id = nvkm_volt_set_id;
182 173
183 /* Assuming the non-bios device should build the voltage table later */ 174 /* Assuming the non-bios device should build the voltage table later */
184 if (bios) 175 if (bios)
@@ -186,19 +177,18 @@ nvkm_volt_create_(struct nvkm_object *parent, struct nvkm_object *engine,
186 177
187 if (volt->vid_nr) { 178 if (volt->vid_nr) {
188 for (i = 0; i < volt->vid_nr; i++) { 179 for (i = 0; i < volt->vid_nr; i++) {
189 nv_debug(volt, "VID %02x: %duv\n", 180 nvkm_debug(&volt->subdev, "VID %02x: %duv\n",
190 volt->vid[i].vid, volt->vid[i].uv); 181 volt->vid[i].vid, volt->vid[i].uv);
191 }
192
193 /*XXX: this is an assumption.. there probably exists boards
194 * out there with i2c-connected voltage controllers too..
195 */
196 ret = nvkm_voltgpio_init(volt);
197 if (ret == 0) {
198 volt->vid_get = nvkm_voltgpio_get;
199 volt->vid_set = nvkm_voltgpio_set;
200 } 182 }
201 } 183 }
184}
202 185
203 return ret; 186int
187nvkm_volt_new_(const struct nvkm_volt_func *func, struct nvkm_device *device,
188 int index, struct nvkm_volt **pvolt)
189{
190 if (!(*pvolt = kzalloc(sizeof(**pvolt), GFP_KERNEL)))
191 return -ENOMEM;
192 nvkm_volt_ctor(func, device, index, *pvolt);
193 return 0;
204} 194}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
index 871fd51011db..fd56c6476064 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
@@ -19,10 +19,10 @@
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE. 20 * DEALINGS IN THE SOFTWARE.
21 */ 21 */
22#include <subdev/volt.h> 22#define gk20a_volt(p) container_of((p), struct gk20a_volt, base)
23#ifdef __KERNEL__ 23#include "priv.h"
24#include <nouveau_platform.h> 24
25#endif 25#include <core/tegra.h>
26 26
27struct cvb_coef { 27struct cvb_coef {
28 int c0; 28 int c0;
@@ -33,7 +33,7 @@ struct cvb_coef {
33 int c5; 33 int c5;
34}; 34};
35 35
36struct gk20a_volt_priv { 36struct gk20a_volt {
37 struct nvkm_volt base; 37 struct nvkm_volt base;
38 struct regulator *vdd; 38 struct regulator *vdd;
39}; 39};
@@ -101,43 +101,45 @@ gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
101} 101}
102 102
103static int 103static int
104gk20a_volt_vid_get(struct nvkm_volt *volt) 104gk20a_volt_vid_get(struct nvkm_volt *base)
105{ 105{
106 struct gk20a_volt_priv *priv = (void *)volt; 106 struct gk20a_volt *volt = gk20a_volt(base);
107 int i, uv; 107 int i, uv;
108 108
109 uv = regulator_get_voltage(priv->vdd); 109 uv = regulator_get_voltage(volt->vdd);
110 110
111 for (i = 0; i < volt->vid_nr; i++) 111 for (i = 0; i < volt->base.vid_nr; i++)
112 if (volt->vid[i].uv >= uv) 112 if (volt->base.vid[i].uv >= uv)
113 return i; 113 return i;
114 114
115 return -EINVAL; 115 return -EINVAL;
116} 116}
117 117
118static int 118static int
119gk20a_volt_vid_set(struct nvkm_volt *volt, u8 vid) 119gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
120{ 120{
121 struct gk20a_volt_priv *priv = (void *)volt; 121 struct gk20a_volt *volt = gk20a_volt(base);
122 struct nvkm_subdev *subdev = &volt->base.subdev;
122 123
123 nv_debug(volt, "set voltage as %duv\n", volt->vid[vid].uv); 124 nvkm_debug(subdev, "set voltage as %duv\n", volt->base.vid[vid].uv);
124 return regulator_set_voltage(priv->vdd, volt->vid[vid].uv, 1200000); 125 return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000);
125} 126}
126 127
127static int 128static int
128gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition) 129gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition)
129{ 130{
130 struct gk20a_volt_priv *priv = (void *)volt; 131 struct gk20a_volt *volt = gk20a_volt(base);
131 int prev_uv = regulator_get_voltage(priv->vdd); 132 struct nvkm_subdev *subdev = &volt->base.subdev;
132 int target_uv = volt->vid[id].uv; 133 int prev_uv = regulator_get_voltage(volt->vdd);
134 int target_uv = volt->base.vid[id].uv;
133 int ret; 135 int ret;
134 136
135 nv_debug(volt, "prev=%d, target=%d, condition=%d\n", 137 nvkm_debug(subdev, "prev=%d, target=%d, condition=%d\n",
136 prev_uv, target_uv, condition); 138 prev_uv, target_uv, condition);
137 if (!condition || 139 if (!condition ||
138 (condition < 0 && target_uv < prev_uv) || 140 (condition < 0 && target_uv < prev_uv) ||
139 (condition > 0 && target_uv > prev_uv)) { 141 (condition > 0 && target_uv > prev_uv)) {
140 ret = gk20a_volt_vid_set(volt, volt->vid[id].vid); 142 ret = gk20a_volt_vid_set(&volt->base, volt->base.vid[id].vid);
141 } else { 143 } else {
142 ret = 0; 144 ret = 0;
143 } 145 }
@@ -145,53 +147,42 @@ gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
145 return ret; 147 return ret;
146} 148}
147 149
148static int 150static const struct nvkm_volt_func
149gk20a_volt_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 151gk20a_volt = {
150 struct nvkm_oclass *oclass, void *data, u32 size, 152 .vid_get = gk20a_volt_vid_get,
151 struct nvkm_object **pobject) 153 .vid_set = gk20a_volt_vid_set,
154 .set_id = gk20a_volt_set_id,
155};
156
157int
158gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
152{ 159{
153 struct gk20a_volt_priv *priv; 160 struct nvkm_device_tegra *tdev = device->func->tegra(device);
154 struct nvkm_volt *volt; 161 struct gk20a_volt *volt;
155 struct nouveau_platform_device *plat; 162 int i, uv;
156 int i, ret, uv; 163
157 164 if (!(volt = kzalloc(sizeof(*volt), GFP_KERNEL)))
158 ret = nvkm_volt_create(parent, engine, oclass, &priv); 165 return -ENOMEM;
159 *pobject = nv_object(priv); 166
160 if (ret) 167 nvkm_volt_ctor(&gk20a_volt, device, index, &volt->base);
161 return ret; 168 *pvolt = &volt->base;
162 169
163 volt = &priv->base; 170 uv = regulator_get_voltage(tdev->vdd);
164 171 nvkm_info(&volt->base.subdev, "The default voltage is %duV\n", uv);
165 plat = nv_device_to_platform(nv_device(parent)); 172
166 173 volt->vdd = tdev->vdd;
167 uv = regulator_get_voltage(plat->gpu->vdd); 174
168 nv_info(priv, "The default voltage is %duV\n", uv); 175 volt->base.vid_nr = ARRAY_SIZE(gk20a_cvb_coef);
169 176 nvkm_debug(&volt->base.subdev, "%s - vid_nr = %d\n", __func__,
170 priv->vdd = plat->gpu->vdd; 177 volt->base.vid_nr);
171 priv->base.vid_get = gk20a_volt_vid_get; 178 for (i = 0; i < volt->base.vid_nr; i++) {
172 priv->base.vid_set = gk20a_volt_vid_set; 179 volt->base.vid[i].vid = i;
173 priv->base.set_id = gk20a_volt_set_id; 180 volt->base.vid[i].uv =
174 181 gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
175 volt->vid_nr = ARRAY_SIZE(gk20a_cvb_coef); 182 tdev->gpu_speedo);
176 nv_debug(priv, "%s - vid_nr = %d\n", __func__, volt->vid_nr); 183 nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i,
177 for (i = 0; i < volt->vid_nr; i++) { 184 volt->base.vid[i].vid, volt->base.vid[i].uv);
178 volt->vid[i].vid = i;
179 volt->vid[i].uv = gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
180 plat->gpu_speedo);
181 nv_debug(priv, "%2d: vid=%d, uv=%d\n", i, volt->vid[i].vid,
182 volt->vid[i].uv);
183 } 185 }
184 186
185 return 0; 187 return 0;
186} 188}
187
188struct nvkm_oclass
189gk20a_volt_oclass = {
190 .handle = NV_SUBDEV(VOLT, 0xea),
191 .ofuncs = &(struct nvkm_ofuncs) {
192 .ctor = gk20a_volt_ctor,
193 .dtor = _nvkm_volt_dtor,
194 .init = _nvkm_volt_init,
195 .fini = _nvkm_volt_fini,
196 },
197};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
index b778deb32d93..d2bac1d77819 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
@@ -34,13 +34,13 @@ static const u8 tags[] = {
34int 34int
35nvkm_voltgpio_get(struct nvkm_volt *volt) 35nvkm_voltgpio_get(struct nvkm_volt *volt)
36{ 36{
37 struct nvkm_gpio *gpio = nvkm_gpio(volt); 37 struct nvkm_gpio *gpio = volt->subdev.device->gpio;
38 u8 vid = 0; 38 u8 vid = 0;
39 int i; 39 int i;
40 40
41 for (i = 0; i < ARRAY_SIZE(tags); i++) { 41 for (i = 0; i < ARRAY_SIZE(tags); i++) {
42 if (volt->vid_mask & (1 << i)) { 42 if (volt->vid_mask & (1 << i)) {
43 int ret = gpio->get(gpio, 0, tags[i], 0xff); 43 int ret = nvkm_gpio_get(gpio, 0, tags[i], 0xff);
44 if (ret < 0) 44 if (ret < 0)
45 return ret; 45 return ret;
46 vid |= ret << i; 46 vid |= ret << i;
@@ -53,12 +53,12 @@ nvkm_voltgpio_get(struct nvkm_volt *volt)
53int 53int
54nvkm_voltgpio_set(struct nvkm_volt *volt, u8 vid) 54nvkm_voltgpio_set(struct nvkm_volt *volt, u8 vid)
55{ 55{
56 struct nvkm_gpio *gpio = nvkm_gpio(volt); 56 struct nvkm_gpio *gpio = volt->subdev.device->gpio;
57 int i; 57 int i;
58 58
59 for (i = 0; i < ARRAY_SIZE(tags); i++, vid >>= 1) { 59 for (i = 0; i < ARRAY_SIZE(tags); i++, vid >>= 1) {
60 if (volt->vid_mask & (1 << i)) { 60 if (volt->vid_mask & (1 << i)) {
61 int ret = gpio->set(gpio, 0, tags[i], 0xff, vid & 1); 61 int ret = nvkm_gpio_set(gpio, 0, tags[i], 0xff, vid & 1);
62 if (ret < 0) 62 if (ret < 0)
63 return ret; 63 return ret;
64 } 64 }
@@ -70,7 +70,8 @@ nvkm_voltgpio_set(struct nvkm_volt *volt, u8 vid)
70int 70int
71nvkm_voltgpio_init(struct nvkm_volt *volt) 71nvkm_voltgpio_init(struct nvkm_volt *volt)
72{ 72{
73 struct nvkm_gpio *gpio = nvkm_gpio(volt); 73 struct nvkm_subdev *subdev = &volt->subdev;
74 struct nvkm_gpio *gpio = subdev->device->gpio;
74 struct dcb_gpio_func func; 75 struct dcb_gpio_func func;
75 int i; 76 int i;
76 77
@@ -82,11 +83,11 @@ nvkm_voltgpio_init(struct nvkm_volt *volt)
82 */ 83 */
83 for (i = 0; i < ARRAY_SIZE(tags); i++) { 84 for (i = 0; i < ARRAY_SIZE(tags); i++) {
84 if (volt->vid_mask & (1 << i)) { 85 if (volt->vid_mask & (1 << i)) {
85 int ret = gpio->find(gpio, 0, tags[i], 0xff, &func); 86 int ret = nvkm_gpio_find(gpio, 0, tags[i], 0xff, &func);
86 if (ret) { 87 if (ret) {
87 if (ret != -ENOENT) 88 if (ret != -ENOENT)
88 return ret; 89 return ret;
89 nv_debug(volt, "VID bit %d has no GPIO\n", i); 90 nvkm_debug(subdev, "VID bit %d has no GPIO\n", i);
90 volt->vid_mask &= ~(1 << i); 91 volt->vid_mask &= ~(1 << i);
91 } 92 }
92 } 93 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
index 0ac5a3f8c9a8..23409387abb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
@@ -21,35 +21,24 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include <subdev/volt.h> 24#include "priv.h"
25 25
26struct nv40_volt_priv { 26static const struct nvkm_volt_func
27 struct nvkm_volt base; 27nv40_volt = {
28 .vid_get = nvkm_voltgpio_get,
29 .vid_set = nvkm_voltgpio_set,
28}; 30};
29 31
30static int 32int
31nv40_volt_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 33nv40_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
32 struct nvkm_oclass *oclass, void *data, u32 size,
33 struct nvkm_object **pobject)
34{ 34{
35 struct nv40_volt_priv *priv; 35 struct nvkm_volt *volt;
36 int ret; 36 int ret;
37 37
38 ret = nvkm_volt_create(parent, engine, oclass, &priv); 38 ret = nvkm_volt_new_(&nv40_volt, device, index, &volt);
39 *pobject = nv_object(priv); 39 *pvolt = volt;
40 if (ret) 40 if (ret)
41 return ret; 41 return ret;
42 42
43 return 0; 43 return nvkm_voltgpio_init(volt);
44} 44}
45
46struct nvkm_oclass
47nv40_volt_oclass = {
48 .handle = NV_SUBDEV(VOLT, 0x40),
49 .ofuncs = &(struct nvkm_ofuncs) {
50 .ctor = nv40_volt_ctor,
51 .dtor = _nvkm_volt_dtor,
52 .init = _nvkm_volt_init,
53 .fini = _nvkm_volt_fini,
54 },
55};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
new file mode 100644
index 000000000000..394f37c723af
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -0,0 +1,20 @@
1#ifndef __NVKM_VOLT_PRIV_H__
2#define __NVKM_VOLT_PRIV_H__
3#define nvkm_volt(p) container_of((p), struct nvkm_volt, subdev)
4#include <subdev/volt.h>
5
6void nvkm_volt_ctor(const struct nvkm_volt_func *, struct nvkm_device *,
7 int index, struct nvkm_volt *);
8int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *,
9 int index, struct nvkm_volt **);
10
11struct nvkm_volt_func {
12 int (*vid_get)(struct nvkm_volt *);
13 int (*vid_set)(struct nvkm_volt *, u8 vid);
14 int (*set_id)(struct nvkm_volt *, u8 id, int condition);
15};
16
17int nvkm_voltgpio_init(struct nvkm_volt *);
18int nvkm_voltgpio_get(struct nvkm_volt *);
19int nvkm_voltgpio_set(struct nvkm_volt *, u8);
20#endif