aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/Kbuild51
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/atom.h222
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base.c53
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base.h31
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base507c.c286
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base827c.c71
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base907c.c110
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base917c.c48
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.c70
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h50
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c115
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core827d.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c)35
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core907d.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c)34
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core917d.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c)34
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c110
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs.c52
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs.h14
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs507a.c145
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs907a.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk110.c)22
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/cursc37a.c50
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/dac507d.c44
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/dac907d.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c)30
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c2238
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h89
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c511
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.h78
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head507d.c325
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head827d.c124
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head907d.c284
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head917d.c100
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc37d.c212
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/lut.c95
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/lut.h15
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/oimm.c51
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/oimm.h8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/oimm507b.c52
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly.c57
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly.h30
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly507e.c217
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly827e.c107
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly907e.c70
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly917e.c45
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/pior507d.c44
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor507d.c44
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor907d.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c)31
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sorc37d.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c)30
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimm.c47
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimm.h8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c86
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c641
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h96
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c278
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h47
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cla06f.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/clc37b.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/clc37e.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/device.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/disp.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/fifo.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/mem.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/mmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h16
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/user.h19
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/engine.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c39
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c35
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c90
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c50
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vmm.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vmm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c4558
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvif/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvif/device.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvif/disp.c60
-rw-r--r--drivers/gpu/drm/nouveau/nvif/fifo.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvif/mem.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvif/mmu.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvif/user.c64
-rw-r--r--drivers/gpu/drm/nouveau/nvif/userc361.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c)21
-rw-r--r--drivers/gpu/drm/nouveau/nvif/vmm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/engine.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c)29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c73
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c74
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/changv100.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm200.c)24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c146
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h190
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm200.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c204
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c61
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgv100.c81
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dacgf119.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgv100.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c179
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h102
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c91
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c427
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c105
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c207
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c101
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c175
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c120
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp77.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp89.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/wimmgv100.c82
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c184
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c)27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c119
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c373
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c155
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c225
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c306
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/usergv100.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c423
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h79
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c114
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c190
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c120
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c48
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c215
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c417
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h109
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c143
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c178
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c175
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c180
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c120
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c79
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c179
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c69
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c206
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c3
322 files changed, 15430 insertions, 7999 deletions
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 9c0c650655e9..b17843dd050d 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -38,14 +38,16 @@ nouveau-y += nouveau_vmm.o
38 38
39# DRM - modesetting 39# DRM - modesetting
40nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o 40nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
41nouveau-y += nouveau_bios.o
41nouveau-y += nouveau_connector.o 42nouveau-y += nouveau_connector.o
42nouveau-y += nouveau_display.o 43nouveau-y += nouveau_display.o
43nouveau-y += nv50_display.o
44nouveau-y += nouveau_dp.o 44nouveau-y += nouveau_dp.o
45nouveau-y += nouveau_fbcon.o 45nouveau-y += nouveau_fbcon.o
46nouveau-y += nv04_fbcon.o 46nouveau-y += nv04_fbcon.o
47nouveau-y += nv50_fbcon.o 47nouveau-y += nv50_fbcon.o
48nouveau-y += nvc0_fbcon.o 48nouveau-y += nvc0_fbcon.o
49include $(src)/dispnv04/Kbuild
50include $(src)/dispnv50/Kbuild
49 51
50# DRM - command submission 52# DRM - command submission
51nouveau-y += nouveau_abi16.o 53nouveau-y += nouveau_abi16.o
@@ -59,8 +61,4 @@ nouveau-y += nv50_fence.o
59nouveau-y += nv84_fence.o 61nouveau-y += nv84_fence.o
60nouveau-y += nvc0_fence.o 62nouveau-y += nvc0_fence.o
61 63
62# DRM - prehistoric modesetting (NV04-G7x)
63nouveau-y += nouveau_bios.o
64include $(src)/dispnv04/Kbuild
65
66obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o 64obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
new file mode 100644
index 000000000000..849b0f45afb8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -0,0 +1,51 @@
1nouveau-y += dispnv50/disp.o
2nouveau-y += dispnv50/lut.o
3
4nouveau-y += dispnv50/core.o
5nouveau-y += dispnv50/core507d.o
6nouveau-y += dispnv50/core827d.o
7nouveau-y += dispnv50/core907d.o
8nouveau-y += dispnv50/core917d.o
9nouveau-y += dispnv50/corec37d.o
10
11nouveau-y += dispnv50/dac507d.o
12nouveau-y += dispnv50/dac907d.o
13
14nouveau-y += dispnv50/pior507d.o
15
16nouveau-y += dispnv50/sor507d.o
17nouveau-y += dispnv50/sor907d.o
18nouveau-y += dispnv50/sorc37d.o
19
20nouveau-y += dispnv50/head.o
21nouveau-y += dispnv50/head507d.o
22nouveau-y += dispnv50/head827d.o
23nouveau-y += dispnv50/head907d.o
24nouveau-y += dispnv50/head917d.o
25nouveau-y += dispnv50/headc37d.o
26
27nouveau-y += dispnv50/wimm.o
28nouveau-y += dispnv50/wimmc37b.o
29
30nouveau-y += dispnv50/wndw.o
31nouveau-y += dispnv50/wndwc37e.o
32
33nouveau-y += dispnv50/base.o
34nouveau-y += dispnv50/base507c.o
35nouveau-y += dispnv50/base827c.o
36nouveau-y += dispnv50/base907c.o
37nouveau-y += dispnv50/base917c.o
38
39nouveau-y += dispnv50/curs.o
40nouveau-y += dispnv50/curs507a.o
41nouveau-y += dispnv50/curs907a.o
42nouveau-y += dispnv50/cursc37a.o
43
44nouveau-y += dispnv50/oimm.o
45nouveau-y += dispnv50/oimm507b.o
46
47nouveau-y += dispnv50/ovly.o
48nouveau-y += dispnv50/ovly507e.o
49nouveau-y += dispnv50/ovly827e.o
50nouveau-y += dispnv50/ovly907e.o
51nouveau-y += dispnv50/ovly917e.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
new file mode 100644
index 000000000000..908feb1fc60f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -0,0 +1,222 @@
1#ifndef __NV50_KMS_ATOM_H__
2#define __NV50_KMS_ATOM_H__
3#define nv50_atom(p) container_of((p), struct nv50_atom, state)
4#include <drm/drm_atomic.h>
5
6struct nv50_atom {
7 struct drm_atomic_state state;
8
9 struct list_head outp;
10 bool lock_core;
11 bool flush_disable;
12};
13
14#define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)
15
16struct nv50_head_atom {
17 struct drm_crtc_state state;
18
19 struct {
20 u32 mask;
21 u32 olut;
22 } wndw;
23
24 struct {
25 u16 iW;
26 u16 iH;
27 u16 oW;
28 u16 oH;
29 } view;
30
31 struct nv50_head_mode {
32 bool interlace;
33 u32 clock;
34 struct {
35 u16 active;
36 u16 synce;
37 u16 blanke;
38 u16 blanks;
39 } h;
40 struct {
41 u32 active;
42 u16 synce;
43 u16 blanke;
44 u16 blanks;
45 u16 blank2s;
46 u16 blank2e;
47 u16 blankus;
48 } v;
49 } mode;
50
51 struct {
52 bool visible;
53 u32 handle;
54 u64 offset:40;
55 u8 buffer:1;
56 u8 mode:4;
57 u8 size:2;
58 u8 range:2;
59 u8 output_mode:2;
60 } olut;
61
62 struct {
63 bool visible;
64 u32 handle;
65 u64 offset:40;
66 u8 format;
67 u8 kind:7;
68 u8 layout:1;
69 u8 blockh:4;
70 u16 blocks:12;
71 u32 pitch:20;
72 u16 x;
73 u16 y;
74 u16 w;
75 u16 h;
76 } core;
77
78 struct {
79 bool visible;
80 u32 handle;
81 u64 offset:40;
82 u8 layout:2;
83 u8 format:8;
84 } curs;
85
86 struct {
87 u8 depth;
88 u8 cpp;
89 u16 x;
90 u16 y;
91 u16 w;
92 u16 h;
93 } base;
94
95 struct {
96 u8 cpp;
97 } ovly;
98
99 struct {
100 bool enable:1;
101 u8 bits:2;
102 u8 mode:4;
103 } dither;
104
105 struct {
106 struct {
107 u16 cos:12;
108 u16 sin:12;
109 } sat;
110 } procamp;
111
112 struct {
113 u8 nhsync:1;
114 u8 nvsync:1;
115 u8 depth:4;
116 } or;
117
118 union nv50_head_atom_mask {
119 struct {
120 bool olut:1;
121 bool core:1;
122 bool curs:1;
123 bool view:1;
124 bool mode:1;
125 bool base:1;
126 bool ovly:1;
127 bool dither:1;
128 bool procamp:1;
129 bool or:1;
130 };
131 u16 mask;
132 } set, clr;
133};
134
135static inline struct nv50_head_atom *
136nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
137{
138 struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
139 if (IS_ERR(statec))
140 return (void *)statec;
141 return nv50_head_atom(statec);
142}
143
144#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)
145
146struct nv50_wndw_atom {
147 struct drm_plane_state state;
148
149 struct drm_property_blob *ilut;
150 bool visible;
151
152 struct {
153 u32 handle;
154 u16 offset:12;
155 bool awaken:1;
156 } ntfy;
157
158 struct {
159 u32 handle;
160 u16 offset:12;
161 u32 acquire;
162 u32 release;
163 } sema;
164
165 struct {
166 u32 handle;
167 struct {
168 u64 offset:40;
169 u8 buffer:1;
170 u8 enable:2;
171 u8 mode:4;
172 u8 size:2;
173 u8 range:2;
174 u8 output_mode:2;
175 } i;
176 } xlut;
177
178 struct {
179 u8 mode:2;
180 u8 interval:4;
181
182 u8 colorspace:2;
183 u8 format;
184 u8 kind:7;
185 u8 layout:1;
186 u8 blockh:4;
187 u16 blocks[3];
188 u32 pitch[3];
189 u16 w;
190 u16 h;
191
192 u32 handle[6];
193 u64 offset[6];
194 } image;
195
196 struct {
197 u16 sx;
198 u16 sy;
199 u16 sw;
200 u16 sh;
201 u16 dw;
202 u16 dh;
203 } scale;
204
205 struct {
206 u16 x;
207 u16 y;
208 } point;
209
210 union nv50_wndw_atom_mask {
211 struct {
212 bool ntfy:1;
213 bool sema:1;
214 bool xlut:1;
215 bool image:1;
216 bool scale:1;
217 bool point:1;
218 };
219 u8 mask;
220 } set, clr;
221};
222#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base.c b/drivers/gpu/drm/nouveau/dispnv50/base.c
new file mode 100644
index 000000000000..7c752acf2b48
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base.c
@@ -0,0 +1,53 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "base.h"
23
24#include <nvif/class.h>
25
26int
27nv50_base_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
28{
29 struct {
30 s32 oclass;
31 int version;
32 int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
33 } bases[] = {
34 { GK110_DISP_BASE_CHANNEL_DMA, 0, base917c_new },
35 { GK104_DISP_BASE_CHANNEL_DMA, 0, base917c_new },
36 { GF110_DISP_BASE_CHANNEL_DMA, 0, base907c_new },
37 { GT214_DISP_BASE_CHANNEL_DMA, 0, base827c_new },
38 { GT200_DISP_BASE_CHANNEL_DMA, 0, base827c_new },
39 { G82_DISP_BASE_CHANNEL_DMA, 0, base827c_new },
40 { NV50_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
41 {}
42 };
43 struct nv50_disp *disp = nv50_disp(drm->dev);
44 int cid;
45
46 cid = nvif_mclass(&disp->disp->object, bases);
47 if (cid < 0) {
48 NV_ERROR(drm, "No supported base class\n");
49 return cid;
50 }
51
52 return bases[cid].new(drm, head, bases[cid].oclass, pwndw);
53}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base.h b/drivers/gpu/drm/nouveau/dispnv50/base.h
new file mode 100644
index 000000000000..e7f14f230f35
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base.h
@@ -0,0 +1,31 @@
1#ifndef __NV50_KMS_BASE_H__
2#define __NV50_KMS_BASE_H__
3#include "wndw.h"
4
5int base507c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
6int base507c_new_(const struct nv50_wndw_func *, const u32 *format,
7 struct nouveau_drm *, int head, s32 oclass,
8 u32 interlock_data, struct nv50_wndw **);
9extern const u32 base507c_format[];
10int base507c_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
11 struct nv50_head_atom *);
12void base507c_release(struct nv50_wndw *, struct nv50_wndw_atom *,
13 struct nv50_head_atom *);
14void base507c_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
15void base507c_sema_clr(struct nv50_wndw *);
16void base507c_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
17void base507c_ntfy_clr(struct nv50_wndw *);
18void base507c_xlut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
19void base507c_xlut_clr(struct nv50_wndw *);
20void base507c_image_clr(struct nv50_wndw *);
21void base507c_update(struct nv50_wndw *, u32 *);
22
23int base827c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
24
25int base907c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
26extern const struct nv50_wndw_func base907c;
27
28int base917c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
29
30int nv50_base_new(struct nouveau_drm *, int head, struct nv50_wndw **);
31#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
new file mode 100644
index 000000000000..d5e295ca2caa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -0,0 +1,286 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "base.h"
23
24#include <nvif/cl507c.h>
25#include <nvif/event.h>
26
27#include <drm/drm_atomic_helper.h>
28#include <drm/drm_plane_helper.h>
29#include "nouveau_bo.h"
30
31void
32base507c_update(struct nv50_wndw *wndw, u32 *interlock)
33{
34 u32 *push;
35 if ((push = evo_wait(&wndw->wndw, 2))) {
36 evo_mthd(push, 0x0080, 1);
37 evo_data(push, interlock[NV50_DISP_INTERLOCK_CORE]);
38 evo_kick(push, &wndw->wndw);
39 }
40}
41
42void
43base507c_image_clr(struct nv50_wndw *wndw)
44{
45 u32 *push;
46 if ((push = evo_wait(&wndw->wndw, 4))) {
47 evo_mthd(push, 0x0084, 1);
48 evo_data(push, 0x00000000);
49 evo_mthd(push, 0x00c0, 1);
50 evo_data(push, 0x00000000);
51 evo_kick(push, &wndw->wndw);
52 }
53}
54
55static void
56base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
57{
58 u32 *push;
59 if ((push = evo_wait(&wndw->wndw, 10))) {
60 evo_mthd(push, 0x0084, 1);
61 evo_data(push, asyw->image.mode << 8 |
62 asyw->image.interval << 4);
63 evo_mthd(push, 0x00c0, 1);
64 evo_data(push, asyw->image.handle[0]);
65 evo_mthd(push, 0x0800, 5);
66 evo_data(push, asyw->image.offset[0] >> 8);
67 evo_data(push, 0x00000000);
68 evo_data(push, asyw->image.h << 16 | asyw->image.w);
69 evo_data(push, asyw->image.layout << 20 |
70 (asyw->image.pitch[0] >> 8) << 8 |
71 asyw->image.blocks[0] << 8 |
72 asyw->image.blockh);
73 evo_data(push, asyw->image.kind << 16 |
74 asyw->image.format << 8);
75 evo_kick(push, &wndw->wndw);
76 }
77}
78
79void
80base507c_xlut_clr(struct nv50_wndw *wndw)
81{
82 u32 *push;
83 if ((push = evo_wait(&wndw->wndw, 2))) {
84 evo_mthd(push, 0x00e0, 1);
85 evo_data(push, 0x00000000);
86 evo_kick(push, &wndw->wndw);
87 }
88}
89
90void
91base507c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
92{
93 u32 *push;
94 if ((push = evo_wait(&wndw->wndw, 2))) {
95 evo_mthd(push, 0x00e0, 1);
96 evo_data(push, 0x40000000);
97 evo_kick(push, &wndw->wndw);
98 }
99}
100
101int
102base507c_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
103 struct nvif_device *device)
104{
105 s64 time = nvif_msec(device, 2000ULL,
106 u32 data = nouveau_bo_rd32(bo, offset / 4);
107 if ((data & 0xc0000000) == 0x40000000)
108 break;
109 usleep_range(1, 2);
110 );
111 return time < 0 ? time : 0;
112}
113
114void
115base507c_ntfy_clr(struct nv50_wndw *wndw)
116{
117 u32 *push;
118 if ((push = evo_wait(&wndw->wndw, 2))) {
119 evo_mthd(push, 0x00a4, 1);
120 evo_data(push, 0x00000000);
121 evo_kick(push, &wndw->wndw);
122 }
123}
124
125void
126base507c_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
127{
128 u32 *push;
129 if ((push = evo_wait(&wndw->wndw, 3))) {
130 evo_mthd(push, 0x00a0, 2);
131 evo_data(push, asyw->ntfy.awaken << 30 | asyw->ntfy.offset);
132 evo_data(push, asyw->ntfy.handle);
133 evo_kick(push, &wndw->wndw);
134 }
135}
136
137void
138base507c_ntfy_reset(struct nouveau_bo *bo, u32 offset)
139{
140 nouveau_bo_wr32(bo, offset / 4, 0x00000000);
141}
142
143void
144base507c_sema_clr(struct nv50_wndw *wndw)
145{
146 u32 *push;
147 if ((push = evo_wait(&wndw->wndw, 2))) {
148 evo_mthd(push, 0x0094, 1);
149 evo_data(push, 0x00000000);
150 evo_kick(push, &wndw->wndw);
151 }
152}
153
154void
155base507c_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
156{
157 u32 *push;
158 if ((push = evo_wait(&wndw->wndw, 5))) {
159 evo_mthd(push, 0x0088, 4);
160 evo_data(push, asyw->sema.offset);
161 evo_data(push, asyw->sema.acquire);
162 evo_data(push, asyw->sema.release);
163 evo_data(push, asyw->sema.handle);
164 evo_kick(push, &wndw->wndw);
165 }
166}
167
168void
169base507c_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
170 struct nv50_head_atom *asyh)
171{
172 asyh->base.cpp = 0;
173}
174
175int
176base507c_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
177 struct nv50_head_atom *asyh)
178{
179 const struct drm_framebuffer *fb = asyw->state.fb;
180 int ret;
181
182 if (!fb->format->depth)
183 return -EINVAL;
184
185 ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
186 DRM_PLANE_HELPER_NO_SCALING,
187 DRM_PLANE_HELPER_NO_SCALING,
188 false, true);
189 if (ret)
190 return ret;
191
192 if (!wndw->func->ilut) {
193 if ((asyh->base.cpp != 1) ^ (fb->format->cpp[0] != 1))
194 asyh->state.color_mgmt_changed = true;
195 }
196
197 asyh->base.depth = fb->format->depth;
198 asyh->base.cpp = fb->format->cpp[0];
199 asyh->base.x = asyw->state.src.x1 >> 16;
200 asyh->base.y = asyw->state.src.y1 >> 16;
201 asyh->base.w = asyw->state.fb->width;
202 asyh->base.h = asyw->state.fb->height;
203 return 0;
204}
205
/* Framebuffer formats accepted by the NV50 base (primary plane) channel.
 * Zero-terminated list of DRM fourcc codes, passed to nv50_wndw_new_().
 * Shared by the 507c/827c/907c variants; 917c extends it. */
const u32
base507c_format[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	0
};
220
221static const struct nv50_wndw_func
222base507c = {
223 .acquire = base507c_acquire,
224 .release = base507c_release,
225 .sema_set = base507c_sema_set,
226 .sema_clr = base507c_sema_clr,
227 .ntfy_reset = base507c_ntfy_reset,
228 .ntfy_set = base507c_ntfy_set,
229 .ntfy_clr = base507c_ntfy_clr,
230 .ntfy_wait_begun = base507c_ntfy_wait_begun,
231 .olut_core = 1,
232 .xlut_set = base507c_xlut_set,
233 .xlut_clr = base507c_xlut_clr,
234 .image_set = base507c_image_set,
235 .image_clr = base507c_image_clr,
236 .update = base507c_update,
237};
238
239int
240base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
241 struct nouveau_drm *drm, int head, s32 oclass, u32 interlock_data,
242 struct nv50_wndw **pwndw)
243{
244 struct nv50_disp_base_channel_dma_v0 args = {
245 .head = head,
246 };
247 struct nv50_disp *disp = nv50_disp(drm->dev);
248 struct nv50_wndw *wndw;
249 int ret;
250
251 ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_PRIMARY,
252 "base", head, format, BIT(head),
253 NV50_DISP_INTERLOCK_BASE, interlock_data, &wndw);
254 if (*pwndw = wndw, ret)
255 return ret;
256
257 ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
258 &oclass, head, &args, sizeof(args),
259 disp->sync->bo.offset, &wndw->wndw);
260 if (ret) {
261 NV_ERROR(drm, "base%04x allocation failed: %d\n", oclass, ret);
262 return ret;
263 }
264
265 ret = nvif_notify_init(&wndw->wndw.base.user, wndw->notify.func,
266 false, NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
267 &(struct nvif_notify_uevent_req) {},
268 sizeof(struct nvif_notify_uevent_req),
269 sizeof(struct nvif_notify_uevent_rep),
270 &wndw->notify);
271 if (ret)
272 return ret;
273
274 wndw->ntfy = NV50_DISP_BASE_NTFY(wndw->id);
275 wndw->sema = NV50_DISP_BASE_SEM0(wndw->id);
276 wndw->data = 0x00000000;
277 return 0;
278}
279
280int
281base507c_new(struct nouveau_drm *drm, int head, s32 oclass,
282 struct nv50_wndw **pwndw)
283{
284 return base507c_new_(&base507c, base507c_format, drm, head, oclass,
285 0x00000002 << (head * 8), pwndw);
286}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
new file mode 100644
index 000000000000..73646819a0d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "base.h"
23
24static void
25base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
26{
27 u32 *push;
28 if ((push = evo_wait(&wndw->wndw, 10))) {
29 evo_mthd(push, 0x0084, 1);
30 evo_data(push, asyw->image.mode << 8 |
31 asyw->image.interval << 4);
32 evo_mthd(push, 0x00c0, 1);
33 evo_data(push, asyw->image.handle[0]);
34 evo_mthd(push, 0x0800, 5);
35 evo_data(push, asyw->image.offset[0] >> 8);
36 evo_data(push, 0x00000000);
37 evo_data(push, asyw->image.h << 16 | asyw->image.w);
38 evo_data(push, asyw->image.layout << 20 |
39 (asyw->image.pitch[0] >> 8) << 8 |
40 asyw->image.blocks[0] << 8 |
41 asyw->image.blockh);
42 evo_data(push, asyw->image.format << 8);
43 evo_kick(push, &wndw->wndw);
44 }
45}
46
47static const struct nv50_wndw_func
48base827c = {
49 .acquire = base507c_acquire,
50 .release = base507c_release,
51 .sema_set = base507c_sema_set,
52 .sema_clr = base507c_sema_clr,
53 .ntfy_reset = base507c_ntfy_reset,
54 .ntfy_set = base507c_ntfy_set,
55 .ntfy_clr = base507c_ntfy_clr,
56 .ntfy_wait_begun = base507c_ntfy_wait_begun,
57 .olut_core = 1,
58 .xlut_set = base507c_xlut_set,
59 .xlut_clr = base507c_xlut_clr,
60 .image_set = base827c_image_set,
61 .image_clr = base507c_image_clr,
62 .update = base507c_update,
63};
64
65int
66base827c_new(struct nouveau_drm *drm, int head, s32 oclass,
67 struct nv50_wndw **pwndw)
68{
69 return base507c_new_(&base827c, base507c_format, drm, head, oclass,
70 0x00000002 << (head * 8), pwndw);
71}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
new file mode 100644
index 000000000000..a562fc94ce59
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -0,0 +1,110 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "base.h"
23
24static void
25base907c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
26{
27 u32 *push;
28 if ((push = evo_wait(&wndw->wndw, 10))) {
29 evo_mthd(push, 0x0084, 1);
30 evo_data(push, asyw->image.mode << 8 |
31 asyw->image.interval << 4);
32 evo_mthd(push, 0x00c0, 1);
33 evo_data(push, asyw->image.handle[0]);
34 evo_mthd(push, 0x0400, 5);
35 evo_data(push, asyw->image.offset[0] >> 8);
36 evo_data(push, 0x00000000);
37 evo_data(push, asyw->image.h << 16 | asyw->image.w);
38 evo_data(push, asyw->image.layout << 24 |
39 (asyw->image.pitch[0] >> 8) << 8 |
40 asyw->image.blocks[0] << 8 |
41 asyw->image.blockh);
42 evo_data(push, asyw->image.format << 8);
43 evo_kick(push, &wndw->wndw);
44 }
45}
46
47static void
48base907c_xlut_clr(struct nv50_wndw *wndw)
49{
50 u32 *push;
51 if ((push = evo_wait(&wndw->wndw, 6))) {
52 evo_mthd(push, 0x00e0, 1);
53 evo_data(push, 0x00000000);
54 evo_mthd(push, 0x00e8, 1);
55 evo_data(push, 0x00000000);
56 evo_mthd(push, 0x00fc, 1);
57 evo_data(push, 0x00000000);
58 evo_kick(push, &wndw->wndw);
59 }
60}
61
62static void
63base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
64{
65 u32 *push;
66 if ((push = evo_wait(&wndw->wndw, 6))) {
67 evo_mthd(push, 0x00e0, 3);
68 evo_data(push, asyw->xlut.i.enable << 30 |
69 asyw->xlut.i.mode << 24);
70 evo_data(push, asyw->xlut.i.offset >> 8);
71 evo_data(push, 0x40000000);
72 evo_mthd(push, 0x00fc, 1);
73 evo_data(push, asyw->xlut.handle);
74 evo_kick(push, &wndw->wndw);
75 }
76}
77
78static void
79base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
80{
81 asyw->xlut.i.mode = 7;
82 asyw->xlut.i.enable = 2;
83}
84
/* Window function table for the GF110 (Fermi) base channel.  Non-static
 * and declared in base.h because base917c reuses it with an extended
 * format list.  Unlike earlier classes it provides a window-owned input
 * LUT (.ilut) with its own set/clr hooks. */
const struct nv50_wndw_func
base907c = {
	.acquire = base507c_acquire,
	.release = base507c_release,
	.sema_set = base507c_sema_set,
	.sema_clr = base507c_sema_clr,
	.ntfy_reset = base507c_ntfy_reset,
	.ntfy_set = base507c_ntfy_set,
	.ntfy_clr = base507c_ntfy_clr,
	.ntfy_wait_begun = base507c_ntfy_wait_begun,
	.ilut = base907c_ilut,
	.olut_core = true,
	.xlut_set = base907c_xlut_set,
	.xlut_clr = base907c_xlut_clr,
	.image_set = base907c_image_set,
	.image_clr = base507c_image_clr,
	.update = base507c_update,
};
103
104int
105base907c_new(struct nouveau_drm *drm, int head, s32 oclass,
106 struct nv50_wndw **pwndw)
107{
108 return base507c_new_(&base907c, base507c_format, drm, head, oclass,
109 0x00000002 << (head * 4), pwndw);
110}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base917c.c b/drivers/gpu/drm/nouveau/dispnv50/base917c.c
new file mode 100644
index 000000000000..54d705bb81a5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base917c.c
@@ -0,0 +1,48 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "base.h"
23#include "atom.h"
24
/* Framebuffer formats for the GK104+ base channel: the base507c set plus
 * the RGB-ordered 10-bit formats.  Zero-terminated list of DRM fourcc
 * codes, passed to nv50_wndw_new_() via base507c_new_(). */
const u32
base917c_format[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_ARGB2101010,
	0
};
41
42int
43base917c_new(struct nouveau_drm *drm, int head, s32 oclass,
44 struct nv50_wndw **pwndw)
45{
46 return base507c_new_(&base907c, base917c_format, drm, head, oclass,
47 0x00000002 << (head * 4), pwndw);
48}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
new file mode 100644
index 000000000000..f3c49adb1bdb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -0,0 +1,70 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "core.h"
23
24#include <nvif/class.h>
25
26void
27nv50_core_del(struct nv50_core **pcore)
28{
29 struct nv50_core *core = *pcore;
30 if (core) {
31 nv50_dmac_destroy(&core->chan);
32 kfree(*pcore);
33 *pcore = NULL;
34 }
35}
36
/* Allocate the core (master) display channel, selecting the newest
 * channel class supported by this GPU's display engine.  The table must
 * stay ordered newest-first: nvif_mclass() returns the index of the first
 * entry the hardware accepts. */
int
nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
{
	struct {
		s32 oclass;
		int version;
		int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
	} cores[] = {
		{ GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
		{ GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
		{ GP100_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
		{ GM200_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
		{ GM107_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
		{ GK110_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
		{ GK104_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
		{ GF110_DISP_CORE_CHANNEL_DMA, 0, core907d_new },
		{ GT214_DISP_CORE_CHANNEL_DMA, 0, core827d_new },
		{ GT206_DISP_CORE_CHANNEL_DMA, 0, core827d_new },
		{ GT200_DISP_CORE_CHANNEL_DMA, 0, core827d_new },
		{ G82_DISP_CORE_CHANNEL_DMA, 0, core827d_new },
		{ NV50_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
		{}
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	int cid;

	cid = nvif_mclass(&disp->disp->object, cores);
	if (cid < 0) {
		NV_ERROR(drm, "No supported core channel class\n");
		return cid;
	}

	/* Delegate to the generation-specific constructor. */
	return cores[cid].new(drm, cores[cid].oclass, pcore);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
new file mode 100644
index 000000000000..8470df9dd13d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -0,0 +1,50 @@
1#ifndef __NV50_KMS_CORE_H__
2#define __NV50_KMS_CORE_H__
3#include "disp.h"
4#include "atom.h"
5
6struct nv50_core {
7 const struct nv50_core_func *func;
8 struct nv50_dmac chan;
9};
10
11int nv50_core_new(struct nouveau_drm *, struct nv50_core **);
12void nv50_core_del(struct nv50_core **);
13
14struct nv50_core_func {
15 void (*init)(struct nv50_core *);
16 void (*ntfy_init)(struct nouveau_bo *, u32 offset);
17 int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset,
18 struct nvif_device *);
19 void (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
20
21 const struct nv50_head_func *head;
22 const struct nv50_outp_func {
23 void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
24 struct nv50_head_atom *);
25 } *dac, *pior, *sor;
26};
27
28int core507d_new(struct nouveau_drm *, s32, struct nv50_core **);
29int core507d_new_(const struct nv50_core_func *, struct nouveau_drm *, s32,
30 struct nv50_core **);
31void core507d_init(struct nv50_core *);
32void core507d_ntfy_init(struct nouveau_bo *, u32);
33int core507d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
34void core507d_update(struct nv50_core *, u32 *, bool);
35
36extern const struct nv50_outp_func dac507d;
37extern const struct nv50_outp_func sor507d;
38extern const struct nv50_outp_func pior507d;
39
40int core827d_new(struct nouveau_drm *, s32, struct nv50_core **);
41
42int core907d_new(struct nouveau_drm *, s32, struct nv50_core **);
43extern const struct nv50_outp_func dac907d;
44extern const struct nv50_outp_func sor907d;
45
46int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
47
48int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
49extern const struct nv50_outp_func sorc37d;
50#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
new file mode 100644
index 000000000000..e7fcfa6e6467
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -0,0 +1,115 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "core.h"
23#include "head.h"
24
25#include <nvif/cl507d.h>
26
27#include "nouveau_bo.h"
28
29void
30core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
31{
32 u32 *push;
33 if ((push = evo_wait(&core->chan, 5))) {
34 if (ntfy) {
35 evo_mthd(push, 0x0084, 1);
36 evo_data(push, 0x80000000 | NV50_DISP_CORE_NTFY);
37 }
38 evo_mthd(push, 0x0080, 2);
39 evo_data(push, interlock[NV50_DISP_INTERLOCK_BASE] |
40 interlock[NV50_DISP_INTERLOCK_OVLY]);
41 evo_data(push, 0x00000000);
42 evo_kick(push, &core->chan);
43 }
44}
45
46int
47core507d_ntfy_wait_done(struct nouveau_bo *bo, u32 offset,
48 struct nvif_device *device)
49{
50 s64 time = nvif_msec(device, 2000ULL,
51 if (nouveau_bo_rd32(bo, offset / 4))
52 break;
53 usleep_range(1, 2);
54 );
55 return time < 0 ? time : 0;
56}
57
58void
59core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
60{
61 nouveau_bo_wr32(bo, offset / 4, 0x00000000);
62}
63
64void
65core507d_init(struct nv50_core *core)
66{
67 u32 *push;
68 if ((push = evo_wait(&core->chan, 2))) {
69 evo_mthd(push, 0x0088, 1);
70 evo_data(push, core->chan.sync.handle);
71 evo_kick(push, &core->chan);
72 }
73}
74
/* Core channel implementation for the original NV50 display class, with
 * the matching head/dac/sor/pior output function tables. */
static const struct nv50_core_func
core507d = {
	.init = core507d_init,
	.ntfy_init = core507d_ntfy_init,
	.ntfy_wait_done = core507d_ntfy_wait_done,
	.update = core507d_update,
	.head = &head507d,
	.dac = &dac507d,
	.sor = &sor507d,
	.pior = &pior507d,
};
86
87int
88core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm,
89 s32 oclass, struct nv50_core **pcore)
90{
91 struct nv50_disp_core_channel_dma_v0 args = {};
92 struct nv50_disp *disp = nv50_disp(drm->dev);
93 struct nv50_core *core;
94 int ret;
95
96 if (!(core = *pcore = kzalloc(sizeof(*core), GFP_KERNEL)))
97 return -ENOMEM;
98 core->func = func;
99
100 ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
101 &oclass, 0, &args, sizeof(args),
102 disp->sync->bo.offset, &core->chan);
103 if (ret) {
104 NV_ERROR(drm, "core%04x allocation failed: %d\n", oclass, ret);
105 return ret;
106 }
107
108 return 0;
109}
110
111int
112core507d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
113{
114 return core507d_new_(&core507d, drm, oclass, pcore);
115}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c b/drivers/gpu/drm/nouveau/dispnv50/core827d.c
index 93451e46570c..6123a068f836 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core827d.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2015 Red Hat Inc. 2 * Copyright 2018 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -18,21 +18,24 @@
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 21 */
24#include "dmacnv50.h" 22#include "core.h"
25#include "rootnv50.h" 23#include "head.h"
26
27#include <nvif/class.h>
28 24
29const struct nv50_disp_dmac_oclass 25static const struct nv50_core_func
30gt200_disp_base_oclass = { 26core827d = {
31 .base.oclass = GT200_DISP_BASE_CHANNEL_DMA, 27 .init = core507d_init,
32 .base.minver = 0, 28 .ntfy_init = core507d_ntfy_init,
33 .base.maxver = 0, 29 .ntfy_wait_done = core507d_ntfy_wait_done,
34 .ctor = nv50_disp_base_new, 30 .update = core507d_update,
35 .func = &nv50_disp_dmac_func, 31 .head = &head827d,
36 .mthd = &g84_disp_base_chan_mthd, 32 .dac = &dac507d,
37 .chid = 1, 33 .sor = &sor507d,
34 .pior = &pior507d,
38}; 35};
36
37int
38core827d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
39{
40 return core507d_new_(&core827d, drm, oclass, pcore);
41}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
index 780a1d973634..ef822f813435 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2015 Red Hat Inc. 2 * Copyright 2018 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -18,21 +18,23 @@
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 21 */
24#include "dmacnv50.h" 22#include "core.h"
25#include "rootnv50.h" 23#include "head.h"
26
27#include <nvif/class.h>
28 24
29const struct nv50_disp_dmac_oclass 25static const struct nv50_core_func
30gk104_disp_base_oclass = { 26core907d = {
31 .base.oclass = GK104_DISP_BASE_CHANNEL_DMA, 27 .init = core507d_init,
32 .base.minver = 0, 28 .ntfy_init = core507d_ntfy_init,
33 .base.maxver = 0, 29 .ntfy_wait_done = core507d_ntfy_wait_done,
34 .ctor = nv50_disp_base_new, 30 .update = core507d_update,
35 .func = &gf119_disp_dmac_func, 31 .head = &head907d,
36 .mthd = &gf119_disp_base_chan_mthd, 32 .dac = &dac907d,
37 .chid = 1, 33 .sor = &sor907d,
38}; 34};
35
36int
37core907d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
38{
39 return core507d_new_(&core907d, drm, oclass, pcore);
40}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
index d8bdd246c8ed..392338df5bfd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2015 Red Hat Inc. 2 * Copyright 2018 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -18,21 +18,23 @@
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 21 */
24#include "dmacnv50.h" 22#include "core.h"
25#include "rootnv50.h" 23#include "head.h"
26
27#include <nvif/class.h>
28 24
29const struct nv50_disp_dmac_oclass 25static const struct nv50_core_func
30gk110_disp_base_oclass = { 26core917d = {
31 .base.oclass = GK110_DISP_BASE_CHANNEL_DMA, 27 .init = core507d_init,
32 .base.minver = 0, 28 .ntfy_init = core507d_ntfy_init,
33 .base.maxver = 0, 29 .ntfy_wait_done = core507d_ntfy_wait_done,
34 .ctor = nv50_disp_base_new, 30 .update = core507d_update,
35 .func = &gf119_disp_dmac_func, 31 .head = &head917d,
36 .mthd = &gf119_disp_base_chan_mthd, 32 .dac = &dac907d,
37 .chid = 1, 33 .sor = &sor907d,
38}; 34};
35
36int
37core917d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
38{
39 return core507d_new_(&core917d, drm, oclass, pcore);
40}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
new file mode 100644
index 000000000000..b5c17c948918
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -0,0 +1,110 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "core.h"
23#include "head.h"
24
25#include <nouveau_bo.h>
26
27static void
28corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
29{
30 u32 *push;
31 if ((push = evo_wait(&core->chan, 9))) {
32 if (ntfy) {
33 evo_mthd(push, 0x020c, 1);
34 evo_data(push, 0x00001000 | NV50_DISP_CORE_NTFY);
35 }
36
37 evo_mthd(push, 0x0218, 2);
38 evo_data(push, interlock[NV50_DISP_INTERLOCK_CURS]);
39 evo_data(push, interlock[NV50_DISP_INTERLOCK_WNDW]);
40 evo_mthd(push, 0x0200, 1);
41 evo_data(push, 0x00000001);
42
43 if (ntfy) {
44 evo_mthd(push, 0x020c, 1);
45 evo_data(push, 0x00000000);
46 }
47 evo_kick(push, &core->chan);
48 }
49}
50
51int
52corec37d_ntfy_wait_done(struct nouveau_bo *bo, u32 offset,
53 struct nvif_device *device)
54{
55 u32 data;
56 s64 time = nvif_msec(device, 2000ULL,
57 data = nouveau_bo_rd32(bo, offset / 4 + 0);
58 if ((data & 0xc0000000) == 0x80000000)
59 break;
60 usleep_range(1, 2);
61 );
62 return time < 0 ? time : 0;
63}
64
65void
66corec37d_ntfy_init(struct nouveau_bo *bo, u32 offset)
67{
68 nouveau_bo_wr32(bo, offset / 4 + 0, 0x00000000);
69 nouveau_bo_wr32(bo, offset / 4 + 1, 0x00000000);
70 nouveau_bo_wr32(bo, offset / 4 + 2, 0x00000000);
71 nouveau_bo_wr32(bo, offset / 4 + 3, 0x00000000);
72}
73
74void
75corec37d_init(struct nv50_core *core)
76{
77 const u32 windows = 8; /*XXX*/
78 u32 *push, i;
79 if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) {
80 evo_mthd(push, 0x0208, 1);
81 evo_data(push, core->chan.sync.handle);
82 for (i = 0; i < windows; i++) {
83 evo_mthd(push, 0x1000 + (i * 0x080), 3);
84 evo_data(push, i >> 1);
85 evo_data(push, 0x00000017);
86 evo_data(push, 0x00000000);
87 evo_mthd(push, 0x1010 + (i * 0x080), 1);
88 evo_data(push, 0x00127fff);
89 }
90 evo_mthd(push, 0x0200, 1);
91 evo_data(push, 0x00000001);
92 evo_kick(push, &core->chan);
93 }
94}
95
96static const struct nv50_core_func
97corec37d = {
98 .init = corec37d_init,
99 .ntfy_init = corec37d_ntfy_init,
100 .ntfy_wait_done = corec37d_ntfy_wait_done,
101 .update = corec37d_update,
102 .head = &headc37d,
103 .sor = &sorc37d,
104};
105
106int
107corec37d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
108{
109 return core507d_new_(&corec37d, drm, oclass, pcore);
110}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
new file mode 100644
index 000000000000..f592087338c4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -0,0 +1,52 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "curs.h"
23
24#include <nvif/class.h>
25
26int
27nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
28{
29 struct {
30 s32 oclass;
31 int version;
32 int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
33 } curses[] = {
34 { GV100_DISP_CURSOR, 0, cursc37a_new },
35 { GK104_DISP_CURSOR, 0, curs907a_new },
36 { GF110_DISP_CURSOR, 0, curs907a_new },
37 { GT214_DISP_CURSOR, 0, curs507a_new },
38 { G82_DISP_CURSOR, 0, curs507a_new },
39 { NV50_DISP_CURSOR, 0, curs507a_new },
40 {}
41 };
42 struct nv50_disp *disp = nv50_disp(drm->dev);
43 int cid;
44
45 cid = nvif_mclass(&disp->disp->object, curses);
46 if (cid < 0) {
47 NV_ERROR(drm, "No supported cursor immediate class\n");
48 return cid;
49 }
50
51 return curses[cid].new(drm, head, curses[cid].oclass, pwndw);
52}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.h b/drivers/gpu/drm/nouveau/dispnv50/curs.h
new file mode 100644
index 000000000000..23aff5fd6747
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.h
@@ -0,0 +1,14 @@
1#ifndef __NV50_KMS_CURS_H__
2#define __NV50_KMS_CURS_H__
3#include "wndw.h"
4
5int curs507a_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
6int curs507a_new_(const struct nv50_wimm_func *, struct nouveau_drm *,
7 int head, s32 oclass, u32 interlock_data,
8 struct nv50_wndw **);
9
10int curs907a_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
11int cursc37a_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
12
13int nv50_curs_new(struct nouveau_drm *, int head, struct nv50_wndw **);
14#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
new file mode 100644
index 000000000000..291c08117ab6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -0,0 +1,145 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "curs.h"
23#include "core.h"
24#include "head.h"
25
26#include <nvif/cl507a.h>
27
28#include <drm/drm_atomic_helper.h>
29#include <drm/drm_plane_helper.h>
30
31static void
32curs507a_update(struct nv50_wndw *wndw, u32 *interlock)
33{
34 nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000);
35}
36
37static void
38curs507a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
39{
40 nvif_wr32(&wndw->wimm.base.user, 0x0084, asyw->point.y << 16 |
41 asyw->point.x);
42}
43
44const struct nv50_wimm_func
45curs507a = {
46 .point = curs507a_point,
47 .update = curs507a_update,
48};
49
50static void
51curs507a_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
52 struct nv50_wndw_atom *asyw)
53{
54 u32 handle = nv50_disp(wndw->plane.dev)->core->chan.vram.handle;
55 u32 offset = asyw->image.offset[0];
56 if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
57 asyh->curs.handle = handle;
58 asyh->curs.offset = offset;
59 asyh->set.curs = asyh->curs.visible;
60 }
61}
62
63static void
64curs507a_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
65 struct nv50_head_atom *asyh)
66{
67 asyh->curs.visible = false;
68}
69
70static int
71curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
72 struct nv50_head_atom *asyh)
73{
74 struct nv50_head *head = nv50_head(asyw->state.crtc);
75 int ret;
76
77 ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
78 DRM_PLANE_HELPER_NO_SCALING,
79 DRM_PLANE_HELPER_NO_SCALING,
80 true, true);
81 asyh->curs.visible = asyw->state.visible;
82 if (ret || !asyh->curs.visible)
83 return ret;
84
85 if (asyw->image.w != asyw->image.h)
86 return -EINVAL;
87
88 ret = head->func->curs_layout(head, asyw, asyh);
89 if (ret)
90 return ret;
91
92 return head->func->curs_format(head, asyw, asyh);
93}
94
95static const u32
96curs507a_format[] = {
97 DRM_FORMAT_ARGB8888,
98 0
99};
100
101static const struct nv50_wndw_func
102curs507a_wndw = {
103 .acquire = curs507a_acquire,
104 .release = curs507a_release,
105 .prepare = curs507a_prepare,
106};
107
108int
109curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
110 int head, s32 oclass, u32 interlock_data,
111 struct nv50_wndw **pwndw)
112{
113 struct nv50_disp_cursor_v0 args = {
114 .head = head,
115 };
116 struct nv50_disp *disp = nv50_disp(drm->dev);
117 struct nv50_wndw *wndw;
118 int ret;
119
120 ret = nv50_wndw_new_(&curs507a_wndw, drm->dev, DRM_PLANE_TYPE_CURSOR,
121 "curs", head, curs507a_format, BIT(head),
122 NV50_DISP_INTERLOCK_CURS, interlock_data, &wndw);
123 if (*pwndw = wndw, ret)
124 return ret;
125
126 ret = nvif_object_init(&disp->disp->object, 0, oclass, &args,
127 sizeof(args), &wndw->wimm.base.user);
128 if (ret) {
129 NV_ERROR(drm, "curs%04x allocation failed: %d\n", oclass, ret);
130 return ret;
131 }
132
133 nvif_object_map(&wndw->wimm.base.user, NULL, 0);
134 wndw->immd = func;
135 wndw->ctxdma.parent = &disp->core->chan.base.user;
136 return 0;
137}
138
139int
140curs507a_new(struct nouveau_drm *drm, int head, s32 oclass,
141 struct nv50_wndw **pwndw)
142{
143 return curs507a_new_(&curs507a, drm, head, oclass,
144 0x00000001 << (head * 8), pwndw);
145}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk110.c b/drivers/gpu/drm/nouveau/dispnv50/curs907a.c
index a9aa69c82e8e..d742362de03e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk110.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs907a.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2016 Red Hat Inc. 2 * Copyright 2018 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -18,17 +18,13 @@
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */ 21 */
24#include "changk104.h" 22#include "curs.h"
25
26#include <nvif/class.h>
27 23
28const struct nvkm_fifo_chan_oclass 24int
29gk110_fifo_gpfifo_oclass = { 25curs907a_new(struct nouveau_drm *drm, int head, s32 oclass,
30 .base.oclass = KEPLER_CHANNEL_GPFIFO_B, 26 struct nv50_wndw **pwndw)
31 .base.minver = 0, 27{
32 .base.maxver = 0, 28 return curs507a_new_(&curs507a, drm, head, oclass,
33 .ctor = gk104_fifo_gpfifo_new, 29 0x00000001 << (head * 4), pwndw);
34}; 30}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
new file mode 100644
index 000000000000..23fb29d41efe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
@@ -0,0 +1,50 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "curs.h"
23#include "atom.h"
24
25static void
26cursc37a_update(struct nv50_wndw *wndw, u32 *interlock)
27{
28 nvif_wr32(&wndw->wimm.base.user, 0x0200, 0x00000001);
29}
30
31static void
32cursc37a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
33{
34 nvif_wr32(&wndw->wimm.base.user, 0x0208, asyw->point.y << 16 |
35 asyw->point.x);
36}
37
38static const struct nv50_wimm_func
39cursc37a = {
40 .point = cursc37a_point,
41 .update = cursc37a_update,
42};
43
44int
45cursc37a_new(struct nouveau_drm *drm, int head, s32 oclass,
46 struct nv50_wndw **pwndw)
47{
48 return curs507a_new_(&cursc37a, drm, head, oclass,
49 0x00000001 << head, pwndw);
50}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/dac507d.c b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
new file mode 100644
index 000000000000..2a10ef7d30a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
@@ -0,0 +1,44 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "core.h"
23
24static void
25dac507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
26 struct nv50_head_atom *asyh)
27{
28 u32 *push, sync = 0;
29 if ((push = evo_wait(&core->chan, 3))) {
30 if (asyh) {
31 sync |= asyh->or.nvsync << 1;
32 sync |= asyh->or.nhsync;
33 }
34 evo_mthd(push, 0x0400 + (or * 0x080), 2);
35 evo_data(push, ctrl);
36 evo_data(push, sync);
37 evo_kick(push, &core->chan);
38 }
39}
40
41const struct nv50_outp_func
42dac507d = {
43 .ctrl = dac507d_ctrl,
44};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
index 00a7f3564450..11e87fa53fac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2015 Red Hat Inc. 2 * Copyright 2018 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -18,20 +18,22 @@
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 21 */
24#include "channv50.h" 22#include "core.h"
25#include "rootnv50.h"
26 23
27#include <nvif/class.h> 24static void
25dac907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
26 struct nv50_head_atom *asyh)
27{
28 u32 *push;
29 if ((push = evo_wait(&core->chan, 2))) {
30 evo_mthd(push, 0x0180 + (or * 0x020), 1);
31 evo_data(push, ctrl);
32 evo_kick(push, &core->chan);
33 }
34}
28 35
29const struct nv50_disp_pioc_oclass 36const struct nv50_outp_func
30gt215_disp_curs_oclass = { 37dac907d = {
31 .base.oclass = GT214_DISP_CURSOR, 38 .ctrl = dac907d_ctrl,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_curs_new,
35 .func = &nv50_disp_pioc_func,
36 .chid = { 7, 7 },
37}; 39};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
new file mode 100644
index 000000000000..b83465ae7c1b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -0,0 +1,2238 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "disp.h"
25#include "atom.h"
26#include "core.h"
27#include "head.h"
28#include "wndw.h"
29
30#include <linux/dma-mapping.h>
31#include <linux/hdmi.h>
32
33#include <drm/drmP.h>
34#include <drm/drm_atomic_helper.h>
35#include <drm/drm_crtc_helper.h>
36#include <drm/drm_dp_helper.h>
37#include <drm/drm_fb_helper.h>
38#include <drm/drm_plane_helper.h>
39#include <drm/drm_edid.h>
40
41#include <nvif/class.h>
42#include <nvif/cl0002.h>
43#include <nvif/cl5070.h>
44#include <nvif/cl507d.h>
45#include <nvif/event.h>
46
47#include "nouveau_drv.h"
48#include "nouveau_dma.h"
49#include "nouveau_gem.h"
50#include "nouveau_connector.h"
51#include "nouveau_encoder.h"
52#include "nouveau_fence.h"
53#include "nouveau_fbcon.h"
54
55#include <subdev/bios/dp.h>
56
57/******************************************************************************
58 * Atomic state
59 *****************************************************************************/
60
61struct nv50_outp_atom {
62 struct list_head head;
63
64 struct drm_encoder *encoder;
65 bool flush_disable;
66
67 union nv50_outp_atom_mask {
68 struct {
69 bool ctrl:1;
70 };
71 u8 mask;
72 } set, clr;
73};
74
75/******************************************************************************
76 * EVO channel
77 *****************************************************************************/
78
79static int
80nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
81 const s32 *oclass, u8 head, void *data, u32 size,
82 struct nv50_chan *chan)
83{
84 struct nvif_sclass *sclass;
85 int ret, i, n;
86
87 chan->device = device;
88
89 ret = n = nvif_object_sclass_get(disp, &sclass);
90 if (ret < 0)
91 return ret;
92
93 while (oclass[0]) {
94 for (i = 0; i < n; i++) {
95 if (sclass[i].oclass == oclass[0]) {
96 ret = nvif_object_init(disp, 0, oclass[0],
97 data, size, &chan->user);
98 if (ret == 0)
99 nvif_object_map(&chan->user, NULL, 0);
100 nvif_object_sclass_put(&sclass);
101 return ret;
102 }
103 }
104 oclass++;
105 }
106
107 nvif_object_sclass_put(&sclass);
108 return -ENOSYS;
109}
110
111static void
112nv50_chan_destroy(struct nv50_chan *chan)
113{
114 nvif_object_fini(&chan->user);
115}
116
117/******************************************************************************
118 * DMA EVO channel
119 *****************************************************************************/
120
121void
122nv50_dmac_destroy(struct nv50_dmac *dmac)
123{
124 nvif_object_fini(&dmac->vram);
125 nvif_object_fini(&dmac->sync);
126
127 nv50_chan_destroy(&dmac->base);
128
129 nvif_mem_fini(&dmac->push);
130}
131
132int
133nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
134 const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
135 struct nv50_dmac *dmac)
136{
137 struct nouveau_cli *cli = (void *)device->object.client;
138 struct nv50_disp_core_channel_dma_v0 *args = data;
139 int ret;
140
141 mutex_init(&dmac->lock);
142
143 ret = nvif_mem_init_map(&cli->mmu, NVIF_MEM_COHERENT, 0x1000,
144 &dmac->push);
145 if (ret)
146 return ret;
147
148 dmac->ptr = dmac->push.object.map.ptr;
149
150 args->pushbuf = nvif_handle(&dmac->push.object);
151
152 ret = nv50_chan_create(device, disp, oclass, head, data, size,
153 &dmac->base);
154 if (ret)
155 return ret;
156
157 if (!syncbuf)
158 return 0;
159
160 ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
161 &(struct nv_dma_v0) {
162 .target = NV_DMA_V0_TARGET_VRAM,
163 .access = NV_DMA_V0_ACCESS_RDWR,
164 .start = syncbuf + 0x0000,
165 .limit = syncbuf + 0x0fff,
166 }, sizeof(struct nv_dma_v0),
167 &dmac->sync);
168 if (ret)
169 return ret;
170
171 ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
172 &(struct nv_dma_v0) {
173 .target = NV_DMA_V0_TARGET_VRAM,
174 .access = NV_DMA_V0_ACCESS_RDWR,
175 .start = 0,
176 .limit = device->info.ram_user - 1,
177 }, sizeof(struct nv_dma_v0),
178 &dmac->vram);
179 if (ret)
180 return ret;
181
182 return ret;
183}
184
185/******************************************************************************
186 * EVO channel helpers
187 *****************************************************************************/
188u32 *
189evo_wait(struct nv50_dmac *evoc, int nr)
190{
191 struct nv50_dmac *dmac = evoc;
192 struct nvif_device *device = dmac->base.device;
193 u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
194
195 mutex_lock(&dmac->lock);
196 if (put + nr >= (PAGE_SIZE / 4) - 8) {
197 dmac->ptr[put] = 0x20000000;
198
199 nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
200 if (nvif_msec(device, 2000,
201 if (!nvif_rd32(&dmac->base.user, 0x0004))
202 break;
203 ) < 0) {
204 mutex_unlock(&dmac->lock);
205 pr_err("nouveau: evo channel stalled\n");
206 return NULL;
207 }
208
209 put = 0;
210 }
211
212 return dmac->ptr + put;
213}
214
215void
216evo_kick(u32 *push, struct nv50_dmac *evoc)
217{
218 struct nv50_dmac *dmac = evoc;
219 nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
220 mutex_unlock(&dmac->lock);
221}
222
223/******************************************************************************
224 * Output path helpers
225 *****************************************************************************/
226static void
227nv50_outp_release(struct nouveau_encoder *nv_encoder)
228{
229 struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
230 struct {
231 struct nv50_disp_mthd_v1 base;
232 } args = {
233 .base.version = 1,
234 .base.method = NV50_DISP_MTHD_V1_RELEASE,
235 .base.hasht = nv_encoder->dcb->hasht,
236 .base.hashm = nv_encoder->dcb->hashm,
237 };
238
239 nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
240 nv_encoder->or = -1;
241 nv_encoder->link = 0;
242}
243
244static int
245nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
246{
247 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
248 struct nv50_disp *disp = nv50_disp(drm->dev);
249 struct {
250 struct nv50_disp_mthd_v1 base;
251 struct nv50_disp_acquire_v0 info;
252 } args = {
253 .base.version = 1,
254 .base.method = NV50_DISP_MTHD_V1_ACQUIRE,
255 .base.hasht = nv_encoder->dcb->hasht,
256 .base.hashm = nv_encoder->dcb->hashm,
257 };
258 int ret;
259
260 ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
261 if (ret) {
262 NV_ERROR(drm, "error acquiring output path: %d\n", ret);
263 return ret;
264 }
265
266 nv_encoder->or = args.info.or;
267 nv_encoder->link = args.info.link;
268 return 0;
269}
270
271static int
272nv50_outp_atomic_check_view(struct drm_encoder *encoder,
273 struct drm_crtc_state *crtc_state,
274 struct drm_connector_state *conn_state,
275 struct drm_display_mode *native_mode)
276{
277 struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
278 struct drm_display_mode *mode = &crtc_state->mode;
279 struct drm_connector *connector = conn_state->connector;
280 struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
281 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
282
283 NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
284 asyc->scaler.full = false;
285 if (!native_mode)
286 return 0;
287
288 if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
289 switch (connector->connector_type) {
290 case DRM_MODE_CONNECTOR_LVDS:
291 case DRM_MODE_CONNECTOR_eDP:
292 /* Force use of scaler for non-EDID modes. */
293 if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
294 break;
295 mode = native_mode;
296 asyc->scaler.full = true;
297 break;
298 default:
299 break;
300 }
301 } else {
302 mode = native_mode;
303 }
304
305 if (!drm_mode_equal(adjusted_mode, mode)) {
306 drm_mode_copy(adjusted_mode, mode);
307 crtc_state->mode_changed = true;
308 }
309
310 return 0;
311}
312
313static int
314nv50_outp_atomic_check(struct drm_encoder *encoder,
315 struct drm_crtc_state *crtc_state,
316 struct drm_connector_state *conn_state)
317{
318 struct nouveau_connector *nv_connector =
319 nouveau_connector(conn_state->connector);
320 return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
321 nv_connector->native_mode);
322}
323
324/******************************************************************************
325 * DAC
326 *****************************************************************************/
327static void
328nv50_dac_disable(struct drm_encoder *encoder)
329{
330 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
331 struct nv50_core *core = nv50_disp(encoder->dev)->core;
332 if (nv_encoder->crtc)
333 core->func->dac->ctrl(core, nv_encoder->or, 0x00000000, NULL);
334 nv_encoder->crtc = NULL;
335 nv50_outp_release(nv_encoder);
336}
337
338static void
339nv50_dac_enable(struct drm_encoder *encoder)
340{
341 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
342 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
343 struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
344 struct nv50_core *core = nv50_disp(encoder->dev)->core;
345
346 nv50_outp_acquire(nv_encoder);
347
348 core->func->dac->ctrl(core, nv_encoder->or, 1 << nv_crtc->index, asyh);
349 asyh->or.depth = 0;
350
351 nv_encoder->crtc = encoder->crtc;
352}
353
354static enum drm_connector_status
355nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
356{
357 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
358 struct nv50_disp *disp = nv50_disp(encoder->dev);
359 struct {
360 struct nv50_disp_mthd_v1 base;
361 struct nv50_disp_dac_load_v0 load;
362 } args = {
363 .base.version = 1,
364 .base.method = NV50_DISP_MTHD_V1_DAC_LOAD,
365 .base.hasht = nv_encoder->dcb->hasht,
366 .base.hashm = nv_encoder->dcb->hashm,
367 };
368 int ret;
369
370 args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval;
371 if (args.load.data == 0)
372 args.load.data = 340;
373
374 ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
375 if (ret || !args.load.load)
376 return connector_status_disconnected;
377
378 return connector_status_connected;
379}
380
381static const struct drm_encoder_helper_funcs
382nv50_dac_help = {
383 .atomic_check = nv50_outp_atomic_check,
384 .enable = nv50_dac_enable,
385 .disable = nv50_dac_disable,
386 .detect = nv50_dac_detect
387};
388
389static void
390nv50_dac_destroy(struct drm_encoder *encoder)
391{
392 drm_encoder_cleanup(encoder);
393 kfree(encoder);
394}
395
396static const struct drm_encoder_funcs
397nv50_dac_func = {
398 .destroy = nv50_dac_destroy,
399};
400
401static int
402nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
403{
404 struct nouveau_drm *drm = nouveau_drm(connector->dev);
405 struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
406 struct nvkm_i2c_bus *bus;
407 struct nouveau_encoder *nv_encoder;
408 struct drm_encoder *encoder;
409 int type = DRM_MODE_ENCODER_DAC;
410
411 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
412 if (!nv_encoder)
413 return -ENOMEM;
414 nv_encoder->dcb = dcbe;
415
416 bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
417 if (bus)
418 nv_encoder->i2c = &bus->i2c;
419
420 encoder = to_drm_encoder(nv_encoder);
421 encoder->possible_crtcs = dcbe->heads;
422 encoder->possible_clones = 0;
423 drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
424 "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
425 drm_encoder_helper_add(encoder, &nv50_dac_help);
426
427 drm_mode_connector_attach_encoder(connector, encoder);
428 return 0;
429}
430
431/******************************************************************************
432 * Audio
433 *****************************************************************************/
434static void
435nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
436{
437 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
438 struct nv50_disp *disp = nv50_disp(encoder->dev);
439 struct {
440 struct nv50_disp_mthd_v1 base;
441 struct nv50_disp_sor_hda_eld_v0 eld;
442 } args = {
443 .base.version = 1,
444 .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
445 .base.hasht = nv_encoder->dcb->hasht,
446 .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
447 (0x0100 << nv_crtc->index),
448 };
449
450 nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
451}
452
/* Upload the connector's ELD so audio can be streamed on the head
 * currently driven by @encoder.  No-op when the monitor's EDID does
 * not advertise audio support.
 */
static void
nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_connector *nv_connector;
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct __packed {
		struct {
			struct nv50_disp_mthd_v1 mthd;
			struct nv50_disp_sor_hda_eld_v0 eld;
		} base;
		u8 data[sizeof(nv_connector->base.eld)];
	} args = {
		.base.mthd.version = 1,
		.base.mthd.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
		.base.mthd.hasht = nv_encoder->dcb->hasht,
		.base.mthd.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
				   (0x0100 << nv_crtc->index),
	};

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	if (!drm_detect_monitor_audio(nv_connector->edid))
		return;

	memcpy(args.data, nv_connector->base.eld, sizeof(args.data));

	/* Only send the valid portion of the ELD. */
	nvif_mthd(&disp->disp->object, 0, &args,
		  sizeof(args.base) + drm_eld_size(args.data));
}
483
484/******************************************************************************
485 * HDMI
486 *****************************************************************************/
/* Power down the SOR's HDMI protocol engine for head @nv_crtc: the
 * SOR_HDMI_PWR method is sent with pwr.state left at zero.
 */
static void
nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht = nv_encoder->dcb->hasht,
		.base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
			      (0x0100 << nv_crtc->index),
	};

	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
}
505
/* Power up the SOR's HDMI protocol engine for @mode and upload the
 * AVI and HDMI vendor infoframes built from it.  Bails early for
 * non-HDMI (plain DVI) sinks.  HDMI implies possible audio, so the
 * ELD is programmed at the end as well.
 */
static void
nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
		u8 infoframes[2 * 17]; /* two frames, up to 17 bytes each */
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht = nv_encoder->dcb->hasht,
		.base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
			      (0x0100 << nv_crtc->index),
		.pwr.state = 1,
		.pwr.rekey = 56, /* binary driver, and tegra, constant */
	};
	struct nouveau_connector *nv_connector;
	u32 max_ac_packet;
	union hdmi_infoframe avi_frame;
	union hdmi_infoframe vendor_frame;
	int ret;
	int size;

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	if (!drm_detect_hdmi_monitor(nv_connector->edid))
		return;

	ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode,
						       false);
	if (!ret) {
		/* We have an AVI InfoFrame, populate it to the display */
		/* NOTE(review): hdmi_infoframe_pack() can return a negative
		 * errno, which would wrap in the u8 length field —
		 * presumably packing a just-built frame cannot fail; confirm.
		 */
		args.pwr.avi_infoframe_length
			= hdmi_infoframe_pack(&avi_frame, args.infoframes, 17);
	}

	ret = drm_hdmi_vendor_infoframe_from_display_mode(&vendor_frame.vendor.hdmi,
							  &nv_connector->base, mode);
	if (!ret) {
		/* We have a Vendor InfoFrame, populate it to the display */
		args.pwr.vendor_infoframe_length
			= hdmi_infoframe_pack(&vendor_frame,
					      args.infoframes
					      + args.pwr.avi_infoframe_length,
					      17);
	}

	/* Audio packet budget derived from the horizontal blanking width. */
	max_ac_packet = mode->htotal - mode->hdisplay;
	max_ac_packet -= args.pwr.rekey;
	max_ac_packet -= 18; /* constant from tegra */
	args.pwr.max_ac_packet = max_ac_packet / 32;

	/* Only send the infoframe bytes that were actually packed. */
	size = sizeof(args.base)
	     + sizeof(args.pwr)
	     + args.pwr.avi_infoframe_length
	     + args.pwr.vendor_infoframe_length;
	nvif_mthd(&disp->disp->object, 0, &args, size);
	nv50_audio_enable(encoder, mode);
}
567
568/******************************************************************************
569 * MST
570 *****************************************************************************/
#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)

/* MST master: per-SOR DP MST state wrapping the DRM topology manager. */
struct nv50_mstm {
	struct nouveau_encoder *outp;	/* SOR this topology hangs off */

	struct drm_dp_mst_topology_mgr mgr;
	/* One stream encoder per head the OR can drive (filled by
	 * nv50_mstm_new(), indexed by head in atomic_best_encoder).
	 */
	struct nv50_msto *msto[4];

	bool modified;	/* payload table changed; needs prepare/cleanup */
	bool disabled;	/* last link dropped; release OR in prepare */
	int links;	/* number of active MST links on this SOR */
};

/* MST connector: one per detected MST port. */
struct nv50_mstc {
	struct nv50_mstm *mstm;
	struct drm_dp_mst_port *port;	/* NULLed when the port goes away */
	struct drm_connector connector;

	struct drm_display_mode *native;	/* cached native mode */
	struct edid *edid;			/* cached EDID from the port */

	int pbn;	/* bandwidth (PBN) required by the current mode */
};

/* MST output: encoder tying a head to an MST connector. */
struct nv50_msto {
	struct drm_encoder encoder;

	struct nv50_head *head;		/* head driving this stream */
	struct nv50_mstc *mstc;		/* connector currently routed here */
	bool disabled;			/* pending cleanup after commit */
};
604
605static struct drm_dp_payload *
606nv50_msto_payload(struct nv50_msto *msto)
607{
608 struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
609 struct nv50_mstc *mstc = msto->mstc;
610 struct nv50_mstm *mstm = mstc->mstm;
611 int vcpi = mstc->port->vcpi.vcpi, i;
612
613 NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
614 for (i = 0; i < mstm->mgr.max_payloads; i++) {
615 struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
616 NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
617 mstm->outp->base.base.name, i, payload->vcpi,
618 payload->start_slot, payload->num_slots);
619 }
620
621 for (i = 0; i < mstm->mgr.max_payloads; i++) {
622 struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
623 if (payload->vcpi == vcpi)
624 return payload;
625 }
626
627 return NULL;
628}
629
/* Post-commit cleanup for one MST output: deallocate the VCPI once the
 * payload table no longer references it, and reset the head/mstc links
 * for outputs disabled by this commit.
 */
static void
nv50_msto_cleanup(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;

	NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
	if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto))
		drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
	if (msto->disabled) {
		msto->mstc = NULL;
		msto->head = NULL;
		msto->disabled = false;
	}
}
646
/* Pre-commit programming for one MST output: push the current VCPI
 * start slot / slot count to the hardware for this head.  When the
 * port has no active payload, zeros are sent, which clears the
 * allocation.
 */
static void
nv50_msto_prepare(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
		.base.hasht = mstm->outp->dcb->hasht,
		.base.hashm = (0xf0ff & mstm->outp->dcb->hashm) |
			      (0x0100 << msto->head->base.index),
	};

	NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
	if (mstc->port && mstc->port->vcpi.vcpi > 0) {
		struct drm_dp_payload *payload = nv50_msto_payload(msto);
		if (payload) {
			args.vcpi.start_slot = payload->start_slot;
			args.vcpi.num_slots = payload->num_slots;
			args.vcpi.pbn = mstc->port->vcpi.pbn;
			args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
		}
	}

	NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
		  msto->encoder.name, msto->head->base.base.name,
		  args.vcpi.start_slot, args.vcpi.num_slots,
		  args.vcpi.pbn, args.vcpi.aligned_pbn);
	nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
}
681
/* Atomic check for an MST output: compute the PBN the adjusted mode
 * needs and verify the topology can supply enough timeslots, then run
 * the common view check against the connector's native mode.
 */
static int
nv50_msto_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	struct nv50_mstc *mstc = nv50_mstc(conn_state->connector);
	struct nv50_mstm *mstm = mstc->mstm;
	/* Three colour components per pixel. */
	int bpp = conn_state->connector->display_info.bpc * 3;
	int slots;

	mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);

	/* Negative slot count means the mode doesn't fit. */
	slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
	if (slots < 0)
		return slots;

	return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
					   mstc->native);
}
701
/* Enable an MST output: find the connector routed to this encoder,
 * allocate its VCPI, and program the owning SOR.  The SOR itself is
 * acquired when its first MST link comes up.
 */
static void
nv50_msto_enable(struct drm_encoder *encoder)
{
	struct nv50_head *head = nv50_head(encoder->crtc);
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_mstc *mstc = NULL;
	struct nv50_mstm *mstm = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 proto, depth;
	int slots;
	bool r;

	/* Locate the connector whose atomic state routes to this encoder. */
	drm_connector_list_iter_begin(encoder->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->best_encoder == &msto->encoder) {
			mstc = nv50_mstc(connector);
			mstm = mstc->mstm;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (WARN_ON(!mstc))
		return;

	slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
	r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, slots);
	WARN_ON(!r);

	/* First active link on this SOR: acquire the OR. */
	if (!mstm->links++)
		nv50_outp_acquire(mstm->outp);

	/* DP protocol values mirror nv50_sor_enable()'s DP case.
	 * NOTE(review): presumably 0x8/0x9 select sub-link A/B — confirm.
	 */
	if (mstm->outp->link & 1)
		proto = 0x8;
	else
		proto = 0x9;

	/* OR depth encoding, same mapping as nv50_sor_enable(). */
	switch (mstc->connector.display_info.bpc) {
	case 6: depth = 0x2; break;
	case 8: depth = 0x5; break;
	case 10:
	default: depth = 0x6; break;
	}

	mstm->outp->update(mstm->outp, head->base.index,
			   nv50_head_atom(head->base.base.state), proto, depth);

	msto->head = head;
	msto->mstc = mstc;
	mstm->modified = true;
}
754
/* Disable an MST output: return its timeslots to the manager, detach
 * the head from the owning SOR (NULL asyh), and mark the mstm so the
 * payload table is reprogrammed.  When the last link drops, the OR is
 * released later, in nv50_mstm_prepare().
 */
static void
nv50_msto_disable(struct drm_encoder *encoder)
{
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;

	if (mstc->port)
		drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);

	mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
	mstm->modified = true;
	if (!--mstm->links)
		mstm->disabled = true;
	msto->disabled = true;
}
771
/* Encoder helpers shared by all per-head MST stream encoders. */
static const struct drm_encoder_helper_funcs
nv50_msto_help = {
	.disable = nv50_msto_disable,
	.enable = nv50_msto_enable,
	.atomic_check = nv50_msto_atomic_check,
};
778
/* Unregister an MST stream encoder from DRM and free its storage. */
static void
nv50_msto_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(nv50_msto(encoder));
}
786
/* Encoder functions for per-head MST stream encoders. */
static const struct drm_encoder_funcs
nv50_msto = {
	.destroy = nv50_msto_destroy,
};
791
792static int
793nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
794 struct nv50_msto **pmsto)
795{
796 struct nv50_msto *msto;
797 int ret;
798
799 if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
800 return -ENOMEM;
801
802 ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
803 DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
804 if (ret) {
805 kfree(*pmsto);
806 *pmsto = NULL;
807 return ret;
808 }
809
810 drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
811 msto->encoder.possible_crtcs = heads;
812 return 0;
813}
814
815static struct drm_encoder *
816nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
817 struct drm_connector_state *connector_state)
818{
819 struct nv50_head *head = nv50_head(connector_state->crtc);
820 struct nv50_mstc *mstc = nv50_mstc(connector);
821 if (mstc->port) {
822 struct nv50_mstm *mstm = mstc->mstm;
823 return &mstm->msto[head->base.index]->encoder;
824 }
825 return NULL;
826}
827
828static struct drm_encoder *
829nv50_mstc_best_encoder(struct drm_connector *connector)
830{
831 struct nv50_mstc *mstc = nv50_mstc(connector);
832 if (mstc->port) {
833 struct nv50_mstm *mstm = mstc->mstm;
834 return &mstm->msto[0]->encoder;
835 }
836 return NULL;
837}
838
/* Accept every mode here; bandwidth is validated later by the VCPI
 * slot check in nv50_msto_atomic_check().
 */
static enum drm_mode_status
nv50_mstc_mode_valid(struct drm_connector *connector,
		     struct drm_display_mode *mode)
{
	return MODE_OK;
}
845
846static int
847nv50_mstc_get_modes(struct drm_connector *connector)
848{
849 struct nv50_mstc *mstc = nv50_mstc(connector);
850 int ret = 0;
851
852 mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
853 drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
854 if (mstc->edid)
855 ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
856
857 if (!mstc->connector.display_info.bpc)
858 mstc->connector.display_info.bpc = 8;
859
860 if (mstc->native)
861 drm_mode_destroy(mstc->connector.dev, mstc->native);
862 mstc->native = nouveau_conn_native_mode(&mstc->connector);
863 return ret;
864}
865
/* Connector helpers for MST connectors. */
static const struct drm_connector_helper_funcs
nv50_mstc_help = {
	.get_modes = nv50_mstc_get_modes,
	.mode_valid = nv50_mstc_mode_valid,
	.best_encoder = nv50_mstc_best_encoder,
	.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
};
873
874static enum drm_connector_status
875nv50_mstc_detect(struct drm_connector *connector, bool force)
876{
877 struct nv50_mstc *mstc = nv50_mstc(connector);
878 if (!mstc->port)
879 return connector_status_disconnected;
880 return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
881}
882
883static void
884nv50_mstc_destroy(struct drm_connector *connector)
885{
886 struct nv50_mstc *mstc = nv50_mstc(connector);
887 drm_connector_cleanup(&mstc->connector);
888 kfree(mstc);
889}
890
/* Connector functions for MST connectors; atomic state handling is
 * shared with regular nouveau connectors.
 */
static const struct drm_connector_funcs
nv50_mstc = {
	.reset = nouveau_conn_reset,
	.detect = nv50_mstc_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = nv50_mstc_destroy,
	.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
	.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
	.atomic_set_property = nouveau_conn_atomic_set_property,
	.atomic_get_property = nouveau_conn_atomic_get_property,
};
902
/* Create a DRM connector for MST port @port and attach it to every
 * stream encoder of the owning SOR.  On failure *pmstc is reset to
 * NULL and a negative error code is returned.
 */
static int
nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
	      const char *path, struct nv50_mstc **pmstc)
{
	struct drm_device *dev = mstm->outp->base.base.dev;
	struct nv50_mstc *mstc;
	int ret, i;

	if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
		return -ENOMEM;
	mstc->mstm = mstm;
	mstc->port = port;

	ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		kfree(*pmstc);
		*pmstc = NULL;
		return ret;
	}

	drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);

	mstc->connector.funcs->reset(&mstc->connector);
	nouveau_conn_attach_properties(&mstc->connector);

	/* The connector may be driven by any of the SOR's stream encoders. */
	for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
		drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);

	drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
	drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
	drm_mode_connector_set_path_property(&mstc->connector, path);
	return 0;
}
937
938static void
939nv50_mstm_cleanup(struct nv50_mstm *mstm)
940{
941 struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
942 struct drm_encoder *encoder;
943 int ret;
944
945 NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
946 ret = drm_dp_check_act_status(&mstm->mgr);
947
948 ret = drm_dp_update_payload_part2(&mstm->mgr);
949
950 drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
951 if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
952 struct nv50_msto *msto = nv50_msto(encoder);
953 struct nv50_mstc *mstc = msto->mstc;
954 if (mstc && mstc->mstm == mstm)
955 nv50_msto_cleanup(msto);
956 }
957 }
958
959 mstm->modified = false;
960}
961
962static void
963nv50_mstm_prepare(struct nv50_mstm *mstm)
964{
965 struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
966 struct drm_encoder *encoder;
967 int ret;
968
969 NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
970 ret = drm_dp_update_payload_part1(&mstm->mgr);
971
972 drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
973 if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
974 struct nv50_msto *msto = nv50_msto(encoder);
975 struct nv50_mstc *mstc = msto->mstc;
976 if (mstc && mstc->mstm == mstm)
977 nv50_msto_prepare(msto);
978 }
979 }
980
981 if (mstm->disabled) {
982 if (!mstm->links)
983 nv50_outp_release(mstm->outp);
984 mstm->disabled = false;
985 }
986}
987
988static void
989nv50_mstm_hotplug(struct drm_dp_mst_topology_mgr *mgr)
990{
991 struct nv50_mstm *mstm = nv50_mstm(mgr);
992 drm_kms_helper_hotplug_event(mstm->outp->base.base.dev);
993}
994
/* Topology-manager callback: an MST port went away.  Unregister the
 * connector from userspace and fbcon, sever the port link under the
 * connection mutex so concurrent lookups see it gone, then drop the
 * final reference (actual freeing happens in nv50_mstc_destroy()).
 */
static void
nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_connector *connector)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nv50_mstc *mstc = nv50_mstc(connector);

	drm_connector_unregister(&mstc->connector);

	drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);

	drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL);
	mstc->port = NULL;
	drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);

	drm_connector_unreference(&mstc->connector);
}
1012
1013static void
1014nv50_mstm_register_connector(struct drm_connector *connector)
1015{
1016 struct nouveau_drm *drm = nouveau_drm(connector->dev);
1017
1018 drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
1019
1020 drm_connector_register(connector);
1021}
1022
1023static struct drm_connector *
1024nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
1025 struct drm_dp_mst_port *port, const char *path)
1026{
1027 struct nv50_mstm *mstm = nv50_mstm(mgr);
1028 struct nv50_mstc *mstc;
1029 int ret;
1030
1031 ret = nv50_mstc_new(mstm, port, path, &mstc);
1032 if (ret) {
1033 if (mstc)
1034 mstc->connector.funcs->destroy(&mstc->connector);
1035 return NULL;
1036 }
1037
1038 return &mstc->connector;
1039}
1040
/* Callbacks the DP MST topology manager invokes on topology changes. */
static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
	.add_connector = nv50_mstm_add_connector,
	.register_connector = nv50_mstm_register_connector,
	.destroy_connector = nv50_mstm_destroy_connector,
	.hotplug = nv50_mstm_hotplug,
};
1048
/* Service an MST sink interrupt: read the ESI status bytes, let the
 * topology manager process them, and ack whatever was handled.  Loops
 * until nothing was handled.  If the DPCD read fails, the sink is
 * assumed gone and MST mode is torn down.
 */
void
nv50_mstm_service(struct nv50_mstm *mstm)
{
	struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
	bool handled = true;
	int ret;
	u8 esi[8] = {};

	if (!aux)
		return;

	while (handled) {
		ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
		if (ret != 8) {
			/* Read failure: drop out of MST mode entirely. */
			drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
			return;
		}

		drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
		if (!handled)
			break;

		/* Ack the serviced event bits. */
		drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
	}
}
1074
1075void
1076nv50_mstm_remove(struct nv50_mstm *mstm)
1077{
1078 if (mstm)
1079 drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
1080}
1081
/* Flip MST mode on the sink (DPCD MSTM_CTRL, DP 1.2+ sinks only) and
 * tell the hardware via the SOR_DP_MST_LINK method.  @dpcd carries the
 * sink's DPCD revision on entry and is reused as scratch for the
 * MSTM_CTRL read-modify-write.  Returns 0 or a negative error code.
 */
static int
nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
{
	struct nouveau_encoder *outp = mstm->outp;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_link_v0 mst;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
		.base.hasht = outp->dcb->hasht,
		.base.hashm = outp->dcb->hashm,
		.mst.state = state,
	};
	struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
	struct nvif_object *disp = &drm->display->disp.object;
	int ret;

	/* MSTM_CTRL only exists on DPCD revision 1.2 and later. */
	if (dpcd >= 0x12) {
		ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
		if (ret < 0)
			return ret;

		dpcd &= ~DP_MST_EN;
		if (state)
			dpcd |= DP_MST_EN;

		ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
		if (ret < 0)
			return ret;
	}

	return nvif_mthd(disp, 0, &args, sizeof(args));
}
1116
/* Probe the sink for MST support and switch modes accordingly.
 * dpcd[0] is the DPCD revision; dpcd[1] is overwritten with MSTM_CAP.
 * A sink without MST gets dpcd[0] downgraded to 0x11 so callers fall
 * back to SST.  If starting the topology manager fails, the sink is
 * switched back out of MST mode.  Returns mgr.mst_state (non-zero if
 * MST is active), 0 when there is no mstm, or a negative error code.
 */
int
nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
{
	int ret, state = 0;

	if (!mstm)
		return 0;

	if (dpcd[0] >= 0x12) {
		ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
		if (ret < 0)
			return ret;

		if (!(dpcd[1] & DP_MST_CAP))
			dpcd[0] = 0x11;
		else
			state = allow;
	}

	ret = nv50_mstm_enable(mstm, dpcd[0], state);
	if (ret)
		return ret;

	ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
	if (ret)
		/* Undo the sink-side enable on manager failure. */
		return nv50_mstm_enable(mstm, dpcd[0], 0);

	return mstm->mgr.mst_state;
}
1146
1147static void
1148nv50_mstm_fini(struct nv50_mstm *mstm)
1149{
1150 if (mstm && mstm->mgr.mst_state)
1151 drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
1152}
1153
1154static void
1155nv50_mstm_init(struct nv50_mstm *mstm)
1156{
1157 if (mstm && mstm->mgr.mst_state)
1158 drm_dp_mst_topology_mgr_resume(&mstm->mgr);
1159}
1160
1161static void
1162nv50_mstm_del(struct nv50_mstm **pmstm)
1163{
1164 struct nv50_mstm *mstm = *pmstm;
1165 if (mstm) {
1166 kfree(*pmstm);
1167 *pmstm = NULL;
1168 }
1169}
1170
/* Create MST state for SOR @outp: one topology manager plus one stream
 * encoder per head the OR can drive.  @conn_base_id seeds connector
 * IDs for the topology manager.
 * NOTE(review): on error after drm_dp_mst_topology_mgr_init()
 * succeeds, *pmstm is left allocated for the caller to release via
 * nv50_mstm_del() — confirm all callers do so.
 */
static int
nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
	      int conn_base_id, struct nv50_mstm **pmstm)
{
	const int max_payloads = hweight8(outp->dcb->heads);
	struct drm_device *dev = outp->base.base.dev;
	struct nv50_mstm *mstm;
	int ret, i;
	u8 dpcd;

	/* This is a workaround for some monitors not functioning
	 * correctly in MST mode on initial module load. I think
	 * some bad interaction with the VBIOS may be responsible.
	 *
	 * A good ol' off and on again seems to work here ;)
	 */
	ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd);
	if (ret >= 0 && dpcd >= 0x12)
		drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);

	if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
		return -ENOMEM;
	mstm->outp = outp;
	mstm->mgr.cbs = &nv50_mstm;

	ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
					   max_payloads, conn_base_id);
	if (ret)
		return ret;

	/* One stream encoder per head this OR can drive. */
	for (i = 0; i < max_payloads; i++) {
		ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
				    i, &mstm->msto[i]);
		if (ret)
			return ret;
	}

	return 0;
}
1210
1211/******************************************************************************
1212 * SOR
1213 *****************************************************************************/
/* Program the core channel's SOR control method for @head.  With @asyh
 * NULL the head is being detached: its bit is cleared, and once the
 * head mask (low nibble) is empty the whole control word — including
 * the protocol field — is zeroed.  Otherwise the protocol and head bit
 * are ORed in and the OR depth is recorded on the head's atomic state.
 */
static void
nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
		struct nv50_head_atom *asyh, u8 proto, u8 depth)
{
	struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
	struct nv50_core *core = disp->core;

	if (!asyh) {
		nv_encoder->ctrl &= ~BIT(head);
		if (!(nv_encoder->ctrl & 0x0000000f))
			nv_encoder->ctrl = 0;
	} else {
		nv_encoder->ctrl |= proto << 8;
		nv_encoder->ctrl |= BIT(head);
		asyh->or.depth = depth;
	}

	core->func->sor->ctrl(core, nv_encoder->or, nv_encoder->ctrl, asyh);
}
1233
/* Disable a SOR: put a DP sink into D3 (sleep), detach the head via
 * the core channel, tear down audio/HDMI state, then release the OR.
 * Does nothing beyond clearing the crtc link if no head was attached.
 */
static void
nv50_sor_disable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);

	nv_encoder->crtc = NULL;

	if (nv_crtc) {
		struct nvkm_i2c_aux *aux = nv_encoder->aux;
		u8 pwr;

		/* DP sinks: request the D3 power state over AUX. */
		if (aux) {
			int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1);
			if (ret == 0) {
				pwr &= ~DP_SET_POWER_MASK;
				pwr |= DP_SET_POWER_D3;
				nvkm_wraux(aux, DP_SET_POWER, &pwr, 1);
			}
		}

		/* NULL asyh detaches the head from this OR. */
		nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
		nv50_audio_disable(encoder, nv_crtc);
		nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
		nv50_outp_release(nv_encoder);
	}
}
1261
/* Enable a SOR for the mode in the head's atomic state.  Chooses the
 * OR protocol and depth from the DCB output type (TMDS single/dual
 * link, LVDS with VBIOS script flags, or DP), kicks off the
 * protocol-specific setup (HDMI infoframes, LVDS script, DP audio),
 * then programs the core channel via nv_encoder->update().
 */
static void
nv50_sor_enable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_lvds_script_v0 lvds;
	} lvds = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
		.base.hasht = nv_encoder->dcb->hasht,
		.base.hashm = nv_encoder->dcb->hashm,
	};
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct drm_device *dev = encoder->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_connector *nv_connector;
	struct nvbios *bios = &drm->vbios;
	u8 proto = 0xf;
	u8 depth = 0x0;

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	nv_encoder->crtc = encoder->crtc;
	nv50_outp_acquire(nv_encoder);

	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
		if (nv_encoder->link & 1) {
			proto = 0x1;
			/* Only enable dual-link if:
			 * - Need to (i.e. rate > 165MHz)
			 * - DCB says we can
			 * - Not an HDMI monitor, since there's no dual-link
			 * on HDMI.
			 */
			if (mode->clock >= 165000 &&
			    nv_encoder->dcb->duallink_possible &&
			    !drm_detect_hdmi_monitor(nv_connector->edid))
				proto |= 0x4;
		} else {
			proto = 0x2;
		}

		nv50_hdmi_enable(&nv_encoder->base.base, mode);
		break;
	case DCB_OUTPUT_LVDS:
		proto = 0x0;

		/* No DDC: trust the VBIOS panel straps. */
		if (bios->fp_no_ddc) {
			if (bios->fp.dual_link)
				lvds.lvds.script |= 0x0100;
			if (bios->fp.if_is_24bit)
				lvds.lvds.script |= 0x0200;
		} else {
			/* EDID byte 121 == 2 flags dual link on SPWG panels
			 * (presumably per the SPWG spec — confirm).
			 */
			if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
				if (((u8 *)nv_connector->edid)[121] == 2)
					lvds.lvds.script |= 0x0100;
			} else
			if (mode->clock >= bios->fp.duallink_transition_clk) {
				lvds.lvds.script |= 0x0100;
			}

			if (lvds.lvds.script & 0x0100) {
				if (bios->fp.strapless_is_24bit & 2)
					lvds.lvds.script |= 0x0200;
			} else {
				if (bios->fp.strapless_is_24bit & 1)
					lvds.lvds.script |= 0x0200;
			}

			if (nv_connector->base.display_info.bpc == 8)
				lvds.lvds.script |= 0x0200;
		}

		nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds));
		break;
	case DCB_OUTPUT_DP:
		/* OR depth from the sink's bpc (same mapping as the MST path). */
		if (nv_connector->base.display_info.bpc == 6)
			depth = 0x2;
		else
		if (nv_connector->base.display_info.bpc == 8)
			depth = 0x5;
		else
			depth = 0x6;

		if (nv_encoder->link & 1)
			proto = 0x8;
		else
			proto = 0x9;

		nv50_audio_enable(encoder, mode);
		break;
	default:
		BUG();
		break;
	}

	nv_encoder->update(nv_encoder, nv_crtc->index, asyh, proto, depth);
}
1364
/* Encoder helpers for SOR outputs. */
static const struct drm_encoder_helper_funcs
nv50_sor_help = {
	.atomic_check = nv50_outp_atomic_check,
	.enable = nv50_sor_enable,
	.disable = nv50_sor_disable,
};
1371
1372static void
1373nv50_sor_destroy(struct drm_encoder *encoder)
1374{
1375 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1376 nv50_mstm_del(&nv_encoder->dp.mstm);
1377 drm_encoder_cleanup(encoder);
1378 kfree(encoder);
1379}
1380
/* Encoder functions for SOR outputs. */
static const struct drm_encoder_funcs
nv50_sor_func = {
	.destroy = nv50_sor_destroy,
};
1385
/* Create a DRM encoder for SOR output @dcbe and attach it to
 * @connector.  DP outputs additionally get an AUX channel and, when
 * the VBIOS flags support for it, MST state.
 */
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	u8 ver, hdr, cnt, len;
	u32 data;
	int type, ret;

	switch (dcbe->type) {
	case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
	case DCB_OUTPUT_TMDS:
	case DCB_OUTPUT_DP:
	default:
		type = DRM_MODE_ENCODER_TMDS;
		break;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	nv_encoder->update = nv50_sor_update;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	/* NOTE(review): drm_encoder_init()'s return value is ignored here,
	 * as in the other output constructors — confirm acceptable.
	 */
	drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
			 "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_sor_help);

	drm_mode_connector_attach_encoder(connector, encoder);

	if (dcbe->type == DCB_OUTPUT_DP) {
		struct nv50_disp *disp = nv50_disp(encoder->dev);
		struct nvkm_i2c_aux *aux =
			nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
		if (aux) {
			if (disp->disp->object.oclass < GF110_DISP) {
				/* HW has no support for address-only
				 * transactions, so we're required to
				 * use custom I2C-over-AUX code.
				 */
				nv_encoder->i2c = &aux->i2c;
			} else {
				nv_encoder->i2c = &nv_connector->aux.ddc;
			}
			nv_encoder->aux = aux;
		}

		/* VBIOS DP table version 0x40+ with bit 2 of byte 8 set
		 * indicates MST capability.  NOTE(review): flag meaning
		 * inferred from this use — confirm against VBIOS docs.
		 */
		if ((data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
		    ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
			ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
					    nv_connector->base.base.id,
					    &nv_encoder->dp.mstm);
			if (ret)
				return ret;
		}
	} else {
		struct nvkm_i2c_bus *bus =
			nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
		if (bus)
			nv_encoder->i2c = &bus->i2c;
	}

	return 0;
}
1457
1458/******************************************************************************
1459 * PIOR
1460 *****************************************************************************/
/* Atomic check for PIOR (external encoder) outputs: run the common
 * output check, then double the adjusted clock.
 * NOTE(review): presumably because the PIOR is fed pixel data at twice
 * the rate — confirm against hardware docs.
 */
static int
nv50_pior_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
	if (ret)
		return ret;
	crtc_state->adjusted_mode.clock *= 2;
	return 0;
}
1472
1473static void
1474nv50_pior_disable(struct drm_encoder *encoder)
1475{
1476 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1477 struct nv50_core *core = nv50_disp(encoder->dev)->core;
1478 if (nv_encoder->crtc)
1479 core->func->pior->ctrl(core, nv_encoder->or, 0x00000000, NULL);
1480 nv_encoder->crtc = NULL;
1481 nv50_outp_release(nv_encoder);
1482}
1483
/* Enable a PIOR: pick the OR depth from the sink's bpc and the
 * protocol from the DCB output type, then program the core channel's
 * PIOR control method with the owning head's bit set.
 */
static void
nv50_pior_enable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_connector *nv_connector;
	struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
	struct nv50_core *core = nv50_disp(encoder->dev)->core;
	u8 owner = 1 << nv_crtc->index;
	u8 proto;

	nv50_outp_acquire(nv_encoder);

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	switch (nv_connector->base.display_info.bpc) {
	case 10: asyh->or.depth = 0x6; break;
	case 8: asyh->or.depth = 0x5; break;
	case 6: asyh->or.depth = 0x2; break;
	default: asyh->or.depth = 0x0; break;
	}

	/* External TMDS and DP encoders both use protocol 0. */
	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
	case DCB_OUTPUT_DP:
		proto = 0x0;
		break;
	default:
		BUG();
		break;
	}

	core->func->pior->ctrl(core, nv_encoder->or, (proto << 8) | owner, asyh);
	nv_encoder->crtc = encoder->crtc;
}
1518
/* Encoder helpers for PIOR outputs. */
static const struct drm_encoder_helper_funcs
nv50_pior_help = {
	.atomic_check = nv50_pior_atomic_check,
	.enable = nv50_pior_enable,
	.disable = nv50_pior_disable,
};
1525
/* Free a PIOR encoder; no extra state beyond the base encoder. */
static void
nv50_pior_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
1532
/* Encoder functions for PIOR outputs. */
static const struct drm_encoder_funcs
nv50_pior_func = {
	.destroy = nv50_pior_destroy,
};
1537
/* Create a DRM encoder for external (PIOR) output @dcbe: look up the
 * external device's DDC bus (TMDS) or AUX channel (DP), then register
 * the encoder and attach it to @connector.
 */
static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nvkm_i2c_bus *bus = NULL;
	struct nvkm_i2c_aux *aux = NULL;
	struct i2c_adapter *ddc;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type;

	switch (dcbe->type) {
	case DCB_OUTPUT_TMDS:
		bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
		ddc = bus ? &bus->i2c : NULL;
		type = DRM_MODE_ENCODER_TMDS;
		break;
	case DCB_OUTPUT_DP:
		aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
		ddc = aux ? &aux->i2c : NULL;
		type = DRM_MODE_ENCODER_TMDS;
		break;
	default:
		return -ENODEV;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	nv_encoder->i2c = ddc;
	nv_encoder->aux = aux;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	/* NOTE(review): drm_encoder_init()'s return value is ignored here,
	 * as in the other output constructors — confirm acceptable.
	 */
	drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
			 "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_pior_help);

	drm_mode_connector_attach_encoder(connector, encoder);
	return 0;
}
1582
1583/******************************************************************************
1584 * Atomic
1585 *****************************************************************************/
1586
/* Kick the core channel's update and wait for the hardware to signal
 * completion via the core notifier.
 *
 * MST stream manager prepare/cleanup must bracket the core update:
 * prepare programs payload allocations before the update takes effect,
 * cleanup runs only after the hardware acknowledges it.
 */
static void
nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
{
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_core *core = disp->core;
	struct nv50_mstm *mstm;
	struct drm_encoder *encoder;

	NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]);

	/* MST virtual encoders have no dp.mstm of their own; only walk
	 * the real (non-DPMST) encoders.
	 */
	drm_for_each_encoder(encoder, drm->dev) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			mstm = nouveau_encoder(encoder)->dp.mstm;
			if (mstm && mstm->modified)
				nv50_mstm_prepare(mstm);
		}
	}

	core->func->ntfy_init(disp->sync, NV50_DISP_CORE_NTFY);
	core->func->update(core, interlock, true);
	if (core->func->ntfy_wait_done(disp->sync, NV50_DISP_CORE_NTFY,
				       disp->core->chan.base.device))
		NV_ERROR(drm, "core notifier timeout\n");

	drm_for_each_encoder(encoder, drm->dev) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			mstm = nouveau_encoder(encoder)->dp.mstm;
			if (mstm && mstm->modified)
				nv50_mstm_cleanup(mstm);
		}
	}
}
1619
/* Apply a swapped-in atomic state to the hardware.
 *
 * The sequence is strictly ordered: disable heads, planes and output
 * paths first (flushing intermediate core updates where a full disable
 * is required), then enable/update output paths, heads and planes, and
 * finally kick the interlocked channel updates and wait for completion.
 * Runs either synchronously from nv50_disp_atomic_commit() or from the
 * nonblocking commit worker.
 */
static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct drm_crtc *crtc;
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_atom *atom = nv50_atom(state);
	struct nv50_outp_atom *outp, *outt;
	u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {};
	int i;

	NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
	drm_atomic_helper_wait_for_fences(dev, state, false);
	drm_atomic_helper_wait_for_dependencies(state);
	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	/* Serialize against other commits touching the core channel. */
	if (atom->lock_core)
		mutex_lock(&disp->mutex);

	/* Disable head(s). */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
			  asyh->clr.mask, asyh->set.mask);
		if (old_crtc_state->active && !new_crtc_state->active)
			drm_crtc_vblank_off(crtc);

		if (asyh->clr.mask) {
			nv50_head_flush_clr(head, asyh, atom->flush_disable);
			interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
		}
	}

	/* Disable plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
			  asyw->clr.mask, asyw->set.mask);
		if (!asyw->clr.mask)
			continue;

		nv50_wndw_flush_clr(wndw, interlock, atom->flush_disable, asyw);
	}

	/* Disable output path(s). */
	list_for_each_entry(outp, &atom->outp, head) {
		const struct drm_encoder_helper_funcs *help;
		struct drm_encoder *encoder;

		encoder = outp->encoder;
		help = encoder->helper_private;

		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
			  outp->clr.mask, outp->set.mask);

		if (outp->clr.mask) {
			help->disable(encoder);
			interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
			/* Some disables (e.g. MST) must hit the hardware
			 * immediately, before anything else is committed.
			 */
			if (outp->flush_disable) {
				nv50_disp_atomic_commit_core(drm, interlock);
				memset(interlock, 0x00, sizeof(interlock));
			}
		}
	}

	/* Flush disable. */
	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
		if (atom->flush_disable) {
			/* Push any pending window updates that the core
			 * update is interlocked with before kicking it.
			 */
			for_each_new_plane_in_state(state, plane, new_plane_state, i) {
				struct nv50_wndw *wndw = nv50_wndw(plane);
				if (interlock[wndw->interlock.type] & wndw->interlock.data) {
					if (wndw->func->update)
						wndw->func->update(wndw, interlock);
				}
			}

			nv50_disp_atomic_commit_core(drm, interlock);
			memset(interlock, 0x00, sizeof(interlock));
		}
	}

	/* Update output path(s). */
	list_for_each_entry_safe(outp, outt, &atom->outp, head) {
		const struct drm_encoder_helper_funcs *help;
		struct drm_encoder *encoder;

		encoder = outp->encoder;
		help = encoder->helper_private;

		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
			  outp->set.mask, outp->clr.mask);

		if (outp->set.mask) {
			help->enable(encoder);
			interlock[NV50_DISP_INTERLOCK_CORE] = 1;
		}

		/* Entries are consumed here; the state's outp list is
		 * empty once the commit has been applied.
		 */
		list_del(&outp->head);
		kfree(outp);
	}

	/* Update head(s). */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
			  asyh->set.mask, asyh->clr.mask);

		if (asyh->set.mask) {
			nv50_head_flush_set(head, asyh);
			interlock[NV50_DISP_INTERLOCK_CORE] = 1;
		}

		if (new_crtc_state->active) {
			if (!old_crtc_state->active)
				drm_crtc_vblank_on(crtc);
			/* Hold a vblank ref until the event is sent below. */
			if (new_crtc_state->event)
				drm_crtc_vblank_get(crtc);
		}
	}

	/* Update plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
			  asyw->set.mask, asyw->clr.mask);
		if ( !asyw->set.mask &&
		    (!asyw->clr.mask || atom->flush_disable))
			continue;

		nv50_wndw_flush_set(wndw, interlock, asyw);
	}

	/* Flush update. */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw *wndw = nv50_wndw(plane);
		if (interlock[wndw->interlock.type] & wndw->interlock.data) {
			if (wndw->func->update)
				wndw->func->update(wndw, interlock);
		}
	}

	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
		/* Legacy cursor updates can skip the (slow) notifier wait
		 * unless a base channel update is interlocked with core.
		 */
		if (interlock[NV50_DISP_INTERLOCK_BASE] ||
		    !atom->state.legacy_cursor_update)
			nv50_disp_atomic_commit_core(drm, interlock);
		else
			disp->core->func->update(disp->core, interlock, false);
	}

	if (atom->lock_core)
		mutex_unlock(&disp->mutex);

	/* Wait for HW to signal completion. */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);
		int ret = nv50_wndw_wait_armed(wndw, asyw);
		if (ret)
			NV_ERROR(drm, "%s: timeout\n", plane->name);
	}

	/* Deliver pending page-flip/vblank events. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->event) {
			unsigned long flags;
			/* Get correct count/ts if racing with vblank irq */
			if (new_crtc_state->active)
				drm_crtc_accurate_vblank_count(crtc);
			spin_lock_irqsave(&crtc->dev->event_lock, flags);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

			new_crtc_state->event = NULL;
			if (new_crtc_state->active)
				drm_crtc_vblank_put(crtc);
		}
	}

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_helper_commit_cleanup_done(state);
	/* Drop the reference taken by nv50_disp_atomic_commit(). */
	drm_atomic_state_put(state);
}
1814
/* Worker for nonblocking commits: recover the atomic state from its
 * embedded work_struct and run the commit tail.
 */
static void
nv50_disp_atomic_commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, typeof(*state), commit_work);
	nv50_disp_atomic_commit_tail(state);
}
1822
1823static int
1824nv50_disp_atomic_commit(struct drm_device *dev,
1825 struct drm_atomic_state *state, bool nonblock)
1826{
1827 struct nouveau_drm *drm = nouveau_drm(dev);
1828 struct drm_plane_state *new_plane_state;
1829 struct drm_plane *plane;
1830 struct drm_crtc *crtc;
1831 bool active = false;
1832 int ret, i;
1833
1834 ret = pm_runtime_get_sync(dev->dev);
1835 if (ret < 0 && ret != -EACCES)
1836 return ret;
1837
1838 ret = drm_atomic_helper_setup_commit(state, nonblock);
1839 if (ret)
1840 goto done;
1841
1842 INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);
1843
1844 ret = drm_atomic_helper_prepare_planes(dev, state);
1845 if (ret)
1846 goto done;
1847
1848 if (!nonblock) {
1849 ret = drm_atomic_helper_wait_for_fences(dev, state, true);
1850 if (ret)
1851 goto err_cleanup;
1852 }
1853
1854 ret = drm_atomic_helper_swap_state(state, true);
1855 if (ret)
1856 goto err_cleanup;
1857
1858 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
1859 struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
1860 struct nv50_wndw *wndw = nv50_wndw(plane);
1861
1862 if (asyw->set.image)
1863 nv50_wndw_ntfy_enable(wndw, asyw);
1864 }
1865
1866 drm_atomic_state_get(state);
1867
1868 if (nonblock)
1869 queue_work(system_unbound_wq, &state->commit_work);
1870 else
1871 nv50_disp_atomic_commit_tail(state);
1872
1873 drm_for_each_crtc(crtc, dev) {
1874 if (crtc->state->enable) {
1875 if (!drm->have_disp_power_ref) {
1876 drm->have_disp_power_ref = true;
1877 return 0;
1878 }
1879 active = true;
1880 break;
1881 }
1882 }
1883
1884 if (!active && drm->have_disp_power_ref) {
1885 pm_runtime_put_autosuspend(dev->dev);
1886 drm->have_disp_power_ref = false;
1887 }
1888
1889err_cleanup:
1890 if (ret)
1891 drm_atomic_helper_cleanup_planes(dev, state);
1892done:
1893 pm_runtime_put_autosuspend(dev->dev);
1894 return ret;
1895}
1896
1897static struct nv50_outp_atom *
1898nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
1899{
1900 struct nv50_outp_atom *outp;
1901
1902 list_for_each_entry(outp, &atom->outp, head) {
1903 if (outp->encoder == encoder)
1904 return outp;
1905 }
1906
1907 outp = kzalloc(sizeof(*outp), GFP_KERNEL);
1908 if (!outp)
1909 return ERR_PTR(-ENOMEM);
1910
1911 list_add(&outp->head, &atom->outp);
1912 outp->encoder = encoder;
1913 return outp;
1914}
1915
/* Record that the encoder previously bound to this connector must be
 * disabled: if its old CRTC was active and the new CRTC state needs a
 * modeset, flag the encoder's ctrl method for clearing.  DPMST encoders
 * additionally force an immediate flush of the disable.
 *
 * Returns 0 on success or -ENOMEM from nv50_disp_outp_atomic_add().
 */
static int
nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
				struct drm_connector_state *old_connector_state)
{
	struct drm_encoder *encoder = old_connector_state->best_encoder;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	struct nv50_outp_atom *outp;

	/* Connector wasn't bound to a CRTC; nothing to disable. */
	if (!(crtc = old_connector_state->crtc))
		return 0;

	old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
	new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
	if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		outp = nv50_disp_outp_atomic_add(atom, encoder);
		if (IS_ERR(outp))
			return PTR_ERR(outp);

		if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			outp->flush_disable = true;
			atom->flush_disable = true;
		}
		outp->clr.ctrl = true;
		atom->lock_core = true;
	}

	return 0;
}
1945
1946static int
1947nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
1948 struct drm_connector_state *connector_state)
1949{
1950 struct drm_encoder *encoder = connector_state->best_encoder;
1951 struct drm_crtc_state *new_crtc_state;
1952 struct drm_crtc *crtc;
1953 struct nv50_outp_atom *outp;
1954
1955 if (!(crtc = connector_state->crtc))
1956 return 0;
1957
1958 new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
1959 if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
1960 outp = nv50_disp_outp_atomic_add(atom, encoder);
1961 if (IS_ERR(outp))
1962 return PTR_ERR(outp);
1963
1964 outp->set.ctrl = true;
1965 atom->lock_core = true;
1966 }
1967
1968 return 0;
1969}
1970
/* drm_mode_config_funcs.atomic_check implementation: run the generic
 * helpers, then derive per-encoder set/clr bookkeeping from the old and
 * new connector states.
 *
 * Returns 0 on success or a negative errno.
 */
static int
nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);
	struct drm_connector_state *old_connector_state, *new_connector_state;
	struct drm_connector *connector;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	/* We need to handle colour management on a per-plane basis. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->color_mgmt_changed) {
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				return ret;
		}
	}

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	/* Translate connector routing changes into encoder clr/set flags. */
	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
		if (ret)
			return ret;

		ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
		if (ret)
			return ret;
	}

	return 0;
}
2006
2007static void
2008nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
2009{
2010 struct nv50_atom *atom = nv50_atom(state);
2011 struct nv50_outp_atom *outp, *outt;
2012
2013 list_for_each_entry_safe(outp, outt, &atom->outp, head) {
2014 list_del(&outp->head);
2015 kfree(outp);
2016 }
2017
2018 drm_atomic_state_default_clear(state);
2019}
2020
/* drm_mode_config_funcs.atomic_state_free implementation: release the
 * base state and free the containing nv50_atom.
 */
static void
nv50_disp_atomic_state_free(struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);
	drm_atomic_state_default_release(&atom->state);
	kfree(atom);
}
2028
2029static struct drm_atomic_state *
2030nv50_disp_atomic_state_alloc(struct drm_device *dev)
2031{
2032 struct nv50_atom *atom;
2033 if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
2034 drm_atomic_state_init(dev, &atom->state) < 0) {
2035 kfree(atom);
2036 return NULL;
2037 }
2038 INIT_LIST_HEAD(&atom->outp);
2039 return &atom->state;
2040}
2041
/* Mode-config vtable wiring the atomic entry points above into DRM. */
static const struct drm_mode_config_funcs
nv50_disp_func = {
	.fb_create = nouveau_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = nv50_disp_atomic_check,
	.atomic_commit = nv50_disp_atomic_commit,
	.atomic_state_alloc = nv50_disp_atomic_state_alloc,
	.atomic_state_clear = nv50_disp_atomic_state_clear,
	.atomic_state_free = nv50_disp_atomic_state_free,
};
2052
2053/******************************************************************************
2054 * Init
2055 *****************************************************************************/
2056
/* Quiesce the display: tear down every window channel and stop the MST
 * managers on all real (non-DPMST) encoders.
 */
void
nv50_display_fini(struct drm_device *dev)
{
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	struct drm_plane *plane;

	drm_for_each_plane(plane, dev) {
		struct nv50_wndw *wndw = nv50_wndw(plane);
		/* Skip planes not owned by this driver's wndw code. */
		if (plane->funcs != &nv50_wndw)
			continue;
		nv50_wndw_fini(wndw);
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			nv_encoder = nouveau_encoder(encoder);
			nv50_mstm_fini(nv_encoder->dp.mstm);
		}
	}
}
2078
/* Bring the display back up: initialize the core channel, restart MST
 * managers, then re-initialize every window channel.
 *
 * Always returns 0.
 */
int
nv50_display_init(struct drm_device *dev)
{
	struct nv50_core *core = nv50_disp(dev)->core;
	struct drm_encoder *encoder;
	struct drm_plane *plane;

	core->func->init(core);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			struct nouveau_encoder *nv_encoder =
				nouveau_encoder(encoder);
			nv50_mstm_init(nv_encoder->dp.mstm);
		}
	}

	drm_for_each_plane(plane, dev) {
		struct nv50_wndw *wndw = nv50_wndw(plane);
		/* Skip planes not owned by this driver's wndw code. */
		if (plane->funcs != &nv50_wndw)
			continue;
		nv50_wndw_init(wndw);
	}

	return 0;
}
2105
/* Final teardown: destroy the core channel, release the shared sync
 * buffer, and free the nv50_disp.  Also used as the error path of
 * nv50_display_create(), so disp->sync may be NULL here.
 */
void
nv50_display_destroy(struct drm_device *dev)
{
	struct nv50_disp *disp = nv50_disp(dev);

	nv50_core_del(&disp->core);

	nouveau_bo_unmap(disp->sync);
	if (disp->sync)
		nouveau_bo_unpin(disp->sync);
	nouveau_bo_ref(NULL, &disp->sync);

	nouveau_display(dev)->priv = NULL;
	kfree(disp);
}
2121
/* "atomic" module parameter: opt-in switch exposing the atomic modeset
 * ioctl to userspace (0400 = read-only after module load; default off).
 */
MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
static int nouveau_atomic = 0;
module_param_named(atomic, nouveau_atomic, int, 0400);
2125
/* Probe-time constructor for the nv50+ display: allocates the nv50_disp,
 * the shared notifier/semaphore buffer and the core EVO/NVD channel,
 * then creates heads from the detected CRTC mask and encoders/connectors
 * from the VBIOS DCB table.
 *
 * Returns 0 on success; on failure everything already created is torn
 * down via nv50_display_destroy() and a negative errno is returned.
 */
int
nv50_display_create(struct drm_device *dev)
{
	struct nvif_device *device = &nouveau_drm(dev)->client.device;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct dcb_table *dcb = &drm->vbios.dcb;
	struct drm_connector *connector, *tmp;
	struct nv50_disp *disp;
	struct dcb_output *dcbe;
	int crtcs, ret, i;

	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return -ENOMEM;

	mutex_init(&disp->mutex);

	nouveau_display(dev)->priv = disp;
	nouveau_display(dev)->dtor = nv50_display_destroy;
	nouveau_display(dev)->init = nv50_display_init;
	nouveau_display(dev)->fini = nv50_display_fini;
	disp->disp = &nouveau_display(dev)->disp;
	dev->mode_config.funcs = &nv50_disp_func;
	dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
	if (nouveau_atomic)
		dev->driver->driver_features |= DRIVER_ATOMIC;

	/* small shared memory area we use for notifiers and semaphores */
	ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
			     0, 0x0000, NULL, NULL, &disp->sync);
	if (!ret) {
		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
		if (!ret) {
			ret = nouveau_bo_map(disp->sync);
			if (ret)
				nouveau_bo_unpin(disp->sync);
		}
		if (ret)
			nouveau_bo_ref(NULL, &disp->sync);
	}

	if (ret)
		goto out;

	/* allocate master evo channel */
	ret = nv50_core_new(drm, &disp->core);
	if (ret)
		goto out;

	/* create crtc objects to represent the hw heads */
	if (disp->disp->object.oclass >= GV100_DISP)
		crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
	else
	if (disp->disp->object.oclass >= GF110_DISP)
		crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
	else
		crtcs = 0x3;	/* pre-GF110: fixed two heads */

	/* crtcs is a bitmask of present heads, not a count. */
	for (i = 0; i < fls(crtcs); i++) {
		if (!(crtcs & (1 << i)))
			continue;
		ret = nv50_head_create(dev, i);
		if (ret)
			goto out;
	}

	/* create encoder/connector objects based on VBIOS DCB table */
	for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
		connector = nouveau_connector_create(dev, dcbe->connector);
		if (IS_ERR(connector))
			continue;

		if (dcbe->location == DCB_LOC_ON_CHIP) {
			switch (dcbe->type) {
			case DCB_OUTPUT_TMDS:
			case DCB_OUTPUT_LVDS:
			case DCB_OUTPUT_DP:
				ret = nv50_sor_create(connector, dcbe);
				break;
			case DCB_OUTPUT_ANALOG:
				ret = nv50_dac_create(connector, dcbe);
				break;
			default:
				ret = -ENODEV;
				break;
			}
		} else {
			/* off-chip outputs are driven through PIORs */
			ret = nv50_pior_create(connector, dcbe);
		}

		/* A single bad encoder isn't fatal; warn and carry on. */
		if (ret) {
			NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
				dcbe->location, dcbe->type,
				ffs(dcbe->or) - 1, ret);
			ret = 0;
		}
	}

	/* cull any connectors we created that don't have an encoder */
	list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
		if (connector->encoder_ids[0])
			continue;

		NV_WARN(drm, "%s has no encoders, removing\n",
			connector->name);
		connector->funcs->destroy(connector);
	}

out:
	if (ret)
		nv50_display_destroy(dev);
	return ret;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
new file mode 100644
index 000000000000..e48c5eb35b49
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -0,0 +1,89 @@
1#ifndef __NV50_KMS_H__
2#define __NV50_KMS_H__
3#include <nvif/mem.h>
4
5#include "nouveau_display.h"
6
/* Driver-private display state for nv50+ hardware, hung off
 * nouveau_display()->priv.
 */
struct nv50_disp {
	struct nvif_disp *disp;		/* nvif display object */
	struct nv50_core *core;		/* master core channel */

/* Layout of the shared sync BO: one 0x40-byte slot per channel,
 * slot 0 for the core notifier, slots 1..N for window channels
 * (semaphores at +0x00/+0x10, notifier at +0x20).
 */
#define NV50_DISP_SYNC(c, o)                                ((c) * 0x040 + (o))
#define NV50_DISP_CORE_NTFY                       NV50_DISP_SYNC(0      , 0x00)
#define NV50_DISP_WNDW_SEM0(c)                    NV50_DISP_SYNC(1 + (c), 0x00)
#define NV50_DISP_WNDW_SEM1(c)                    NV50_DISP_SYNC(1 + (c), 0x10)
#define NV50_DISP_WNDW_NTFY(c)                    NV50_DISP_SYNC(1 + (c), 0x20)
#define NV50_DISP_BASE_SEM0(c)                    NV50_DISP_WNDW_SEM0(0 + (c))
#define NV50_DISP_BASE_SEM1(c)                    NV50_DISP_WNDW_SEM1(0 + (c))
#define NV50_DISP_BASE_NTFY(c)                    NV50_DISP_WNDW_NTFY(0 + (c))
#define NV50_DISP_OVLY_SEM0(c)                    NV50_DISP_WNDW_SEM0(4 + (c))
#define NV50_DISP_OVLY_SEM1(c)                    NV50_DISP_WNDW_SEM1(4 + (c))
#define NV50_DISP_OVLY_NTFY(c)                    NV50_DISP_WNDW_NTFY(4 + (c))
	struct nouveau_bo *sync;	/* shared notifier/semaphore buffer */

	struct mutex mutex;		/* serializes core-channel commits */
};
26
/* Fetch the nv50_disp hung off the generic nouveau display. */
static inline struct nv50_disp *
nv50_disp(struct drm_device *dev)
{
	return nouveau_display(dev)->priv;
}
32
/* Identifies which display channel an update must be interlocked with,
 * plus channel-specific interlock data; indexes the interlock[] array
 * used throughout the commit path.
 */
struct nv50_disp_interlock {
	enum nv50_disp_interlock_type {
		NV50_DISP_INTERLOCK_CORE = 0,
		NV50_DISP_INTERLOCK_CURS,
		NV50_DISP_INTERLOCK_BASE,
		NV50_DISP_INTERLOCK_OVLY,
		NV50_DISP_INTERLOCK_WNDW,
		NV50_DISP_INTERLOCK_WIMM,
		NV50_DISP_INTERLOCK__SIZE
	} type;
	u32 data;
};
45
void corec37d_ntfy_init(struct nouveau_bo *, u32);

/* Base display channel: just the nvif object and owning device. */
struct nv50_chan {
	struct nvif_object user;
	struct nvif_device *device;
};

/* DMA display channel (core/base/ovly/wndw): a nv50_chan plus a mapped
 * push buffer and the sync/vram DMA objects used by methods.
 */
struct nv50_dmac {
	struct nv50_chan base;

	struct nvif_mem push;	/* push buffer backing memory */
	u32 *ptr;		/* CPU mapping of the push buffer */

	struct nvif_object sync;
	struct nvif_object vram;

	/* Protects against concurrent pushbuf access to this channel, lock is
	 * grabbed by evo_wait (if the pushbuf reservation is successful) and
	 * dropped again by evo_kick. */
	struct mutex lock;
};

int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
		     const s32 *oclass, u8 head, void *data, u32 size,
		     u64 syncbuf, struct nv50_dmac *dmac);
void nv50_dmac_destroy(struct nv50_dmac *);

u32 *evo_wait(struct nv50_dmac *, int nr);
void evo_kick(u32 *, struct nv50_dmac *);

/* Emit a method header (count << 18 | method) into the push buffer;
 * arguments are evaluated once to avoid multiple-evaluation hazards.
 */
#define evo_mthd(p, m, s) do {						\
	const u32 _m = (m), _s = (s);					\
	if (drm_debug & DRM_UT_KMS)					\
		pr_err("%04x %d %s\n", _m, _s, __func__);		\
	*((p)++) = ((_s << 18) | _m);					\
} while(0)

/* Emit one method data word into the push buffer. */
#define evo_data(p, d) do {						\
	const u32 _d = (d);						\
	if (drm_debug & DRM_UT_KMS)					\
		pr_err("\t%08x\n", _d);					\
	*((p)++) = _d;							\
} while(0)
89#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
new file mode 100644
index 000000000000..4f57e5379796
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -0,0 +1,511 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "head.h"
23#include "base.h"
24#include "core.h"
25#include "curs.h"
26#include "ovly.h"
27
28#include <nvif/class.h>
29
30#include <drm/drm_atomic_helper.h>
31#include <drm/drm_crtc_helper.h>
32#include "nouveau_connector.h"
/* Push the "clear" half of a head's atomic state to hardware: disable
 * each resource flagged in asyh->clr.  Unless @flush forces a full
 * disable, bits that will immediately be re-set (asyh->set) are skipped
 * to avoid a needless off/on glitch.
 */
void
nv50_head_flush_clr(struct nv50_head *head,
		    struct nv50_head_atom *asyh, bool flush)
{
	union nv50_head_atom_mask clr = {
		.mask = asyh->clr.mask & ~(flush ? 0 : asyh->set.mask),
	};
	if (clr.olut) head->func->olut_clr(head);
	if (clr.core) head->func->core_clr(head);
	if (clr.curs) head->func->curs_clr(head);
}
44
/* Push the "set" half of a head's atomic state to hardware, calling the
 * per-generation head function for each resource flagged in asyh->set.
 * The output LUT is uploaded (double-buffered via olut.buffer) before
 * being pointed at by olut_set.
 */
void
nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	if (asyh->set.view   ) head->func->view    (head, asyh);
	if (asyh->set.mode   ) head->func->mode    (head, asyh);
	if (asyh->set.core   ) head->func->core_set(head, asyh);
	if (asyh->set.olut   ) {
		asyh->olut.offset = nv50_lut_load(&head->olut,
						  asyh->olut.mode <= 1,
						  asyh->olut.buffer,
						  asyh->state.gamma_lut);
		head->func->olut_set(head, asyh);
	}
	if (asyh->set.curs   ) head->func->curs_set(head, asyh);
	if (asyh->set.base   ) head->func->base    (head, asyh);
	if (asyh->set.ovly   ) head->func->ovly    (head, asyh);
	if (asyh->set.dither ) head->func->dither  (head, asyh);
	if (asyh->set.procamp) head->func->procamp (head, asyh);
	if (asyh->set.or     ) head->func->or      (head, asyh);
}
65
/* Derive the hardware procamp (saturation sin/cos) values from the
 * connector's colour-vibrance and vibrant-hue properties.  Values are
 * rebased around their neutral points (100 / 90) and scaled into the
 * hardware's signed 12-bit fixed-point fields.
 */
static void
nv50_head_atomic_check_procamp(struct nv50_head_atom *armh,
			       struct nv50_head_atom *asyh,
			       struct nouveau_conn_atom *asyc)
{
	const int vib = asyc->procamp.color_vibrance - 100;
	const int hue = asyc->procamp.vibrant_hue - 90;
	const int adj = (vib > 0) ? 50 : 0;	/* round-half-up for boosts */
	asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
	asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff;
	asyh->set.procamp = true;
}
78
79static void
80nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
81 struct nv50_head_atom *asyh,
82 struct nouveau_conn_atom *asyc)
83{
84 struct drm_connector *connector = asyc->state.connector;
85 u32 mode = 0x00;
86
87 if (asyc->dither.mode == DITHERING_MODE_AUTO) {
88 if (asyh->base.depth > connector->display_info.bpc * 3)
89 mode = DITHERING_MODE_DYNAMIC2X2;
90 } else {
91 mode = asyc->dither.mode;
92 }
93
94 if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
95 if (connector->display_info.bpc >= 8)
96 mode |= DITHERING_DEPTH_8BPC;
97 } else {
98 mode |= asyc->dither.depth;
99 }
100
101 asyh->dither.enable = mode;
102 asyh->dither.bits = mode >> 1;
103 asyh->dither.mode = mode >> 3;
104 asyh->set.dither = true;
105}
106
/* Compute the head's viewport scaling (input iW/iH -> output oW/oH)
 * from the user mode, the adjusted output mode, the scaler property and
 * any underscan compensation.  Ratio math is done in 13.19 fixed point.
 */
static void
nv50_head_atomic_check_view(struct nv50_head_atom *armh,
			    struct nv50_head_atom *asyh,
			    struct nouveau_conn_atom *asyc)
{
	struct drm_connector *connector = asyc->state.connector;
	struct drm_display_mode *omode = &asyh->state.adjusted_mode;
	struct drm_display_mode *umode = &asyh->state.mode;
	int mode = asyc->scaler.mode;
	struct edid *edid;
	int umode_vdisplay, omode_hdisplay, omode_vdisplay;

	if (connector->edid_blob_ptr)
		edid = (struct edid *)connector->edid_blob_ptr->data;
	else
		edid = NULL;

	if (!asyc->scaler.full) {
		/* SCALE_NONE: output the user mode 1:1. */
		if (mode == DRM_MODE_SCALE_NONE)
			omode = umode;
	} else {
		/* Non-EDID LVDS/eDP mode. */
		mode = DRM_MODE_SCALE_FULLSCREEN;
	}

	/* For the user-specified mode, we must ignore doublescan and
	 * the like, but honor frame packing.
	 */
	umode_vdisplay = umode->vdisplay;
	if ((umode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
		umode_vdisplay += umode->vtotal;
	asyh->view.iW = umode->hdisplay;
	asyh->view.iH = umode_vdisplay;
	/* For the output mode, we can just use the stock helper. */
	drm_mode_get_hv_timing(omode, &omode_hdisplay, &omode_vdisplay);
	asyh->view.oW = omode_hdisplay;
	asyh->view.oH = omode_vdisplay;

	/* Add overscan compensation if necessary, will keep the aspect
	 * ratio the same as the backend mode unless overridden by the
	 * user setting both hborder and vborder properties.
	 */
	if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
	    (asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
	     drm_detect_hdmi_monitor(edid)))) {
		u32 bX = asyc->scaler.underscan.hborder;
		u32 bY = asyc->scaler.underscan.vborder;
		u32 r = (asyh->view.oH << 19) / asyh->view.oW; /* 13.19 aspect ratio */

		if (bX) {
			asyh->view.oW -= (bX * 2);
			if (bY) asyh->view.oH -= (bY * 2);
			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
		} else {
			/* Default border: width/16 + 32 pixels total. */
			asyh->view.oW -= (asyh->view.oW >> 4) + 32;
			if (bY) asyh->view.oH -= (bY * 2);
			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
		}
	}

	/* Handle CENTER/ASPECT scaling, taking into account the areas
	 * removed already for overscan compensation.
	 */
	switch (mode) {
	case DRM_MODE_SCALE_CENTER:
		asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
		asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH);
		/* fall-through */
	case DRM_MODE_SCALE_ASPECT:
		/* Shrink the longer output axis to preserve input aspect. */
		if (asyh->view.oH < asyh->view.oW) {
			u32 r = (asyh->view.iW << 19) / asyh->view.iH;
			asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
		} else {
			u32 r = (asyh->view.iH << 19) / asyh->view.iW;
			asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
		}
		break;
	default:
		break;
	}

	asyh->set.view = true;
}
190
/* Decide whether the head's core output LUT should be enabled, and if
 * so flip the double-buffer index and ask the per-generation code to
 * fill in the LUT configuration.
 *
 * Returns 0, or -EINVAL when only a subset of windows has stolen the
 * core LUT (all-or-nothing constraint).
 */
static int
nv50_head_atomic_check_lut(struct nv50_head *head,
			   struct nv50_head_atom *asyh)
{
	struct nv50_disp *disp = nv50_disp(head->base.base.dev);
	struct drm_property_blob *olut = asyh->state.gamma_lut;

	/* Determine whether core output LUT should be enabled. */
	if (olut) {
		/* Check if any window(s) have stolen the core output LUT
		 * to as an input LUT for legacy gamma + I8 colour format.
		 */
		if (asyh->wndw.olut) {
			/* If any window has stolen the core output LUT,
			 * all of them must.
			 */
			if (asyh->wndw.olut != asyh->wndw.mask)
				return -EINVAL;
			olut = NULL;
		}
	}

	if (!olut) {
		asyh->olut.handle = 0;	/* LUT disabled */
		return 0;
	}

	asyh->olut.handle = disp->core->chan.vram.handle;
	asyh->olut.buffer = !asyh->olut.buffer;	/* flip double buffer */
	head->func->olut(head, asyh);
	return 0;
}
223
/* Translate the DRM adjusted mode into the hardware's raster timing
 * representation (nv50_head_mode), including interlace adjustments and
 * the vblank duration estimate used elsewhere.
 */
static void
nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
	struct nv50_head_mode *m = &asyh->mode;
	u32 blankus;

	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V | CRTC_STEREO_DOUBLE);

	/*
	 * DRM modes are defined in terms of a repeating interval
	 * starting with the active display area.  The hardware modes
	 * are defined in terms of a repeating interval starting one
	 * unit (pixel or line) into the sync pulse.  So, add bias.
	 */

	m->h.active = mode->crtc_htotal;
	m->h.synce  = mode->crtc_hsync_end - mode->crtc_hsync_start - 1;
	m->h.blanke = mode->crtc_hblank_end - mode->crtc_hsync_start - 1;
	m->h.blanks = m->h.blanke + mode->crtc_hdisplay;

	m->v.active = mode->crtc_vtotal;
	m->v.synce  = mode->crtc_vsync_end - mode->crtc_vsync_start - 1;
	m->v.blanke = mode->crtc_vblank_end - mode->crtc_vsync_start - 1;
	m->v.blanks = m->v.blanke + mode->crtc_vdisplay;

	/*XXX: Safe underestimate, even "0" works */
	/* vblank duration in microseconds. */
	blankus = (m->v.active - mode->crtc_vdisplay - 2) * m->h.active;
	blankus *= 1000;
	blankus /= mode->crtc_clock;
	m->v.blankus = blankus;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		m->v.blank2e =  m->v.active + m->v.blanke;
		m->v.blank2s =  m->v.blank2e + mode->crtc_vdisplay;
		m->v.active  = (m->v.active * 2) + 1;
		m->interlace = true;
	} else {
		m->v.blank2e = 0;
		m->v.blank2s = 1;
		m->interlace = false;
	}
	m->clock = mode->crtc_clock;

	asyh->or.nhsync = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
	asyh->or.nvsync = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
	/* Only generations with a per-head OR method need set.or. */
	asyh->set.or = head->func->or != NULL;
	asyh->set.mode = true;
}
273
/* Atomic check for a head (CRTC): compute the hardware state deltas
 * between the currently-armed state (armh) and the new state (asyh),
 * recording which methods must be set or cleared on commit.
 */
static int
nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
	struct nv50_head *head = nv50_head(crtc);
	struct nv50_head_atom *armh = nv50_head_atom(crtc->state);	/* old */
	struct nv50_head_atom *asyh = nv50_head_atom(state);		/* new */
	struct nouveau_conn_atom *asyc = NULL;
	struct drm_connector_state *conns;
	struct drm_connector *conn;
	int i;

	NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
	if (asyh->state.active) {
		/* Find the connector state driving this CRTC, if any. */
		for_each_new_connector_in_state(asyh->state.state, conn, conns, i) {
			if (conns->crtc == crtc) {
				asyc = nouveau_conn_atom(conns);
				break;
			}
		}

		if (armh->state.active) {
			/* Head stays active: only refresh what changed. */
			if (asyc) {
				if (asyh->state.mode_changed)
					asyc->set.scaler = true;
				if (armh->base.depth != asyh->base.depth)
					asyc->set.dither = true;
			}
		} else {
			/* Head is being enabled: program everything. */
			if (asyc)
				asyc->set.mask = ~0;
			asyh->set.mask = ~0;
			asyh->set.or = head->func->or != NULL;
		}

		if (asyh->state.mode_changed)
			nv50_head_atomic_check_mode(head, asyh);

		/* Re-evaluate the output LUT when colour management or
		 * window LUT ownership changed.
		 */
		if (asyh->state.color_mgmt_changed ||
		    memcmp(&armh->wndw, &asyh->wndw, sizeof(asyh->wndw))) {
			int ret = nv50_head_atomic_check_lut(head, asyh);
			if (ret)
				return ret;

			asyh->olut.visible = asyh->olut.handle != 0;
		}

		if (asyc) {
			if (asyc->set.scaler)
				nv50_head_atomic_check_view(armh, asyh, asyc);
			if (asyc->set.dither)
				nv50_head_atomic_check_dither(armh, asyh, asyc);
			if (asyc->set.procamp)
				nv50_head_atomic_check_procamp(armh, asyh, asyc);
		}

		/* Pre-GV100 hardware derives core channel state from the
		 * base/overlay/cursor state; the LUT needs a visible core.
		 */
		if (head->func->core_calc) {
			head->func->core_calc(head, asyh);
			if (!asyh->core.visible)
				asyh->olut.visible = false;
		}

		asyh->set.base = armh->base.cpp != asyh->base.cpp;
		asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
	} else {
		/* Head disabled: nothing is visible. */
		asyh->olut.visible = false;
		asyh->core.visible = false;
		asyh->curs.visible = false;
		asyh->base.cpp = 0;
		asyh->ovly.cpp = 0;
	}

	if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
		/* Fast path: only push methods whose state changed. */
		if (asyh->core.visible) {
			if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
				asyh->set.core = true;
		} else
		if (armh->core.visible) {
			asyh->clr.core = true;
		}

		if (asyh->curs.visible) {
			if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
				asyh->set.curs = true;
		} else
		if (armh->curs.visible) {
			asyh->clr.curs = true;
		}

		if (asyh->olut.visible) {
			if (memcmp(&armh->olut, &asyh->olut, sizeof(asyh->olut)))
				asyh->set.olut = true;
		} else
		if (armh->olut.visible) {
			asyh->clr.olut = true;
		}
	} else {
		/* Full modeset: clear everything armed, set everything new. */
		asyh->clr.olut = armh->olut.visible;
		asyh->clr.core = armh->core.visible;
		asyh->clr.curs = armh->curs.visible;
		asyh->set.olut = asyh->olut.visible;
		asyh->set.core = asyh->core.visible;
		asyh->set.curs = asyh->curs.visible;
	}

	/* Any core channel update requires serialising the commit. */
	if (asyh->clr.mask || asyh->set.mask)
		nv50_atom(asyh->state.state)->lock_core = true;
	return 0;
}
383
/* CRTC atomic helper hooks for a head. */
static const struct drm_crtc_helper_funcs
nv50_head_help = {
	.atomic_check = nv50_head_atomic_check,
};
388
/* Free a head atomic state previously allocated by duplicate_state/reset. */
static void
nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
			       struct drm_crtc_state *state)
{
	struct nv50_head_atom *asyh = nv50_head_atom(state);
	__drm_atomic_helper_crtc_destroy_state(&asyh->state);
	kfree(asyh);
}
397
398static struct drm_crtc_state *
399nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
400{
401 struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
402 struct nv50_head_atom *asyh;
403 if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
404 return NULL;
405 __drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
406 asyh->wndw = armh->wndw;
407 asyh->view = armh->view;
408 asyh->mode = armh->mode;
409 asyh->olut = armh->olut;
410 asyh->core = armh->core;
411 asyh->curs = armh->curs;
412 asyh->base = armh->base;
413 asyh->ovly = armh->ovly;
414 asyh->dither = armh->dither;
415 asyh->procamp = armh->procamp;
416 asyh->clr.mask = 0;
417 asyh->set.mask = 0;
418 return &asyh->state;
419}
420
/* Replace the CRTC's current state with a fresh one, freeing the old.
 *
 * NOTE(review): this local helper uses a double-underscore DRM-style
 * name; if the DRM core ever grows a helper with the same name this
 * definition will clash and should be dropped in favour of the core one.
 */
static void
__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
			       struct drm_crtc_state *state)
{
	if (crtc->state)
		crtc->funcs->atomic_destroy_state(crtc, crtc->state);
	crtc->state = state;
	crtc->state->crtc = crtc;
}
430
431static void
432nv50_head_reset(struct drm_crtc *crtc)
433{
434 struct nv50_head_atom *asyh;
435
436 if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
437 return;
438
439 __drm_atomic_helper_crtc_reset(crtc, &asyh->state);
440}
441
/* Tear down a head: release its output LUT, unregister the CRTC and
 * free the containing structure.
 */
static void
nv50_head_destroy(struct drm_crtc *crtc)
{
	struct nv50_head *head = nv50_head(crtc);
	nv50_lut_fini(&head->olut);
	drm_crtc_cleanup(crtc);
	kfree(head);
}
450
/* CRTC vfuncs for a head; atomic-only, legacy entry points go through
 * the DRM atomic helpers.
 */
static const struct drm_crtc_funcs
nv50_head_func = {
	.reset = nv50_head_reset,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.destroy = nv50_head_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = nv50_head_atomic_duplicate_state,
	.atomic_destroy_state = nv50_head_atomic_destroy_state,
};
461
462int
463nv50_head_create(struct drm_device *dev, int index)
464{
465 struct nouveau_drm *drm = nouveau_drm(dev);
466 struct nv50_disp *disp = nv50_disp(dev);
467 struct nv50_head *head;
468 struct nv50_wndw *curs, *wndw;
469 struct drm_crtc *crtc;
470 int ret;
471
472 head = kzalloc(sizeof(*head), GFP_KERNEL);
473 if (!head)
474 return -ENOMEM;
475
476 head->func = disp->core->func->head;
477 head->base.index = index;
478
479 if (disp->disp->object.oclass < GV100_DISP) {
480 ret = nv50_ovly_new(drm, head->base.index, &wndw);
481 ret = nv50_base_new(drm, head->base.index, &wndw);
482 } else {
483 ret = nv50_wndw_new(drm, DRM_PLANE_TYPE_OVERLAY,
484 head->base.index * 2 + 1, &wndw);
485 ret = nv50_wndw_new(drm, DRM_PLANE_TYPE_PRIMARY,
486 head->base.index * 2 + 0, &wndw);
487 }
488 if (ret == 0)
489 ret = nv50_curs_new(drm, head->base.index, &curs);
490 if (ret) {
491 kfree(head);
492 return ret;
493 }
494
495 crtc = &head->base.base;
496 drm_crtc_init_with_planes(dev, crtc, &wndw->plane, &curs->plane,
497 &nv50_head_func, "head-%d", head->base.index);
498 drm_crtc_helper_add(crtc, &nv50_head_help);
499 drm_mode_crtc_set_gamma_size(crtc, 256);
500
501 if (head->func->olut_set) {
502 ret = nv50_lut_init(disp, &drm->client.mmu, &head->olut);
503 if (ret)
504 goto out;
505 }
506
507out:
508 if (ret)
509 nv50_head_destroy(crtc);
510 return ret;
511}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
new file mode 100644
index 000000000000..37b3248c6dae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -0,0 +1,78 @@
#ifndef __NV50_KMS_HEAD_H__
#define __NV50_KMS_HEAD_H__
/* Upcast from a drm_crtc embedded in nouveau_crtc embedded in nv50_head. */
#define nv50_head(c) container_of((c), struct nv50_head, base.base)
#include "disp.h"
#include "atom.h"
#include "lut.h"

#include "nouveau_crtc.h"

/* Per-head (CRTC) state: hardware vfuncs plus the output LUT buffer. */
struct nv50_head {
	const struct nv50_head_func *func;	/* generation-specific methods */
	struct nouveau_crtc base;
	struct nv50_lut olut;			/* output (gamma) LUT buffer */
};

int nv50_head_create(struct drm_device *, int index);
void nv50_head_flush_set(struct nv50_head *, struct nv50_head_atom *);
void nv50_head_flush_clr(struct nv50_head *, struct nv50_head_atom *, bool y);

/* Generation-specific head method implementations.  *_set/*_clr pairs
 * push/clear hardware state; *_calc/*_layout/*_format derive software
 * state during atomic check.
 */
struct nv50_head_func {
	void (*view)(struct nv50_head *, struct nv50_head_atom *);
	void (*mode)(struct nv50_head *, struct nv50_head_atom *);
	void (*olut)(struct nv50_head *, struct nv50_head_atom *);
	void (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
	void (*olut_clr)(struct nv50_head *);
	void (*core_calc)(struct nv50_head *, struct nv50_head_atom *);
	void (*core_set)(struct nv50_head *, struct nv50_head_atom *);
	void (*core_clr)(struct nv50_head *);
	int (*curs_layout)(struct nv50_head *, struct nv50_wndw_atom *,
			   struct nv50_head_atom *);
	int (*curs_format)(struct nv50_head *, struct nv50_wndw_atom *,
			   struct nv50_head_atom *);
	void (*curs_set)(struct nv50_head *, struct nv50_head_atom *);
	void (*curs_clr)(struct nv50_head *);
	void (*base)(struct nv50_head *, struct nv50_head_atom *);
	void (*ovly)(struct nv50_head *, struct nv50_head_atom *);
	void (*dither)(struct nv50_head *, struct nv50_head_atom *);
	void (*procamp)(struct nv50_head *, struct nv50_head_atom *);
	void (*or)(struct nv50_head *, struct nv50_head_atom *);
};

/* NV50/G80 (507D core channel). */
extern const struct nv50_head_func head507d;
void head507d_view(struct nv50_head *, struct nv50_head_atom *);
void head507d_mode(struct nv50_head *, struct nv50_head_atom *);
void head507d_olut(struct nv50_head *, struct nv50_head_atom *);
void head507d_core_calc(struct nv50_head *, struct nv50_head_atom *);
void head507d_core_clr(struct nv50_head *);
int head507d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
			 struct nv50_head_atom *);
int head507d_curs_format(struct nv50_head *, struct nv50_wndw_atom *,
			 struct nv50_head_atom *);
void head507d_base(struct nv50_head *, struct nv50_head_atom *);
void head507d_ovly(struct nv50_head *, struct nv50_head_atom *);
void head507d_dither(struct nv50_head *, struct nv50_head_atom *);
void head507d_procamp(struct nv50_head *, struct nv50_head_atom *);

/* G84/G200 (827D core channel). */
extern const struct nv50_head_func head827d;

/* GF119 (907D core channel). */
extern const struct nv50_head_func head907d;
void head907d_view(struct nv50_head *, struct nv50_head_atom *);
void head907d_mode(struct nv50_head *, struct nv50_head_atom *);
void head907d_olut(struct nv50_head *, struct nv50_head_atom *);
void head907d_olut_set(struct nv50_head *, struct nv50_head_atom *);
void head907d_olut_clr(struct nv50_head *);
void head907d_core_set(struct nv50_head *, struct nv50_head_atom *);
void head907d_core_clr(struct nv50_head *);
void head907d_curs_set(struct nv50_head *, struct nv50_head_atom *);
void head907d_curs_clr(struct nv50_head *);
void head907d_ovly(struct nv50_head *, struct nv50_head_atom *);
void head907d_procamp(struct nv50_head *, struct nv50_head_atom *);
void head907d_or(struct nv50_head *, struct nv50_head_atom *);

/* GK104 (917D core channel). */
extern const struct nv50_head_func head917d;
int head917d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
			 struct nv50_head_atom *);

/* GV100+ (C37D core channel). */
extern const struct nv50_head_func headc37d;
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
new file mode 100644
index 000000000000..51bc5996fd37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -0,0 +1,325 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "head.h"
23#include "core.h"
24
/* Push the procamp (saturation sin/cos) method for this head to the core
 * EVO channel.  Update is silently skipped if channel space is unavailable.
 */
void
head507d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
		evo_data(push, asyh->procamp.sat.sin << 20 |
			       asyh->procamp.sat.cos << 8);
		evo_kick(push, core);
	}
}
37
/* Push the dithering-control method (mode/bits/enable) for this head. */
void
head507d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
		evo_data(push, asyh->dither.mode << 3 |
			       asyh->dither.bits << 1 |
			       asyh->dither.enable);
		evo_kick(push, core);
	}
}
51
/* Push the overlay-usage-bounds method; encodes the overlay's pixel
 * depth (cpp) and whether an overlay is in use at all.
 */
void
head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 bounds = 0;
	u32 *push;

	if (asyh->ovly.cpp) {
		switch (asyh->ovly.cpp) {
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00000001;	/* usable */
	} else {
		/* No overlay: still program a default depth. */
		bounds |= 0x00000100;
	}

	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}
78
/* Push the base-channel-usage-bounds method; encodes the base layer's
 * pixel depth (cpp) and whether it is usable.
 */
void
head507d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 bounds = 0;
	u32 *push;

	if (asyh->base.cpp) {
		switch (asyh->base.cpp) {
		case 8: bounds |= 0x00000500; break;
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		case 1: bounds |= 0x00000000; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00000001;	/* usable */
	}

	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}
105
/* Disable the hardware cursor for this head. */
static void
head507d_curs_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
		evo_data(push, 0x05000000);
		evo_kick(push, core);
	}
}
117
/* Enable the hardware cursor: layout, format and image offset. */
static void
head507d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 3))) {
		evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
		evo_data(push, 0x80000000 | asyh->curs.layout << 26 |
			       asyh->curs.format << 24);
		evo_data(push, asyh->curs.offset >> 8);
		evo_kick(push, core);
	}
}
131
132int
133head507d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
134 struct nv50_head_atom *asyh)
135{
136 switch (asyw->image.format) {
137 case 0xcf: asyh->curs.format = 1; break;
138 default:
139 WARN_ON(1);
140 return -EINVAL;
141 }
142 return 0;
143}
144
145int
146head507d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
147 struct nv50_head_atom *asyh)
148{
149 switch (asyw->image.w) {
150 case 32: asyh->curs.layout = 0; break;
151 case 64: asyh->curs.layout = 1; break;
152 default:
153 return -EINVAL;
154 }
155 return 0;
156}
157
/* Detach the core channel's ISO context DMA from this head. */
void
head507d_core_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}
169
/* Program the core channel surface for this head: offset, size,
 * layout/pitch/block geometry, kind+format, context DMA handle and
 * viewport position.
 */
static void
head507d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 9))) {
		evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
		evo_data(push, asyh->core.offset >> 8);
		evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
		evo_data(push, asyh->core.h << 16 | asyh->core.w);
		evo_data(push, asyh->core.layout << 20 |
			       (asyh->core.pitch >> 8) << 8 |
			       asyh->core.blocks << 8 |
			       asyh->core.blockh);
		evo_data(push, asyh->core.kind << 16 |
			       asyh->core.format << 8);
		evo_data(push, asyh->core.handle);
		evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
		evo_data(push, asyh->core.y << 16 | asyh->core.x);
		evo_kick(push, core);

		/* EVO will complain with INVALID_STATE if we have an
		 * active cursor and (re)specify HeadSetContextDmaIso
		 * without also updating HeadSetOffsetCursor.
		 */
		asyh->set.curs = asyh->curs.visible;
		asyh->set.olut = asyh->olut.handle != 0;
	}
}
199
/* Derive the core channel surface state from the base/overlay/cursor
 * state.  The core surface mirrors the base layer when one is visible;
 * otherwise, if only an overlay or cursor is visible, a full-screen
 * placeholder surface is described instead.
 *
 * Note the deliberate assignments inside the conditions below - each
 * one records visibility as it is tested.
 */
void
head507d_core_calc(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_disp *disp = nv50_disp(head->base.base.dev);
	if ((asyh->core.visible = (asyh->base.cpp != 0))) {
		/* Core surface tracks the base layer. */
		asyh->core.x = asyh->base.x;
		asyh->core.y = asyh->base.y;
		asyh->core.w = asyh->base.w;
		asyh->core.h = asyh->base.h;
	} else
	if ((asyh->core.visible = (asyh->ovly.cpp != 0)) ||
	    (asyh->core.visible = asyh->curs.visible)) {
		/*XXX: We need to either find some way of having the
		 *     primary base layer appear black, while still
		 *     being able to display the other layers, or we
		 *     need to allocate a dummy black surface here.
		 */
		asyh->core.x = 0;
		asyh->core.y = 0;
		asyh->core.w = asyh->state.mode.hdisplay;
		asyh->core.h = asyh->state.mode.vdisplay;
	}
	/* Fixed surface parameters: linear 32bpp at the start of VRAM ctxdma. */
	asyh->core.handle = disp->core->chan.vram.handle;
	asyh->core.offset = 0;
	asyh->core.format = 0xcf;
	asyh->core.kind = 0;
	asyh->core.layout = 1;
	asyh->core.blockh = 0;
	asyh->core.blocks = 0;
	asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
}
231
/* Disable the output (gamma) LUT for this head. */
static void
head507d_olut_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}
243
/* Enable the output LUT: mode bit and LUT buffer offset. */
static void
head507d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 3))) {
		evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
		evo_data(push, 0x80000000 | asyh->olut.mode << 30);
		evo_data(push, asyh->olut.offset >> 8);
		evo_kick(push, core);
	}
}
256
257void
258head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
259{
260 if (asyh->base.cpp == 1)
261 asyh->olut.mode = 0;
262 else
263 asyh->olut.mode = 1;
264}
265
/* Program the raster timings (pixel clock, interlace flag, h/v active,
 * sync, blanking and second-field blanking windows) for this head.
 */
void
head507d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	struct nv50_head_mode *m = &asyh->mode;
	u32 *push;
	if ((push = evo_wait(core, 13))) {
		evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
		evo_data(push, 0x00800000 | m->clock);
		evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
		evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
		evo_data(push, 0x00000000);
		evo_data(push, m->v.active << 16 | m->h.active );
		evo_data(push, m->v.synce << 16 | m->h.synce );
		evo_data(push, m->v.blanke << 16 | m->h.blanke );
		evo_data(push, m->v.blanks << 16 | m->h.blanks );
		evo_data(push, m->v.blank2e << 16 | m->v.blank2s);
		evo_data(push, asyh->mode.v.blankus);
		evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}
289
/* Program the viewport scaler: input size and (duplicated) output size. */
void
head507d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 7))) {
		evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
		evo_data(push, asyh->view.iH << 16 | asyh->view.iW);
		evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
		evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
		evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
		evo_kick(push, core);
	}
}
306
/* Head methods for the original NV50/G80 (507D) core channel. */
const struct nv50_head_func
head507d = {
	.view = head507d_view,
	.mode = head507d_mode,
	.olut = head507d_olut,
	.olut_set = head507d_olut_set,
	.olut_clr = head507d_olut_clr,
	.core_calc = head507d_core_calc,
	.core_set = head507d_core_set,
	.core_clr = head507d_core_clr,
	.curs_layout = head507d_curs_layout,
	.curs_format = head507d_curs_format,
	.curs_set = head507d_curs_set,
	.curs_clr = head507d_curs_clr,
	.base = head507d_base,
	.ovly = head507d_ovly,
	.dither = head507d_dither,
	.procamp = head507d_procamp,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head827d.c b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
new file mode 100644
index 000000000000..af5e7bd5978b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
@@ -0,0 +1,124 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "head.h"
23#include "core.h"
24
/* Disable the hardware cursor and detach its context DMA (827D adds
 * a separate cursor ctxdma method compared to 507D).
 */
static void
head827d_curs_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
		evo_data(push, 0x05000000);
		evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}
38
/* Enable the hardware cursor: layout/format/offset plus ctxdma handle. */
static void
head827d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 5))) {
		evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
		evo_data(push, 0x80000000 | asyh->curs.layout << 26 |
			       asyh->curs.format << 24);
		evo_data(push, asyh->curs.offset >> 8);
		evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
		evo_data(push, asyh->curs.handle);
		evo_kick(push, core);
	}
}
54
/* Program the core channel surface (827D variant: no 'kind' field in
 * the format word, and no cursor/olut re-arm needed as on 507D).
 */
static void
head827d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 9))) {
		evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
		evo_data(push, asyh->core.offset >> 8);
		evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
		evo_data(push, asyh->core.h << 16 | asyh->core.w);
		evo_data(push, asyh->core.layout << 20 |
			       (asyh->core.pitch >> 8) << 8 |
			       asyh->core.blocks << 8 |
			       asyh->core.blockh);
		evo_data(push, asyh->core.format << 8);
		evo_data(push, asyh->core.handle);
		evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
		evo_data(push, asyh->core.y << 16 | asyh->core.x);
		evo_kick(push, core);
	}
}
76
/* Disable the output LUT and detach its context DMA. */
static void
head827d_olut_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}
90
/* Enable the output LUT: mode, buffer offset and ctxdma handle. */
static void
head827d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 5))) {
		evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
		evo_data(push, 0x80000000 | asyh->olut.mode << 30);
		evo_data(push, asyh->olut.offset >> 8);
		evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
		evo_data(push, asyh->olut.handle);
		evo_kick(push, core);
	}
}
105
/* Head methods for the G84/G200-era (827D) core channel; mostly 507D
 * with ctxdma-aware cursor/olut/core variants.
 */
const struct nv50_head_func
head827d = {
	.view = head507d_view,
	.mode = head507d_mode,
	.olut = head507d_olut,
	.olut_set = head827d_olut_set,
	.olut_clr = head827d_olut_clr,
	.core_calc = head507d_core_calc,
	.core_set = head827d_core_set,
	.core_clr = head507d_core_clr,
	.curs_layout = head507d_curs_layout,
	.curs_format = head507d_curs_format,
	.curs_set = head827d_curs_set,
	.curs_clr = head827d_curs_clr,
	.base = head507d_base,
	.ovly = head507d_ovly,
	.dither = head507d_dither,
	.procamp = head507d_procamp,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
new file mode 100644
index 000000000000..633907163eb1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -0,0 +1,284 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "head.h"
23#include "core.h"
24
/* Program the output-resource control method: pixel depth, sync
 * polarities and interlace flag for this head.
 */
void
head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 3))) {
		evo_mthd(push, 0x0404 + (head->base.index * 0x300), 2);
		evo_data(push, 0x00000001 | asyh->or.depth << 6 |
			       asyh->or.nvsync << 4 |
			       asyh->or.nhsync << 3);
		/* 0x31ec6000: hardware control bits, not decoded here. */
		evo_data(push, 0x31ec6000 | head->base.index << 25 |
			       asyh->mode.interlace);
		evo_kick(push, core);
	}
}
40
/* Push the procamp (saturation sin/cos) method (907D: 0x300 stride). */
void
head907d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
		evo_data(push, asyh->procamp.sat.sin << 20 |
			       asyh->procamp.sat.cos << 8);
		evo_kick(push, core);
	}
}
53
/* Push the dithering-control method (mode/bits/enable) for this head. */
static void
head907d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
		evo_data(push, asyh->dither.mode << 3 |
			       asyh->dither.bits << 1 |
			       asyh->dither.enable);
		evo_kick(push, core);
	}
}
67
/* Push the overlay-usage-bounds method; 907D additionally supports
 * 8-byte-per-pixel overlay formats.
 */
void
head907d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 bounds = 0;
	u32 *push;

	if (asyh->ovly.cpp) {
		switch (asyh->ovly.cpp) {
		case 8: bounds |= 0x00000500; break;
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00000001;	/* usable */
	} else {
		/* No overlay: still program a default depth. */
		bounds |= 0x00000100;
	}

	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}
95
/* Push the base-channel-usage-bounds method (depth and usability). */
static void
head907d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 bounds = 0;
	u32 *push;

	if (asyh->base.cpp) {
		switch (asyh->base.cpp) {
		case 8: bounds |= 0x00000500; break;
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		case 1: bounds |= 0x00000000; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00000001;	/* usable */
	}

	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}
122
/* Disable the hardware cursor and detach its context DMA. */
void
head907d_curs_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
		evo_data(push, 0x05000000);
		evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}
136
/* Enable the hardware cursor: layout/format/offset plus ctxdma handle. */
void
head907d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 5))) {
		evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
		evo_data(push, 0x80000000 | asyh->curs.layout << 26 |
			       asyh->curs.format << 24);
		evo_data(push, asyh->curs.offset >> 8);
		evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
		evo_data(push, asyh->curs.handle);
		evo_kick(push, core);
	}
}
152
/* Detach the core channel's ISO context DMA from this head. */
void
head907d_core_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}
164
/* Program the core channel surface for this head (907D: layout field
 * at bit 24, no 'kind' field, viewport position at 0x04b0).
 */
void
head907d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 9))) {
		evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
		evo_data(push, asyh->core.offset >> 8);
		evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
		evo_data(push, asyh->core.h << 16 | asyh->core.w);
		evo_data(push, asyh->core.layout << 24 |
			       (asyh->core.pitch >> 8) << 8 |
			       asyh->core.blocks << 8 |
			       asyh->core.blockh);
		evo_data(push, asyh->core.format << 8);
		evo_data(push, asyh->core.handle);
		evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
		evo_data(push, asyh->core.y << 16 | asyh->core.x);
		evo_kick(push, core);
	}
}
186
/* Disable the output LUT and detach its context DMA. */
void
head907d_olut_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		evo_mthd(push, 0x0448 + (head->base.index * 0x300), 1);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}
200
/* Enable the output LUT: mode (bit 24 on 907D), offset and ctxdma handle. */
void
head907d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 5))) {
		evo_mthd(push, 0x0448 + (head->base.index * 0x300), 2);
		evo_data(push, 0x80000000 | asyh->olut.mode << 24);
		evo_data(push, asyh->olut.offset >> 8);
		evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
		evo_data(push, asyh->olut.handle);
		evo_kick(push, core);
	}
}
215
/* 907D always uses LUT mode 7, regardless of surface format. */
void
head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	asyh->olut.mode = 7;
}
221
/* Program the raster timings for this head; 907D takes the pixel clock
 * in Hz (hence the * 1000 from the kHz value in nv50_head_mode).
 */
void
head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	struct nv50_head_mode *m = &asyh->mode;
	u32 *push;
	if ((push = evo_wait(core, 14))) {
		evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
		evo_data(push, 0x00000000);
		evo_data(push, m->v.active << 16 | m->h.active );
		evo_data(push, m->v.synce << 16 | m->h.synce );
		evo_data(push, m->v.blanke << 16 | m->h.blanke );
		evo_data(push, m->v.blanks << 16 | m->h.blanks );
		evo_data(push, m->v.blank2e << 16 | m->v.blank2s);
		evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
		evo_data(push, 0x00000000); /* ??? */
		evo_data(push, 0xffffff00);
		evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
		evo_data(push, m->clock * 1000);
		evo_data(push, 0x00200000); /* ??? */
		evo_data(push, m->clock * 1000);
		evo_kick(push, core);
	}
}
246
/* Program the viewport scaler: input size and (triplicated) output size. */
void
head907d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 8))) {
		evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
		evo_data(push, asyh->view.iH << 16 | asyh->view.iW);
		evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
		evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
		evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
		evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
		evo_kick(push, core);
	}
}
264
/* Head methods for the GF119 (907D) core channel; first generation with
 * a per-head OR method.
 */
const struct nv50_head_func
head907d = {
	.view = head907d_view,
	.mode = head907d_mode,
	.olut = head907d_olut,
	.olut_set = head907d_olut_set,
	.olut_clr = head907d_olut_clr,
	.core_calc = head507d_core_calc,
	.core_set = head907d_core_set,
	.core_clr = head907d_core_clr,
	.curs_layout = head507d_curs_layout,
	.curs_format = head507d_curs_format,
	.curs_set = head907d_curs_set,
	.curs_clr = head907d_curs_clr,
	.base = head907d_base,
	.ovly = head907d_ovly,
	.dither = head907d_dither,
	.procamp = head907d_procamp,
	.or = head907d_or,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
new file mode 100644
index 000000000000..303df8459ca8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -0,0 +1,100 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "head.h"
23#include "core.h"
24
/* Program the head's dithering control register from the atom state
 * (mode, bit depth, enable).  Field positions differ from 907d, hence
 * the per-chipset implementation.
 */
static void
head917d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
		evo_data(push, asyh->dither.mode << 3 |
			       asyh->dither.bits << 1 |
			       asyh->dither.enable);
		evo_kick(push, core);
	}
}
38
/* Program the base-channel usage bounds for this head, derived from the
 * base plane's bytes-per-pixel.  A zero cpp (no base plane) leaves the
 * bounds register cleared.
 */
static void
head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 bounds = 0;
	u32 *push;

	if (asyh->base.cpp) {
		/* Encode pixel depth into the bounds register.  NOTE(review):
		 * the exact field encodings come from the hardware method
		 * definitions — confirm against the 917d class headers.
		 */
		switch (asyh->base.cpp) {
		case 8: bounds |= 0x00000500; break;
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		case 1: bounds |= 0x00000000; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00020001;
	}

	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}
65
66int
67head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
68 struct nv50_head_atom *asyh)
69{
70 switch (asyw->state.fb->width) {
71 case 32: asyh->curs.layout = 0; break;
72 case 64: asyh->curs.layout = 1; break;
73 case 128: asyh->curs.layout = 2; break;
74 case 256: asyh->curs.layout = 3; break;
75 default:
76 return -EINVAL;
77 }
78 return 0;
79}
80
/* Head functions for GK104 (917d) core channels.  Mostly inherits the
 * 907d implementation; dither, base bounds and cursor layout differ.
 */
const struct nv50_head_func
head917d = {
	.view = head907d_view,
	.mode = head907d_mode,
	.olut = head907d_olut,
	.olut_set = head907d_olut_set,
	.olut_clr = head907d_olut_clr,
	.core_calc = head507d_core_calc,
	.core_set = head907d_core_set,
	.core_clr = head907d_core_clr,
	.curs_layout = head917d_curs_layout,
	.curs_format = head507d_curs_format,
	.curs_set = head907d_curs_set,
	.curs_clr = head907d_curs_clr,
	.base = head917d_base,
	.ovly = head907d_ovly,
	.dither = head917d_dither,
	.procamp = head907d_procamp,
	.or = head907d_or,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
new file mode 100644
index 000000000000..989c14083066
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -0,0 +1,212 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "head.h"
23#include "atom.h"
24#include "core.h"
25
26static void
27headc37d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
28{
29 struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
30 u32 *push;
31 if ((push = evo_wait(core, 2))) {
32 /*XXX: This is a dirty hack until OR depth handling is
33 * improved later for deep colour etc.
34 */
35 switch (asyh->or.depth) {
36 case 6: asyh->or.depth = 5; break;
37 case 5: asyh->or.depth = 4; break;
38 case 2: asyh->or.depth = 1; break;
39 case 0: asyh->or.depth = 4; break;
40 default:
41 WARN_ON(1);
42 break;
43 }
44
45 evo_mthd(push, 0x2004 + (head->base.index * 0x400), 1);
46 evo_data(push, 0x00000001 |
47 asyh->or.depth << 4 |
48 asyh->or.nvsync << 3 |
49 asyh->or.nhsync << 2);
50 evo_kick(push, core);
51 }
52}
53
/* Program the head's procamp (saturation sin/cos) register. */
static void
headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x2000 + (head->base.index * 0x400), 1);
		evo_data(push, 0x80000000 |
			       asyh->procamp.sat.sin << 16 |
			       asyh->procamp.sat.cos << 4);
		evo_kick(push, core);
	}
}
67
/* Program the head's dithering control (mode, bits, enable) on GV100
 * (c37d) core channels; head methods are strided by 0x400.
 */
static void
headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x2018 + (head->base.index * 0x0400), 1);
		evo_data(push, asyh->dither.mode << 8 |
			       asyh->dither.bits << 4 |
			       asyh->dither.enable);
		evo_kick(push, core);
	}
}
81
/* Disable the hardware cursor: clear the enable bit in cursor control
 * and drop the cursor context DMA handle.
 */
static void
headc37d_curs_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		evo_mthd(push, 0x209c + head->base.index * 0x400, 1);
		evo_data(push, 0x000000cf);
		evo_mthd(push, 0x2088 + head->base.index * 0x400, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}
95
/* Enable the hardware cursor: layout/format, context DMA handle, and
 * surface offset (in 256-byte units).
 */
static void
headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 7))) {
		evo_mthd(push, 0x209c + head->base.index * 0x400, 2);
		evo_data(push, 0x80000000 |
			       asyh->curs.layout << 8 |
			       asyh->curs.format << 0);
		evo_data(push, 0x000072ff);
		evo_mthd(push, 0x2088 + head->base.index * 0x400, 1);
		evo_data(push, asyh->curs.handle);
		evo_mthd(push, 0x2090 + head->base.index * 0x400, 1);
		evo_data(push, asyh->curs.offset >> 8);
		evo_kick(push, core);
	}
}
114
/* c37d passes the window image format straight through as the cursor
 * format; no translation table is needed.
 */
static int
headc37d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
		     struct nv50_head_atom *asyh)
{
	asyh->curs.format = asyw->image.format;
	return 0;
}
122
/* Disable the output LUT by clearing its context DMA handle. */
static void
headc37d_olut_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x20ac + (head->base.index * 0x400), 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}
134
/* Program the output LUT: control word (output mode, range, size),
 * buffer offset (256-byte units), and context DMA handle.
 */
static void
headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		evo_mthd(push, 0x20a4 + (head->base.index * 0x400), 3);
		evo_data(push, asyh->olut.output_mode << 8 |
			       asyh->olut.range << 4 |
			       asyh->olut.size);
		evo_data(push, asyh->olut.offset >> 8);
		evo_data(push, asyh->olut.handle);
		evo_kick(push, core);
	}
}
150
/* Select the OLUT parameters used on c37d.  NOTE(review): the numeric
 * values are hardware enum encodings (mode/size/range/output mode) —
 * confirm their meaning against the volta display class headers.
 */
static void
headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	asyh->olut.mode = 2;
	asyh->olut.size = 0;
	asyh->olut.range = 0;
	asyh->olut.output_mode = 1;
}
159
/* Program the head's raster timings and pixel clock from the computed
 * mode state.
 */
static void
headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	struct nv50_head_mode *m = &asyh->mode;
	u32 *push;
	if ((push = evo_wait(core, 12))) {
		evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
		evo_data(push, (m->v.active  << 16) | m->h.active );
		evo_data(push, (m->v.synce   << 16) | m->h.synce  );
		evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
		evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
		evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
		/* Pixel clock in Hz, written to two clock registers. */
		evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
		evo_data(push, m->clock * 1000);
		evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
		evo_data(push, m->clock * 1000);
		/*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
		evo_mthd(push, 0x2030 + (head->base.index * 0x400), 1);
		evo_data(push, 0x00000124);
		evo_kick(push, core);
	}
}
183
/* Program the head's viewport input and output sizes. */
static void
headc37d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		evo_mthd(push, 0x204c + (head->base.index * 0x400), 1);
		evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
		evo_mthd(push, 0x2058 + (head->base.index * 0x400), 1);
		evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
		evo_kick(push, core);
	}
}
197
/* Head functions for GV100 (c37d) core channels.  No core/base/ovly
 * bounds callbacks: window handling replaces them on this generation.
 */
const struct nv50_head_func
headc37d = {
	.view = headc37d_view,
	.mode = headc37d_mode,
	.olut = headc37d_olut,
	.olut_set = headc37d_olut_set,
	.olut_clr = headc37d_olut_clr,
	.curs_layout = head917d_curs_layout,
	.curs_format = headc37d_curs_format,
	.curs_set = headc37d_curs_set,
	.curs_clr = headc37d_curs_clr,
	.dither = headc37d_dither,
	.procamp = headc37d_procamp,
	.or = headc37d_or,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.c b/drivers/gpu/drm/nouveau/dispnv50/lut.c
new file mode 100644
index 000000000000..a6b96ae2a22f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.c
@@ -0,0 +1,95 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "lut.h"
23#include "disp.h"
24
25#include <drm/drm_color_mgmt.h>
26#include <drm/drm_mode.h>
27#include <drm/drm_property.h>
28
29#include <nvif/class.h>
30
/* Upload a DRM color LUT blob into one of the LUT's VRAM buffers and
 * return that buffer's GPU address.  "legacy" selects the 8-bit
 * (256-entry, shifted) encoding, otherwise the 14-bit encoding with
 * a 0x6000 bias is used.
 */
u32
nv50_lut_load(struct nv50_lut *lut, bool legacy, int buffer,
	      struct drm_property_blob *blob)
{
	struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
	void __iomem *mem = lut->mem[buffer].object.map.ptr;
	const int size = blob->length / sizeof(*in);
	int bits, shift, i;
	u16 zero, r, g, b;
	u32 addr = lut->mem[buffer].addr;

	/* This can't happen.. But it shuts the compiler up.
	 * NOTE(review): this assumes every blob reaching here has exactly
	 * 256 entries — confirm against the GAMMA_LUT size exposed to
	 * userspace.
	 */
	if (WARN_ON(size != 256))
		return 0;

	if (legacy) {
		bits = 11;
		shift = 3;
		zero = 0x0000;
	} else {
		bits = 14;
		shift = 0;
		zero = 0x6000;
	}

	/* Entries are packed as 16-bit R,G,B (plus padding) every 8 bytes. */
	for (i = 0; i < size; i++) {
		r = (drm_color_lut_extract(in[i].  red, bits) + zero) << shift;
		g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
		b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
		writew(r, mem + (i * 0x08) + 0);
		writew(g, mem + (i * 0x08) + 2);
		writew(b, mem + (i * 0x08) + 4);
	}

	/* INTERPOLATE modes require a "next" entry to interpolate with,
	 * so we replicate the last entry to deal with this for now.
	 */
	writew(r, mem + (i * 0x08) + 0);
	writew(g, mem + (i * 0x08) + 2);
	writew(b, mem + (i * 0x08) + 4);
	return addr;
}
73
/* Release both VRAM buffers backing the double-buffered LUT. */
void
nv50_lut_fini(struct nv50_lut *lut)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(lut->mem); i++)
		nvif_mem_fini(&lut->mem[i]);
}
81
/* Allocate and map the LUT's VRAM buffers: 257 8-byte entries before
 * GF110 (256 + replicated interpolation entry), 1025 from GF110 on.
 *
 * NOTE(review): on a mid-loop failure, buffers already initialized are
 * not torn down here — presumably the caller invokes nv50_lut_fini()
 * on its error path; verify.
 */
int
nv50_lut_init(struct nv50_disp *disp, struct nvif_mmu *mmu,
	      struct nv50_lut *lut)
{
	const u32 size = disp->disp->object.oclass < GF110_DISP ? 257 : 1025;
	int i;
	for (i = 0; i < ARRAY_SIZE(lut->mem); i++) {
		int ret = nvif_mem_init_map(mmu, NVIF_MEM_VRAM, size * 8,
					    &lut->mem[i]);
		if (ret)
			return ret;
	}
	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.h b/drivers/gpu/drm/nouveau/dispnv50/lut.h
new file mode 100644
index 000000000000..6d7b8352e4cb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.h
@@ -0,0 +1,15 @@
1#ifndef __NV50_KMS_LUT_H__
2#define __NV50_KMS_LUT_H__
3#include <nvif/mem.h>
4struct drm_property_blob;
5struct nv50_disp;
6
7struct nv50_lut {
8 struct nvif_mem mem[2];
9};
10
11int nv50_lut_init(struct nv50_disp *, struct nvif_mmu *, struct nv50_lut *);
12void nv50_lut_fini(struct nv50_lut *);
13u32 nv50_lut_load(struct nv50_lut *, bool legacy, int buffer,
14 struct drm_property_blob *);
15#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/oimm.c b/drivers/gpu/drm/nouveau/dispnv50/oimm.c
new file mode 100644
index 000000000000..2a2841d344c8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/oimm.c
@@ -0,0 +1,51 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "oimm.h"
23
24#include <nvif/class.h>
25
/* Attach an overlay-immediate channel to the given overlay window,
 * picking the newest immediate class the display object supports.
 */
int
nv50_oimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
{
	/* Ordered newest-first; nvif_mclass() returns the first match. */
	static const struct {
		s32 oclass;
		int version;
		int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
	} oimms[] = {
		{ GK104_DISP_OVERLAY, 0, oimm507b_init },
		{ GF110_DISP_OVERLAY, 0, oimm507b_init },
		{ GT214_DISP_OVERLAY, 0, oimm507b_init },
		{   G82_DISP_OVERLAY, 0, oimm507b_init },
		{  NV50_DISP_OVERLAY, 0, oimm507b_init },
		{}
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	int cid;

	cid = nvif_mclass(&disp->disp->object, oimms);
	if (cid < 0) {
		NV_ERROR(drm, "No supported overlay immediate class\n");
		return cid;
	}

	return oimms[cid].init(drm, oimms[cid].oclass, wndw);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/oimm.h b/drivers/gpu/drm/nouveau/dispnv50/oimm.h
new file mode 100644
index 000000000000..6fa51f101e94
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/oimm.h
@@ -0,0 +1,8 @@
1#ifndef __NV50_KMS_OIMM_H__
2#define __NV50_KMS_OIMM_H__
3#include "wndw.h"
4
5int oimm507b_init(struct nouveau_drm *, s32, struct nv50_wndw *);
6
7int nv50_oimm_init(struct nouveau_drm *, struct nv50_wndw *);
8#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c b/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c
new file mode 100644
index 000000000000..2ee404b3e19f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c
@@ -0,0 +1,52 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "oimm.h"
23
24#include <nvif/cl507b.h>
25
/* Allocate the overlay-immediate object for a window and hook up the
 * given immediate-function table.
 */
static int
oimm507b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
	       s32 oclass, struct nv50_wndw *wndw)
{
	struct nv50_disp_overlay_v0 args = {
		.head = wndw->id,
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	int ret;

	ret = nvif_object_init(&disp->disp->object, 0, oclass, &args,
			       sizeof(args), &wndw->wimm.base.user);
	if (ret) {
		NV_ERROR(drm, "oimm%04x allocation failed: %d\n", oclass, ret);
		return ret;
	}

	/* NOTE(review): map return value is ignored — presumably a failed
	 * map surfaces later when the immediate registers are poked;
	 * verify this is intentional.
	 */
	nvif_object_map(&wndw->wimm.base.user, NULL, 0);
	wndw->immd = func;
	return 0;
}
47
/* The overlay immediate registers share the cursor (507a) layout, so
 * the cursor function table is reused here.
 */
int
oimm507b_init(struct nouveau_drm *drm, s32 oclass, struct nv50_wndw *wndw)
{
	return oimm507b_init_(&curs507a, drm, oclass, wndw);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly.c b/drivers/gpu/drm/nouveau/dispnv50/ovly.c
new file mode 100644
index 000000000000..90c246d47604
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly.c
@@ -0,0 +1,57 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "ovly.h"
23#include "oimm.h"
24
25#include <nvif/class.h>
26
/* Create an overlay plane for the given head: pick the newest overlay
 * DMA class supported, create the window, then attach its immediate
 * channel.
 */
int
nv50_ovly_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
{
	/* Ordered newest-first; nvif_mclass() returns the first match. */
	static const struct {
		s32 oclass;
		int version;
		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
	} ovlys[] = {
		{ GK104_DISP_OVERLAY_CONTROL_DMA, 0, ovly917e_new },
		{ GF110_DISP_OVERLAY_CONTROL_DMA, 0, ovly907e_new },
		{ GT214_DISP_OVERLAY_CHANNEL_DMA, 0, ovly827e_new },
		{ GT200_DISP_OVERLAY_CHANNEL_DMA, 0, ovly827e_new },
		{   G82_DISP_OVERLAY_CHANNEL_DMA, 0, ovly827e_new },
		{  NV50_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
		{}
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	int cid, ret;

	cid = nvif_mclass(&disp->disp->object, ovlys);
	if (cid < 0) {
		NV_ERROR(drm, "No supported overlay class\n");
		return cid;
	}

	ret = ovlys[cid].new(drm, head, ovlys[cid].oclass, pwndw);
	if (ret)
		return ret;

	return nv50_oimm_init(drm, *pwndw);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly.h b/drivers/gpu/drm/nouveau/dispnv50/ovly.h
new file mode 100644
index 000000000000..4869d52d1786
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly.h
@@ -0,0 +1,30 @@
1#ifndef __NV50_KMS_OVLY_H__
2#define __NV50_KMS_OVLY_H__
3#include "wndw.h"
4
5int ovly507e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
6int ovly507e_new_(const struct nv50_wndw_func *, const u32 *format,
7 struct nouveau_drm *, int head, s32 oclass,
8 u32 interlock_data, struct nv50_wndw **);
9int ovly507e_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
10 struct nv50_head_atom *);
11void ovly507e_release(struct nv50_wndw *, struct nv50_wndw_atom *,
12 struct nv50_head_atom *);
13void ovly507e_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
14void ovly507e_ntfy_clr(struct nv50_wndw *);
15void ovly507e_image_clr(struct nv50_wndw *);
16void ovly507e_scale_set(struct nv50_wndw *, struct nv50_wndw_atom *);
17void ovly507e_update(struct nv50_wndw *, u32 *);
18
19extern const u32 ovly827e_format[];
20void ovly827e_ntfy_reset(struct nouveau_bo *, u32);
21int ovly827e_ntfy_wait_begun(struct nouveau_bo *, u32, struct nvif_device *);
22
23extern const struct nv50_wndw_func ovly907e;
24
25int ovly827e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
26int ovly907e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
27int ovly917e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
28
29int nv50_ovly_new(struct nouveau_drm *, int head, struct nv50_wndw **);
30#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
new file mode 100644
index 000000000000..cc417664f823
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
@@ -0,0 +1,217 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "ovly.h"
23#include "atom.h"
24
25#include <drm/drm_atomic_helper.h>
26#include <drm/drm_plane_helper.h>
27
28#include <nvif/cl507e.h>
29#include <nvif/event.h>
30
/* Kick off the channel's pending state, interlocked with the core
 * channel update when requested.
 */
void
ovly507e_update(struct nv50_wndw *wndw, u32 *interlock)
{
	u32 *push;
	if ((push = evo_wait(&wndw->wndw, 2))) {
		evo_mthd(push, 0x0080, 1);
		evo_data(push, interlock[NV50_DISP_INTERLOCK_CORE]);
		evo_kick(push, &wndw->wndw);
	}
}
41
/* Program the overlay's scaling parameters: source position, source
 * size, and destination width.
 */
void
ovly507e_scale_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	u32 *push;
	if ((push = evo_wait(&wndw->wndw, 4))) {
		evo_mthd(push, 0x00e0, 3);
		evo_data(push, asyw->scale.sy << 16 | asyw->scale.sx);
		evo_data(push, asyw->scale.sh << 16 | asyw->scale.sw);
		evo_data(push, asyw->scale.dw);
		evo_kick(push, &wndw->wndw);
	}
}
54
/* Disable the overlay image: clear presentation control and drop the
 * image context DMA handle.
 */
void
ovly507e_image_clr(struct nv50_wndw *wndw)
{
	u32 *push;
	if ((push = evo_wait(&wndw->wndw, 4))) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x00c0, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &wndw->wndw);
	}
}
67
/* Program the overlay surface: interval, context DMA, offset, size,
 * and storage/format parameters.
 */
static void
ovly507e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	u32 *push;
	if ((push = evo_wait(&wndw->wndw, 12))) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, asyw->image.interval << 4);
		evo_mthd(push, 0x00c0, 1);
		evo_data(push, asyw->image.handle[0]);
		evo_mthd(push, 0x0100, 1);
		evo_data(push, 0x00000002);
		evo_mthd(push, 0x0800, 1);
		evo_data(push, asyw->image.offset[0] >> 8);
		evo_mthd(push, 0x0808, 3);
		evo_data(push, asyw->image.h << 16 | asyw->image.w);
		/* NOTE(review): pitch and blocks are both placed at bit 8 —
		 * presumably only one is non-zero depending on linear vs
		 * block-linear layout; confirm against the wndw prepare
		 * code that fills these fields.
		 */
		evo_data(push, asyw->image.layout << 20 |
			       (asyw->image.pitch[0] >> 8) << 8 |
			       asyw->image.blocks[0] << 8 |
			       asyw->image.blockh);
		evo_data(push, asyw->image.kind << 16 |
			       asyw->image.format << 8 |
			       asyw->image.colorspace);
		evo_kick(push, &wndw->wndw);
	}
}
93
/* Drop the completion notifier's context DMA handle. */
void
ovly507e_ntfy_clr(struct nv50_wndw *wndw)
{
	u32 *push;
	if ((push = evo_wait(&wndw->wndw, 2))) {
		evo_mthd(push, 0x00a4, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &wndw->wndw);
	}
}
104
/* Program the completion notifier: awaken flag + buffer offset, and
 * the notifier's context DMA handle.
 */
void
ovly507e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	u32 *push;
	if ((push = evo_wait(&wndw->wndw, 3))) {
		evo_mthd(push, 0x00a0, 2);
		evo_data(push, asyw->ntfy.awaken << 30 | asyw->ntfy.offset);
		evo_data(push, asyw->ntfy.handle);
		evo_kick(push, &wndw->wndw);
	}
}
116
/* Release the overlay from the head: clear the head's overlay cpp so
 * its usage bounds get recalculated without this plane.
 */
void
ovly507e_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		 struct nv50_head_atom *asyh)
{
	asyh->ovly.cpp = 0;
}
123
/* Validate the overlay plane state (no scaling permitted here) and
 * record the framebuffer's bytes-per-pixel for the head's overlay
 * usage bounds.
 */
int
ovly507e_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		 struct nv50_head_atom *asyh)
{
	const struct drm_framebuffer *fb = asyw->state.fb;
	int ret;

	ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	asyh->ovly.cpp = fb->format->cpp[0];
	return 0;
}
141
142#include "nouveau_bo.h"
143
/* NV50 (507e) overlay window functions; notifier reset/wait are shared
 * with the base (507c) channel implementation.
 */
static const struct nv50_wndw_func
ovly507e = {
	.acquire = ovly507e_acquire,
	.release = ovly507e_release,
	.ntfy_set = ovly507e_ntfy_set,
	.ntfy_clr = ovly507e_ntfy_clr,
	.ntfy_reset = base507c_ntfy_reset,
	.ntfy_wait_begun = base507c_ntfy_wait_begun,
	.image_set = ovly507e_image_set,
	.image_clr = ovly507e_image_clr,
	.scale_set = ovly507e_scale_set,
	.update = ovly507e_update,
};
157
/* Pixel formats accepted by the 507e overlay; zero-terminated. */
static const u32
ovly507e_format[] = {
	DRM_FORMAT_YUYV,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
	0
};
168
/* Common overlay-window constructor: create the plane, its DMA channel,
 * and the flip-completion notify handler.  On error, any window already
 * created is returned through *pwndw so the caller can destroy it.
 */
int
ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format,
	      struct nouveau_drm *drm, int head, s32 oclass, u32 interlock_data,
	      struct nv50_wndw **pwndw)
{
	struct nv50_disp_overlay_channel_dma_v0 args = {
		.head = head,
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_wndw *wndw;
	int ret;

	ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_OVERLAY,
			     "ovly", head, format, BIT(head),
			     NV50_DISP_INTERLOCK_OVLY, interlock_data,
			     &wndw);
	/* Comma operator: always publish the (possibly NULL) window before
	 * checking for failure, so the caller can clean up.
	 */
	if (*pwndw = wndw, ret)
		return ret;

	ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
			       &oclass, 0, &args, sizeof(args),
			       disp->sync->bo.offset, &wndw->wndw);
	if (ret) {
		NV_ERROR(drm, "ovly%04x allocation failed: %d\n", oclass, ret);
		return ret;
	}

	ret = nvif_notify_init(&wndw->wndw.base.user, wndw->notify.func, false,
			       NV50_DISP_OVERLAY_CHANNEL_DMA_V0_NTFY_UEVENT,
			       &(struct nvif_notify_uevent_req) {},
			       sizeof(struct nvif_notify_uevent_req),
			       sizeof(struct nvif_notify_uevent_rep),
			       &wndw->notify);
	if (ret)
		return ret;

	/* Per-window notifier/semaphore offsets within the shared buffer. */
	wndw->ntfy = NV50_DISP_OVLY_NTFY(wndw->id);
	wndw->sema = NV50_DISP_OVLY_SEM0(wndw->id);
	wndw->data = 0x00000000;
	return 0;
}
210
/* NV50 overlay constructor; interlock bit is per-head (byte-strided). */
int
ovly507e_new(struct nouveau_drm *drm, int head, s32 oclass,
	     struct nv50_wndw **pwndw)
{
	return ovly507e_new_(&ovly507e, ovly507e_format, drm, head, oclass,
			     0x00000004 << (head * 8), pwndw);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
new file mode 100644
index 000000000000..aaa9fe5a4fc8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
@@ -0,0 +1,107 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "ovly.h"
23#include "atom.h"
24
25#include <nouveau_bo.h>
26
/* Program the overlay surface on G82 (827e); unlike 507e, the format
 * word carries no "kind" field.
 */
static void
ovly827e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	u32 *push;
	if ((push = evo_wait(&wndw->wndw, 12))) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, asyw->image.interval << 4);
		evo_mthd(push, 0x00c0, 1);
		evo_data(push, asyw->image.handle[0]);
		evo_mthd(push, 0x0100, 1);
		evo_data(push, 0x00000002);
		evo_mthd(push, 0x0800, 1);
		evo_data(push, asyw->image.offset[0] >> 8);
		evo_mthd(push, 0x0808, 3);
		evo_data(push, asyw->image.h << 16 | asyw->image.w);
		evo_data(push, asyw->image.layout << 20 |
			       (asyw->image.pitch[0] >> 8) << 8 |
			       asyw->image.blocks[0] << 8 |
			       asyw->image.blockh);
		evo_data(push, asyw->image.format << 8 |
			       asyw->image.colorspace);
		evo_kick(push, &wndw->wndw);
	}
}
51
/* Poll (up to 2s) for the notifier's "begun" status word; returns 0 on
 * success or the negative timeout value from nvif_msec().
 */
int
ovly827e_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
			 struct nvif_device *device)
{
	s64 time = nvif_msec(device, 2000ULL,
		/* Status lives in the 4th u32 of the notifier. */
		u32 data = nouveau_bo_rd32(bo, offset / 4 + 3);
		if ((data & 0xffff0000) == 0xffff0000)
			break;
		usleep_range(1, 2);
	);
	return time < 0 ? time : 0;
}
64
/* Re-arm the 16-byte notifier: clear the first three words and set the
 * pending bit in the status word.
 */
void
ovly827e_ntfy_reset(struct nouveau_bo *bo, u32 offset)
{
	nouveau_bo_wr32(bo, offset / 4 + 0, 0x00000000);
	nouveau_bo_wr32(bo, offset / 4 + 1, 0x00000000);
	nouveau_bo_wr32(bo, offset / 4 + 2, 0x00000000);
	nouveau_bo_wr32(bo, offset / 4 + 3, 0x80000000);
}
73
/* G82 (827e) overlay window functions; notifier layout differs from
 * 507e, hence the local reset/wait implementations.
 */
static const struct nv50_wndw_func
ovly827e = {
	.acquire = ovly507e_acquire,
	.release = ovly507e_release,
	.ntfy_set = ovly507e_ntfy_set,
	.ntfy_clr = ovly507e_ntfy_clr,
	.ntfy_reset = ovly827e_ntfy_reset,
	.ntfy_wait_begun = ovly827e_ntfy_wait_begun,
	.image_set = ovly827e_image_set,
	.image_clr = ovly507e_image_clr,
	.scale_set = ovly507e_scale_set,
	.update = ovly507e_update,
};
87
/* Pixel formats for the 827e overlay; adds 10bpc XBGR/ABGR over 507e.
 * Zero-terminated; shared with later overlay classes.
 */
const u32
ovly827e_format[] = {
	DRM_FORMAT_YUYV,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ABGR2101010,
	0
};
100
/* G82 overlay constructor; interlock bit is per-head (byte-strided). */
int
ovly827e_new(struct nouveau_drm *drm, int head, s32 oclass,
	     struct nv50_wndw **pwndw)
{
	return ovly507e_new_(&ovly827e, ovly827e_format, drm, head, oclass,
			     0x00000004 << (head * 8), pwndw);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
new file mode 100644
index 000000000000..a3ce53046015
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
@@ -0,0 +1,70 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "ovly.h"
23#include "atom.h"
24
25static void
26ovly907e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
27{
28 u32 *push;
29 if ((push = evo_wait(&wndw->wndw, 12))) {
30 evo_mthd(push, 0x0084, 1);
31 evo_data(push, asyw->image.interval << 4);
32 evo_mthd(push, 0x00c0, 1);
33 evo_data(push, asyw->image.handle[0]);
34 evo_mthd(push, 0x0100, 1);
35 evo_data(push, 0x00000002);
36 evo_mthd(push, 0x0400, 1);
37 evo_data(push, asyw->image.offset[0] >> 8);
38 evo_mthd(push, 0x0408, 3);
39 evo_data(push, asyw->image.h << 16 | asyw->image.w);
40 evo_data(push, asyw->image.layout << 24 |
41 (asyw->image.pitch[0] >> 8) << 8 |
42 asyw->image.blocks[0] << 8 |
43 asyw->image.blockh);
44 evo_data(push, asyw->image.format << 8 |
45 asyw->image.colorspace);
46 evo_kick(push, &wndw->wndw);
47 }
48}
49
50const struct nv50_wndw_func
51ovly907e = {
52 .acquire = ovly507e_acquire,
53 .release = ovly507e_release,
54 .ntfy_set = ovly507e_ntfy_set,
55 .ntfy_clr = ovly507e_ntfy_clr,
56 .ntfy_reset = ovly827e_ntfy_reset,
57 .ntfy_wait_begun = ovly827e_ntfy_wait_begun,
58 .image_set = ovly907e_image_set,
59 .image_clr = ovly507e_image_clr,
60 .scale_set = ovly507e_scale_set,
61 .update = ovly507e_update,
62};
63
64int
65ovly907e_new(struct nouveau_drm *drm, int head, s32 oclass,
66 struct nv50_wndw **pwndw)
67{
68 return ovly507e_new_(&ovly907e, ovly827e_format, drm, head, oclass,
69 0x00000004 << (head * 4), pwndw);
70}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c
new file mode 100644
index 000000000000..505fa7e78523
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "ovly.h"
23
24static const u32
25ovly917e_format[] = {
26 DRM_FORMAT_YUYV,
27 DRM_FORMAT_UYVY,
28 DRM_FORMAT_XRGB8888,
29 DRM_FORMAT_ARGB8888,
30 DRM_FORMAT_XRGB1555,
31 DRM_FORMAT_ARGB1555,
32 DRM_FORMAT_XBGR2101010,
33 DRM_FORMAT_ABGR2101010,
34 DRM_FORMAT_XRGB2101010,
35 DRM_FORMAT_ARGB2101010,
36 0
37};
38
39int
40ovly917e_new(struct nouveau_drm *drm, int head, s32 oclass,
41 struct nv50_wndw **pwndw)
42{
43 return ovly507e_new_(&ovly907e, ovly917e_format, drm, head, oclass,
44 0x00000004 << (head * 4), pwndw);
45}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
new file mode 100644
index 000000000000..d2bac6a341dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
@@ -0,0 +1,44 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "core.h"
23
24static void
25pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
26 struct nv50_head_atom *asyh)
27{
28 u32 *push;
29 if ((push = evo_wait(&core->chan, 2))) {
30 if (asyh) {
31 ctrl |= asyh->or.depth << 16;
32 ctrl |= asyh->or.nvsync << 13;
33 ctrl |= asyh->or.nhsync << 12;
34 }
35 evo_mthd(push, 0x0700 + (or * 0x040), 1);
36 evo_data(push, ctrl);
37 evo_kick(push, &core->chan);
38 }
39}
40
41const struct nv50_outp_func
42pior507d = {
43 .ctrl = pior507d_ctrl,
44};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
new file mode 100644
index 000000000000..5222fe6a9b21
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
@@ -0,0 +1,44 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "core.h"
23
24static void
25sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
26 struct nv50_head_atom *asyh)
27{
28 u32 *push;
29 if ((push = evo_wait(&core->chan, 2))) {
30 if (asyh) {
31 ctrl |= asyh->or.depth << 16;
32 ctrl |= asyh->or.nvsync << 13;
33 ctrl |= asyh->or.nhsync << 12;
34 }
35 evo_mthd(push, 0x0600 + (or * 0x40), 1);
36 evo_data(push, ctrl);
37 evo_kick(push, &core->chan);
38 }
39}
40
41const struct nv50_outp_func
42sor507d = {
43 .ctrl = sor507d_ctrl,
44};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
index 08e2b1fa3806..b0314ec11fb3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2015 Red Hat Inc. 2 * Copyright 2018 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -18,21 +18,24 @@
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 21 */
24#include "dmacnv50.h" 22#include "core.h"
25#include "rootnv50.h"
26 23
27#include <nvif/class.h> 24#include <nvif/class.h>
28 25
29const struct nv50_disp_dmac_oclass 26static void
30gt215_disp_base_oclass = { 27sor907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
31 .base.oclass = GT214_DISP_BASE_CHANNEL_DMA, 28 struct nv50_head_atom *asyh)
32 .base.minver = 0, 29{
33 .base.maxver = 0, 30 u32 *push;
34 .ctor = nv50_disp_base_new, 31 if ((push = evo_wait(&core->chan, 2))) {
35 .func = &nv50_disp_dmac_func, 32 evo_mthd(push, 0x0200 + (or * 0x20), 1);
36 .mthd = &g84_disp_base_chan_mthd, 33 evo_data(push, ctrl);
37 .chid = 1, 34 evo_kick(push, &core->chan);
35 }
36}
37
38const struct nv50_outp_func
39sor907d = {
40 .ctrl = sor907d_ctrl,
38}; 41};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
index 2a99db4bf8f8..dff059241c5d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2012 Red Hat Inc. 2 * Copyright 2018 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -18,20 +18,22 @@
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */ 21 */
24#include "channv50.h" 22#include "core.h"
25#include "rootnv50.h"
26 23
27#include <nvif/class.h> 24static void
25sorc37d_ctrl(struct nv50_core *core, int or, u32 ctrl,
26 struct nv50_head_atom *asyh)
27{
28 u32 *push;
29 if ((push = evo_wait(&core->chan, 2))) {
30 evo_mthd(push, 0x0300 + (or * 0x20), 1);
31 evo_data(push, ctrl);
32 evo_kick(push, &core->chan);
33 }
34}
28 35
29const struct nv50_disp_pioc_oclass 36const struct nv50_outp_func
30gk104_disp_curs_oclass = { 37sorc37d = {
31 .base.oclass = GK104_DISP_CURSOR, 38 .ctrl = sorc37d_ctrl,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_curs_new,
35 .func = &gf119_disp_pioc_func,
36 .chid = { 13, 13 },
37}; 39};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.c b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
new file mode 100644
index 000000000000..fc36e0696407
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
@@ -0,0 +1,47 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "wimm.h"
23
24#include <nvif/class.h>
25
26int
27nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
28{
29 struct {
30 s32 oclass;
31 int version;
32 int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
33 } wimms[] = {
34 { GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
35 {}
36 };
37 struct nv50_disp *disp = nv50_disp(drm->dev);
38 int cid;
39
40 cid = nvif_mclass(&disp->disp->object, wimms);
41 if (cid < 0) {
42 NV_ERROR(drm, "No supported window immediate class\n");
43 return cid;
44 }
45
46 return wimms[cid].init(drm, wimms[cid].oclass, wndw);
47}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.h b/drivers/gpu/drm/nouveau/dispnv50/wimm.h
new file mode 100644
index 000000000000..363052309be9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.h
@@ -0,0 +1,8 @@
1#ifndef __NV50_KMS_WIMM_H__
2#define __NV50_KMS_WIMM_H__
3#include "wndw.h"
4
5int nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *);
6
7int wimmc37b_init(struct nouveau_drm *, s32, struct nv50_wndw *);
8#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
new file mode 100644
index 000000000000..9103b8494279
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -0,0 +1,86 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "wimm.h"
23#include "atom.h"
24#include "wndw.h"
25
26#include <nvif/clc37b.h>
27
28static void
29wimmc37b_update(struct nv50_wndw *wndw, u32 *interlock)
30{
31 u32 *push;
32 if ((push = evo_wait(&wndw->wimm, 2))) {
33 evo_mthd(push, 0x0200, 1);
34 if (interlock[NV50_DISP_INTERLOCK_WNDW] & wndw->interlock.data)
35 evo_data(push, 0x00000003);
36 else
37 evo_data(push, 0x00000001);
38 evo_kick(push, &wndw->wimm);
39 }
40}
41
42static void
43wimmc37b_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
44{
45 u32 *push;
46 if ((push = evo_wait(&wndw->wimm, 2))) {
47 evo_mthd(push, 0x0208, 1);
48 evo_data(push, asyw->point.y << 16 | asyw->point.x);
49 evo_kick(push, &wndw->wimm);
50 }
51}
52
53static const struct nv50_wimm_func
54wimmc37b = {
55 .point = wimmc37b_point,
56 .update = wimmc37b_update,
57};
58
59static int
60wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
61 s32 oclass, struct nv50_wndw *wndw)
62{
63 struct nvc37b_window_imm_channel_dma_v0 args = {
64 .pushbuf = 0xb0007b00 | wndw->id,
65 .index = wndw->id,
66 };
67 struct nv50_disp *disp = nv50_disp(drm->dev);
68 int ret;
69
70 ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
71 &oclass, 0, &args, sizeof(args), 0,
72 &wndw->wimm);
73 if (ret) {
74 NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret);
75 return ret;
76 }
77
78 wndw->immd = func;
79 return 0;
80}
81
82int
83wimmc37b_init(struct nouveau_drm *drm, s32 oclass, struct nv50_wndw *wndw)
84{
85 return wimmc37b_init_(&wimmc37b, drm, oclass, wndw);
86}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
new file mode 100644
index 000000000000..224963b533a6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -0,0 +1,641 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "wndw.h"
23#include "wimm.h"
24
25#include <nvif/class.h>
26#include <nvif/cl0002.h>
27
28#include <drm/drm_atomic_helper.h>
29#include "nouveau_bo.h"
30
31static void
32nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
33{
34 nvif_object_fini(&ctxdma->object);
35 list_del(&ctxdma->head);
36 kfree(ctxdma);
37}
38
39static struct nv50_wndw_ctxdma *
40nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
41{
42 struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
43 struct nv50_wndw_ctxdma *ctxdma;
44 const u8 kind = fb->nvbo->kind;
45 const u32 handle = 0xfb000000 | kind;
46 struct {
47 struct nv_dma_v0 base;
48 union {
49 struct nv50_dma_v0 nv50;
50 struct gf100_dma_v0 gf100;
51 struct gf119_dma_v0 gf119;
52 };
53 } args = {};
54 u32 argc = sizeof(args.base);
55 int ret;
56
57 list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
58 if (ctxdma->object.handle == handle)
59 return ctxdma;
60 }
61
62 if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
63 return ERR_PTR(-ENOMEM);
64 list_add(&ctxdma->head, &wndw->ctxdma.list);
65
66 args.base.target = NV_DMA_V0_TARGET_VRAM;
67 args.base.access = NV_DMA_V0_ACCESS_RDWR;
68 args.base.start = 0;
69 args.base.limit = drm->client.device.info.ram_user - 1;
70
71 if (drm->client.device.info.chipset < 0x80) {
72 args.nv50.part = NV50_DMA_V0_PART_256;
73 argc += sizeof(args.nv50);
74 } else
75 if (drm->client.device.info.chipset < 0xc0) {
76 args.nv50.part = NV50_DMA_V0_PART_256;
77 args.nv50.kind = kind;
78 argc += sizeof(args.nv50);
79 } else
80 if (drm->client.device.info.chipset < 0xd0) {
81 args.gf100.kind = kind;
82 argc += sizeof(args.gf100);
83 } else {
84 args.gf119.page = GF119_DMA_V0_PAGE_LP;
85 args.gf119.kind = kind;
86 argc += sizeof(args.gf119);
87 }
88
89 ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY,
90 &args, argc, &ctxdma->object);
91 if (ret) {
92 nv50_wndw_ctxdma_del(ctxdma);
93 return ERR_PTR(ret);
94 }
95
96 return ctxdma;
97}
98
99int
100nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
101{
102 struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
103 if (asyw->set.ntfy) {
104 return wndw->func->ntfy_wait_begun(disp->sync,
105 asyw->ntfy.offset,
106 wndw->wndw.base.device);
107 }
108 return 0;
109}
110
111void
112nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
113 struct nv50_wndw_atom *asyw)
114{
115 union nv50_wndw_atom_mask clr = {
116 .mask = asyw->clr.mask & ~(flush ? 0 : asyw->set.mask),
117 };
118 if (clr.sema ) wndw->func-> sema_clr(wndw);
119 if (clr.ntfy ) wndw->func-> ntfy_clr(wndw);
120 if (clr.xlut ) wndw->func-> xlut_clr(wndw);
121 if (clr.image) wndw->func->image_clr(wndw);
122
123 interlock[wndw->interlock.type] |= wndw->interlock.data;
124}
125
126void
127nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
128 struct nv50_wndw_atom *asyw)
129{
130 if (interlock) {
131 asyw->image.mode = 0;
132 asyw->image.interval = 1;
133 }
134
135 if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
136 if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
137 if (asyw->set.image) wndw->func->image_set(wndw, asyw);
138
139 if (asyw->set.xlut ) {
140 if (asyw->ilut) {
141 asyw->xlut.i.offset =
142 nv50_lut_load(&wndw->ilut,
143 asyw->xlut.i.mode <= 1,
144 asyw->xlut.i.buffer,
145 asyw->ilut);
146 }
147 wndw->func->xlut_set(wndw, asyw);
148 }
149
150 if (asyw->set.scale) wndw->func->scale_set(wndw, asyw);
151 if (asyw->set.point) {
152 if (asyw->set.point = false, asyw->set.mask)
153 interlock[wndw->interlock.type] |= wndw->interlock.data;
154 interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.data;
155
156 wndw->immd->point(wndw, asyw);
157 wndw->immd->update(wndw, interlock);
158 } else {
159 interlock[wndw->interlock.type] |= wndw->interlock.data;
160 }
161}
162
163void
164nv50_wndw_ntfy_enable(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
165{
166 struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
167
168 asyw->ntfy.handle = wndw->wndw.sync.handle;
169 asyw->ntfy.offset = wndw->ntfy;
170 asyw->ntfy.awaken = false;
171 asyw->set.ntfy = true;
172
173 wndw->func->ntfy_reset(disp->sync, wndw->ntfy);
174 wndw->ntfy ^= 0x10;
175}
176
177static void
178nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
179 struct nv50_wndw_atom *asyw,
180 struct nv50_head_atom *asyh)
181{
182 struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
183 NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
184 wndw->func->release(wndw, asyw, asyh);
185 asyw->ntfy.handle = 0;
186 asyw->sema.handle = 0;
187}
188
189static int
190nv50_wndw_atomic_check_acquire_yuv(struct nv50_wndw_atom *asyw)
191{
192 switch (asyw->state.fb->format->format) {
193 case DRM_FORMAT_YUYV: asyw->image.format = 0x28; break;
194 case DRM_FORMAT_UYVY: asyw->image.format = 0x29; break;
195 default:
196 WARN_ON(1);
197 return -EINVAL;
198 }
199 asyw->image.colorspace = 1;
200 return 0;
201}
202
203static int
204nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
205{
206 switch (asyw->state.fb->format->format) {
207 case DRM_FORMAT_C8 : asyw->image.format = 0x1e; break;
208 case DRM_FORMAT_XRGB8888 :
209 case DRM_FORMAT_ARGB8888 : asyw->image.format = 0xcf; break;
210 case DRM_FORMAT_RGB565 : asyw->image.format = 0xe8; break;
211 case DRM_FORMAT_XRGB1555 :
212 case DRM_FORMAT_ARGB1555 : asyw->image.format = 0xe9; break;
213 case DRM_FORMAT_XBGR2101010:
214 case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
215 case DRM_FORMAT_XBGR8888 :
216 case DRM_FORMAT_ABGR8888 : asyw->image.format = 0xd5; break;
217 case DRM_FORMAT_XRGB2101010:
218 case DRM_FORMAT_ARGB2101010: asyw->image.format = 0xdf; break;
219 default:
220 return -EINVAL;
221 }
222 asyw->image.colorspace = 0;
223 return 0;
224}
225
226static int
227nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
228 struct nv50_wndw_atom *armw,
229 struct nv50_wndw_atom *asyw,
230 struct nv50_head_atom *asyh)
231{
232 struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
233 struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
234 int ret;
235
236 NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
237
238 if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {
239 asyw->image.w = fb->base.width;
240 asyw->image.h = fb->base.height;
241 asyw->image.kind = fb->nvbo->kind;
242
243 ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
244 if (ret) {
245 ret = nv50_wndw_atomic_check_acquire_yuv(asyw);
246 if (ret)
247 return ret;
248 }
249
250 if (asyw->image.kind) {
251 asyw->image.layout = 0;
252 if (drm->client.device.info.chipset >= 0xc0)
253 asyw->image.blockh = fb->nvbo->mode >> 4;
254 else
255 asyw->image.blockh = fb->nvbo->mode;
256 asyw->image.blocks[0] = fb->base.pitches[0] / 64;
257 asyw->image.pitch[0] = 0;
258 } else {
259 asyw->image.layout = 1;
260 asyw->image.blockh = 0;
261 asyw->image.blocks[0] = 0;
262 asyw->image.pitch[0] = fb->base.pitches[0];
263 }
264
265 if (!(asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC))
266 asyw->image.interval = 1;
267 else
268 asyw->image.interval = 0;
269 asyw->image.mode = asyw->image.interval ? 0 : 1;
270 asyw->set.image = wndw->func->image_set != NULL;
271 }
272
273 if (wndw->func->scale_set) {
274 asyw->scale.sx = asyw->state.src_x >> 16;
275 asyw->scale.sy = asyw->state.src_y >> 16;
276 asyw->scale.sw = asyw->state.src_w >> 16;
277 asyw->scale.sh = asyw->state.src_h >> 16;
278 asyw->scale.dw = asyw->state.crtc_w;
279 asyw->scale.dh = asyw->state.crtc_h;
280 if (memcmp(&armw->scale, &asyw->scale, sizeof(asyw->scale)))
281 asyw->set.scale = true;
282 }
283
284 if (wndw->immd) {
285 asyw->point.x = asyw->state.crtc_x;
286 asyw->point.y = asyw->state.crtc_y;
287 if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
288 asyw->set.point = true;
289 }
290
291 return wndw->func->acquire(wndw, asyw, asyh);
292}
293
294static void
295nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
296 struct nv50_wndw_atom *armw,
297 struct nv50_wndw_atom *asyw,
298 struct nv50_head_atom *asyh)
299{
300 struct drm_property_blob *ilut = asyh->state.degamma_lut;
301
302 /* I8 format without an input LUT makes no sense, and the
303 * HW error-checks for this.
304 *
305 * In order to handle legacy gamma, when there's no input
306 * LUT we need to steal the output LUT and use it instead.
307 */
308 if (!ilut && asyw->state.fb->format->format == DRM_FORMAT_C8) {
309 /* This should be an error, but there's legacy clients
310 * that do a modeset before providing a gamma table.
311 *
312 * We keep the window disabled to avoid angering HW.
313 */
314 if (!(ilut = asyh->state.gamma_lut)) {
315 asyw->visible = false;
316 return;
317 }
318
319 if (wndw->func->ilut)
320 asyh->wndw.olut |= BIT(wndw->id);
321 } else {
322 asyh->wndw.olut &= ~BIT(wndw->id);
323 }
324
325 /* Recalculate LUT state. */
326 memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
327 if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
328 wndw->func->ilut(wndw, asyw);
329 asyw->xlut.handle = wndw->wndw.vram.handle;
330 asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
331 asyw->set.xlut = true;
332 }
333
334 /* Handle setting base SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT. */
335 if (wndw->func->olut_core &&
336 (!armw->visible || (armw->xlut.handle && !asyw->xlut.handle)))
337 asyw->set.xlut = true;
338
339 /* Can't do an immediate flip while changing the LUT. */
340 asyh->state.pageflip_flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
341}
342
343static int
344nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
345{
346 struct nouveau_drm *drm = nouveau_drm(plane->dev);
347 struct nv50_wndw *wndw = nv50_wndw(plane);
348 struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
349 struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
350 struct nv50_head_atom *harm = NULL, *asyh = NULL;
351 bool modeset = false;
352 int ret;
353
354 NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
355
356 /* Fetch the assembly state for the head the window will belong to,
357 * and determine whether the window will be visible.
358 */
359 if (asyw->state.crtc) {
360 asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
361 if (IS_ERR(asyh))
362 return PTR_ERR(asyh);
363 modeset = drm_atomic_crtc_needs_modeset(&asyh->state);
364 asyw->visible = asyh->state.active;
365 } else {
366 asyw->visible = false;
367 }
368
369 /* Fetch assembly state for the head the window used to belong to. */
370 if (armw->state.crtc) {
371 harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
372 if (IS_ERR(harm))
373 return PTR_ERR(harm);
374 }
375
376 /* LUT configuration can potentially cause the window to be disabled. */
377 if (asyw->visible && wndw->func->xlut_set &&
378 (!armw->visible ||
379 asyh->state.color_mgmt_changed ||
380 asyw->state.fb->format->format !=
381 armw->state.fb->format->format))
382 nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);
383
384 /* Calculate new window state. */
385 if (asyw->visible) {
386 ret = nv50_wndw_atomic_check_acquire(wndw, modeset,
387 armw, asyw, asyh);
388 if (ret)
389 return ret;
390
391 asyh->wndw.mask |= BIT(wndw->id);
392 } else
393 if (armw->visible) {
394 nv50_wndw_atomic_check_release(wndw, asyw, harm);
395 harm->wndw.mask &= ~BIT(wndw->id);
396 } else {
397 return 0;
398 }
399
400 /* Aside from the obvious case where the window is actively being
401 * disabled, we might also need to temporarily disable the window
402 * when performing certain modeset operations.
403 */
404 if (!asyw->visible || modeset) {
405 asyw->clr.ntfy = armw->ntfy.handle != 0;
406 asyw->clr.sema = armw->sema.handle != 0;
407 asyw->clr.xlut = armw->xlut.handle != 0;
408 if (wndw->func->image_clr)
409 asyw->clr.image = armw->image.handle[0] != 0;
410 }
411
412 return 0;
413}
414
415static void
416nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
417{
418 struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
419 struct nouveau_drm *drm = nouveau_drm(plane->dev);
420
421 NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
422 if (!old_state->fb)
423 return;
424
425 nouveau_bo_unpin(fb->nvbo);
426}
427
428static int
429nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
430{
431 struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
432 struct nouveau_drm *drm = nouveau_drm(plane->dev);
433 struct nv50_wndw *wndw = nv50_wndw(plane);
434 struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
435 struct nv50_head_atom *asyh;
436 struct nv50_wndw_ctxdma *ctxdma;
437 int ret;
438
439 NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
440 if (!asyw->state.fb)
441 return 0;
442
443 ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
444 if (ret)
445 return ret;
446
447 ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
448 if (IS_ERR(ctxdma)) {
449 nouveau_bo_unpin(fb->nvbo);
450 return PTR_ERR(ctxdma);
451 }
452
453 asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
454 asyw->image.handle[0] = ctxdma->object.handle;
455 asyw->image.offset[0] = fb->nvbo->bo.offset;
456
457 if (wndw->func->prepare) {
458 asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
459 if (IS_ERR(asyh))
460 return PTR_ERR(asyh);
461
462 wndw->func->prepare(wndw, asyh, asyw);
463 }
464
465 return 0;
466}
467
468static const struct drm_plane_helper_funcs
469nv50_wndw_helper = {
470 .prepare_fb = nv50_wndw_prepare_fb,
471 .cleanup_fb = nv50_wndw_cleanup_fb,
472 .atomic_check = nv50_wndw_atomic_check,
473};
474
475static void
476nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
477 struct drm_plane_state *state)
478{
479 struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
480 __drm_atomic_helper_plane_destroy_state(&asyw->state);
481 kfree(asyw);
482}
483
484static struct drm_plane_state *
485nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
486{
487 struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
488 struct nv50_wndw_atom *asyw;
489 if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
490 return NULL;
491 __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
492 asyw->sema = armw->sema;
493 asyw->ntfy = armw->ntfy;
494 asyw->ilut = NULL;
495 asyw->xlut = armw->xlut;
496 asyw->image = armw->image;
497 asyw->point = armw->point;
498 asyw->clr.mask = 0;
499 asyw->set.mask = 0;
500 return &asyw->state;
501}
502
503static void
504nv50_wndw_reset(struct drm_plane *plane)
505{
506 struct nv50_wndw_atom *asyw;
507
508 if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
509 return;
510
511 if (plane->state)
512 plane->funcs->atomic_destroy_state(plane, plane->state);
513 plane->state = &asyw->state;
514 plane->state->plane = plane;
515 plane->state->rotation = DRM_MODE_ROTATE_0;
516}
517
518static void
519nv50_wndw_destroy(struct drm_plane *plane)
520{
521 struct nv50_wndw *wndw = nv50_wndw(plane);
522 struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;
523
524 list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
525 nv50_wndw_ctxdma_del(ctxdma);
526 }
527
528 nvif_notify_fini(&wndw->notify);
529 nv50_dmac_destroy(&wndw->wimm);
530 nv50_dmac_destroy(&wndw->wndw);
531
532 nv50_lut_fini(&wndw->ilut);
533
534 drm_plane_cleanup(&wndw->plane);
535 kfree(wndw);
536}
537
538const struct drm_plane_funcs
539nv50_wndw = {
540 .update_plane = drm_atomic_helper_update_plane,
541 .disable_plane = drm_atomic_helper_disable_plane,
542 .destroy = nv50_wndw_destroy,
543 .reset = nv50_wndw_reset,
544 .atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
545 .atomic_destroy_state = nv50_wndw_atomic_destroy_state,
546};
547
548static int
549nv50_wndw_notify(struct nvif_notify *notify)
550{
551 return NVIF_NOTIFY_KEEP;
552}
553
554void
555nv50_wndw_fini(struct nv50_wndw *wndw)
556{
557 nvif_notify_put(&wndw->notify);
558}
559
560void
561nv50_wndw_init(struct nv50_wndw *wndw)
562{
563 nvif_notify_get(&wndw->notify);
564}
565
566int
567nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
568 enum drm_plane_type type, const char *name, int index,
569 const u32 *format, u32 heads,
570 enum nv50_disp_interlock_type interlock_type, u32 interlock_data,
571 struct nv50_wndw **pwndw)
572{
573 struct nouveau_drm *drm = nouveau_drm(dev);
574 struct nvif_mmu *mmu = &drm->client.mmu;
575 struct nv50_disp *disp = nv50_disp(dev);
576 struct nv50_wndw *wndw;
577 int nformat;
578 int ret;
579
580 if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
581 return -ENOMEM;
582 wndw->func = func;
583 wndw->id = index;
584 wndw->interlock.type = interlock_type;
585 wndw->interlock.data = interlock_data;
586 wndw->ctxdma.parent = &wndw->wndw.base.user;
587
588 wndw->ctxdma.parent = &wndw->wndw.base.user;
589 INIT_LIST_HEAD(&wndw->ctxdma.list);
590
591 for (nformat = 0; format[nformat]; nformat++);
592
593 ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
594 format, nformat, NULL,
595 type, "%s-%d", name, index);
596 if (ret) {
597 kfree(*pwndw);
598 *pwndw = NULL;
599 return ret;
600 }
601
602 drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
603
604 if (wndw->func->ilut) {
605 ret = nv50_lut_init(disp, mmu, &wndw->ilut);
606 if (ret)
607 return ret;
608 }
609
610 wndw->notify.func = nv50_wndw_notify;
611 return 0;
612}
613
614int
615nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
616 struct nv50_wndw **pwndw)
617{
618 struct {
619 s32 oclass;
620 int version;
621 int (*new)(struct nouveau_drm *, enum drm_plane_type,
622 int, s32, struct nv50_wndw **);
623 } wndws[] = {
624 { GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
625 {}
626 };
627 struct nv50_disp *disp = nv50_disp(drm->dev);
628 int cid, ret;
629
630 cid = nvif_mclass(&disp->disp->object, wndws);
631 if (cid < 0) {
632 NV_ERROR(drm, "No supported window class\n");
633 return cid;
634 }
635
636 ret = wndws[cid].new(drm, type, index, wndws[cid].oclass, pwndw);
637 if (ret)
638 return ret;
639
640 return nv50_wimm_init(drm, *pwndw);
641}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
new file mode 100644
index 000000000000..b0b6428034b0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -0,0 +1,96 @@
1#ifndef __NV50_KMS_WNDW_H__
2#define __NV50_KMS_WNDW_H__
3#define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)
4#include "disp.h"
5#include "atom.h"
6#include "lut.h"
7
8#include <nvif/notify.h>
9
10struct nv50_wndw_ctxdma {
11 struct list_head head;
12 struct nvif_object object;
13};
14
15struct nv50_wndw {
16 const struct nv50_wndw_func *func;
17 const struct nv50_wimm_func *immd;
18 int id;
19 struct nv50_disp_interlock interlock;
20
21 struct {
22 struct nvif_object *parent;
23 struct list_head list;
24 } ctxdma;
25
26 struct drm_plane plane;
27
28 struct nv50_lut ilut;
29
30 struct nv50_dmac wndw;
31 struct nv50_dmac wimm;
32
33 struct nvif_notify notify;
34 u16 ntfy;
35 u16 sema;
36 u32 data;
37};
38
39int nv50_wndw_new_(const struct nv50_wndw_func *, struct drm_device *,
40 enum drm_plane_type, const char *name, int index,
41 const u32 *format, enum nv50_disp_interlock_type,
42 u32 interlock_data, u32 heads, struct nv50_wndw **);
43void nv50_wndw_init(struct nv50_wndw *);
44void nv50_wndw_fini(struct nv50_wndw *);
45void nv50_wndw_flush_set(struct nv50_wndw *, u32 *interlock,
46 struct nv50_wndw_atom *);
47void nv50_wndw_flush_clr(struct nv50_wndw *, u32 *interlock, bool flush,
48 struct nv50_wndw_atom *);
49void nv50_wndw_ntfy_enable(struct nv50_wndw *, struct nv50_wndw_atom *);
50int nv50_wndw_wait_armed(struct nv50_wndw *, struct nv50_wndw_atom *);
51
52struct nv50_wndw_func {
53 int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
54 struct nv50_head_atom *asyh);
55 void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
56 struct nv50_head_atom *asyh);
57 void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
58 struct nv50_wndw_atom *asyw);
59
60 void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
61 void (*sema_clr)(struct nv50_wndw *);
62 void (*ntfy_reset)(struct nouveau_bo *, u32 offset);
63 void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
64 void (*ntfy_clr)(struct nv50_wndw *);
65 int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset,
66 struct nvif_device *);
67 void (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *);
68 bool olut_core;
69 void (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
70 void (*xlut_clr)(struct nv50_wndw *);
71 void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
72 void (*image_clr)(struct nv50_wndw *);
73 void (*scale_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
74
75 void (*update)(struct nv50_wndw *, u32 *interlock);
76};
77
78extern const struct drm_plane_funcs nv50_wndw;
79
80void base507c_ntfy_reset(struct nouveau_bo *, u32);
81int base507c_ntfy_wait_begun(struct nouveau_bo *, u32, struct nvif_device *);
82
83struct nv50_wimm_func {
84 void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
85
86 void (*update)(struct nv50_wndw *, u32 *interlock);
87};
88
89extern const struct nv50_wimm_func curs507a;
90
91int wndwc37e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
92 struct nv50_wndw **);
93
94int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
95 struct nv50_wndw **);
96#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
new file mode 100644
index 000000000000..44afb0f069a5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -0,0 +1,278 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "wndw.h"
23#include "atom.h"
24
25#include <drm/drm_atomic_helper.h>
26#include <drm/drm_plane_helper.h>
27#include <nouveau_bo.h>
28
29#include <nvif/clc37e.h>
30
31static void
32wndwc37e_ilut_clr(struct nv50_wndw *wndw)
33{
34 u32 *push;
35 if ((push = evo_wait(&wndw->wndw, 2))) {
36 evo_mthd(push, 0x02b8, 1);
37 evo_data(push, 0x00000000);
38 evo_kick(push, &wndw->wndw);
39 }
40}
41
42static void
43wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
44{
45 u32 *push;
46 if ((push = evo_wait(&wndw->wndw, 4))) {
47 evo_mthd(push, 0x02b0, 3);
48 evo_data(push, asyw->xlut.i.output_mode << 8 |
49 asyw->xlut.i.range << 4 |
50 asyw->xlut.i.size);
51 evo_data(push, asyw->xlut.i.offset >> 8);
52 evo_data(push, asyw->xlut.handle);
53 evo_kick(push, &wndw->wndw);
54 }
55}
56
57static void
58wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
59{
60 asyw->xlut.i.mode = 2;
61 asyw->xlut.i.size = 0;
62 asyw->xlut.i.range = 0;
63 asyw->xlut.i.output_mode = 1;
64}
65
66static void
67wndwc37e_image_clr(struct nv50_wndw *wndw)
68{
69 u32 *push;
70 if ((push = evo_wait(&wndw->wndw, 4))) {
71 evo_mthd(push, 0x0308, 1);
72 evo_data(push, 0x00000000);
73 evo_mthd(push, 0x0240, 1);
74 evo_data(push, 0x00000000);
75 evo_kick(push, &wndw->wndw);
76 }
77}
78
79static void
80wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
81{
82 u32 *push;
83
84 if (!(push = evo_wait(&wndw->wndw, 25)))
85 return;
86
87 evo_mthd(push, 0x0308, 1);
88 evo_data(push, asyw->image.mode << 4 | asyw->image.interval);
89 evo_mthd(push, 0x0224, 4);
90 evo_data(push, asyw->image.h << 16 | asyw->image.w);
91 evo_data(push, asyw->image.layout << 4 | asyw->image.blockh);
92 evo_data(push, asyw->image.colorspace << 8 | asyw->image.format);
93 evo_data(push, asyw->image.blocks[0] | (asyw->image.pitch[0] >> 6));
94 evo_mthd(push, 0x0240, 1);
95 evo_data(push, asyw->image.handle[0]);
96 evo_mthd(push, 0x0260, 1);
97 evo_data(push, asyw->image.offset[0] >> 8);
98 evo_mthd(push, 0x0290, 1);
99 evo_data(push, (asyw->state.src_y >> 16) << 16 |
100 (asyw->state.src_x >> 16));
101 evo_mthd(push, 0x0298, 1);
102 evo_data(push, (asyw->state.src_h >> 16) << 16 |
103 (asyw->state.src_w >> 16));
104 evo_mthd(push, 0x02a4, 1);
105 evo_data(push, asyw->state.crtc_h << 16 |
106 asyw->state.crtc_w);
107
108 /*XXX: Composition-related stuff. Need to implement properly. */
109 evo_mthd(push, 0x02ec, 1);
110 evo_data(push, (2 - (wndw->id & 1)) << 4);
111 evo_mthd(push, 0x02f4, 5);
112 evo_data(push, 0x00000011);
113 evo_data(push, 0xffff0000);
114 evo_data(push, 0xffff0000);
115 evo_data(push, 0xffff0000);
116 evo_data(push, 0xffff0000);
117 evo_kick(push, &wndw->wndw);
118}
119
120static void
121wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
122{
123 u32 *push;
124 if ((push = evo_wait(&wndw->wndw, 2))) {
125 evo_mthd(push, 0x021c, 1);
126 evo_data(push, 0x00000000);
127 evo_kick(push, &wndw->wndw);
128 }
129}
130
131static void
132wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
133{
134 u32 *push;
135 if ((push = evo_wait(&wndw->wndw, 3))) {
136 evo_mthd(push, 0x021c, 2);
137 evo_data(push, asyw->ntfy.handle);
138 evo_data(push, asyw->ntfy.offset | asyw->ntfy.awaken);
139 evo_kick(push, &wndw->wndw);
140 }
141}
142
143static void
144wndwc37e_sema_clr(struct nv50_wndw *wndw)
145{
146 u32 *push;
147 if ((push = evo_wait(&wndw->wndw, 2))) {
148 evo_mthd(push, 0x0218, 1);
149 evo_data(push, 0x00000000);
150 evo_kick(push, &wndw->wndw);
151 }
152}
153
154static void
155wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
156{
157 u32 *push;
158 if ((push = evo_wait(&wndw->wndw, 5))) {
159 evo_mthd(push, 0x020c, 4);
160 evo_data(push, asyw->sema.offset);
161 evo_data(push, asyw->sema.acquire);
162 evo_data(push, asyw->sema.release);
163 evo_data(push, asyw->sema.handle);
164 evo_kick(push, &wndw->wndw);
165 }
166}
167
168static void
169wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
170{
171 u32 *push;
172 if ((push = evo_wait(&wndw->wndw, 5))) {
173 evo_mthd(push, 0x0370, 2);
174 evo_data(push, interlock[NV50_DISP_INTERLOCK_CURS] << 1 |
175 interlock[NV50_DISP_INTERLOCK_CORE]);
176 evo_data(push, interlock[NV50_DISP_INTERLOCK_WNDW]);
177 evo_mthd(push, 0x0200, 1);
178 if (interlock[NV50_DISP_INTERLOCK_WIMM] & wndw->interlock.data)
179 evo_data(push, 0x00001001);
180 else
181 evo_data(push, 0x00000001);
182 evo_kick(push, &wndw->wndw);
183 }
184}
185
186static void
187wndwc37e_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
188 struct nv50_head_atom *asyh)
189{
190}
191
192static int
193wndwc37e_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
194 struct nv50_head_atom *asyh)
195{
196 return drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
197 DRM_PLANE_HELPER_NO_SCALING,
198 DRM_PLANE_HELPER_NO_SCALING,
199 true, true);
200}
201
202static const u32
203wndwc37e_format[] = {
204 DRM_FORMAT_C8,
205 DRM_FORMAT_YUYV,
206 DRM_FORMAT_UYVY,
207 DRM_FORMAT_XRGB8888,
208 DRM_FORMAT_ARGB8888,
209 DRM_FORMAT_RGB565,
210 DRM_FORMAT_XRGB1555,
211 DRM_FORMAT_ARGB1555,
212 DRM_FORMAT_XBGR2101010,
213 DRM_FORMAT_ABGR2101010,
214 DRM_FORMAT_XBGR8888,
215 DRM_FORMAT_ABGR8888,
216 DRM_FORMAT_XRGB2101010,
217 DRM_FORMAT_ARGB2101010,
218 0
219};
220
221static const struct nv50_wndw_func
222wndwc37e = {
223 .acquire = wndwc37e_acquire,
224 .release = wndwc37e_release,
225 .sema_set = wndwc37e_sema_set,
226 .sema_clr = wndwc37e_sema_clr,
227 .ntfy_set = wndwc37e_ntfy_set,
228 .ntfy_clr = wndwc37e_ntfy_clr,
229 .ntfy_reset = corec37d_ntfy_init,
230 .ntfy_wait_begun = base507c_ntfy_wait_begun,
231 .ilut = wndwc37e_ilut,
232 .xlut_set = wndwc37e_ilut_set,
233 .xlut_clr = wndwc37e_ilut_clr,
234 .image_set = wndwc37e_image_set,
235 .image_clr = wndwc37e_image_clr,
236 .update = wndwc37e_update,
237};
238
239static int
240wndwc37e_new_(const struct nv50_wndw_func *func, struct nouveau_drm *drm,
241 enum drm_plane_type type, int index, s32 oclass, u32 heads,
242 struct nv50_wndw **pwndw)
243{
244 struct nvc37e_window_channel_dma_v0 args = {
245 .pushbuf = 0xb0007e00 | index,
246 .index = index,
247 };
248 struct nv50_disp *disp = nv50_disp(drm->dev);
249 struct nv50_wndw *wndw;
250 int ret;
251
252 ret = nv50_wndw_new_(func, drm->dev, type, "wndw", index,
253 wndwc37e_format, heads, NV50_DISP_INTERLOCK_WNDW,
254 BIT(index), &wndw);
255 if (*pwndw = wndw, ret)
256 return ret;
257
258 ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
259 &oclass, 0, &args, sizeof(args),
260 disp->sync->bo.offset, &wndw->wndw);
261 if (ret) {
262 NV_ERROR(drm, "qndw%04x allocation failed: %d\n", oclass, ret);
263 return ret;
264 }
265
266 wndw->ntfy = NV50_DISP_WNDW_NTFY(wndw->id);
267 wndw->sema = NV50_DISP_WNDW_SEM0(wndw->id);
268 wndw->data = 0x00000000;
269 return 0;
270}
271
272int
273wndwc37e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
274 s32 oclass, struct nv50_wndw **pwndw)
275{
276 return wndwc37e_new_(&wndwc37e, drm, type, index, oclass,
277 BIT(index >> 1), pwndw);
278}
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 2740278d226b..4f5233107f5f 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -31,6 +31,7 @@ struct nv_device_info_v0 {
31#define NV_DEVICE_INFO_V0_KEPLER 0x08 31#define NV_DEVICE_INFO_V0_KEPLER 0x08
32#define NV_DEVICE_INFO_V0_MAXWELL 0x09 32#define NV_DEVICE_INFO_V0_MAXWELL 0x09
33#define NV_DEVICE_INFO_V0_PASCAL 0x0a 33#define NV_DEVICE_INFO_V0_PASCAL 0x0a
34#define NV_DEVICE_INFO_V0_VOLTA 0x0b
34 __u8 family; 35 __u8 family;
35 __u8 pad06[2]; 36 __u8 pad06[2];
36 __u64 ram_size; 37 __u64 ram_size;
@@ -39,9 +40,55 @@ struct nv_device_info_v0 {
39 char name[64]; 40 char name[64];
40}; 41};
41 42
43struct nv_device_info_v1 {
44 __u8 version;
45 __u8 count;
46 __u8 pad02[6];
47 struct nv_device_info_v1_data {
48 __u64 mthd; /* NV_DEVICE_INFO_* (see below). */
49 __u64 data;
50 } data[];
51};
52
42struct nv_device_time_v0 { 53struct nv_device_time_v0 {
43 __u8 version; 54 __u8 version;
44 __u8 pad01[7]; 55 __u8 pad01[7];
45 __u64 time; 56 __u64 time;
46}; 57};
58
59#define NV_DEVICE_INFO_UNIT (0xffffffffULL << 32)
60#define NV_DEVICE_INFO(n) ((n) | (0x00000000ULL << 32))
61#define NV_DEVICE_FIFO(n) ((n) | (0x00000001ULL << 32))
62
63/* This will be returned for unsupported queries. */
64#define NV_DEVICE_INFO_INVALID ~0ULL
65
66/* These return a mask of available engines of particular type. */
67#define NV_DEVICE_INFO_ENGINE_SW NV_DEVICE_INFO(0x00000000)
68#define NV_DEVICE_INFO_ENGINE_GR NV_DEVICE_INFO(0x00000001)
69#define NV_DEVICE_INFO_ENGINE_MPEG NV_DEVICE_INFO(0x00000002)
70#define NV_DEVICE_INFO_ENGINE_ME NV_DEVICE_INFO(0x00000003)
71#define NV_DEVICE_INFO_ENGINE_CIPHER NV_DEVICE_INFO(0x00000004)
72#define NV_DEVICE_INFO_ENGINE_BSP NV_DEVICE_INFO(0x00000005)
73#define NV_DEVICE_INFO_ENGINE_VP NV_DEVICE_INFO(0x00000006)
74#define NV_DEVICE_INFO_ENGINE_CE NV_DEVICE_INFO(0x00000007)
75#define NV_DEVICE_INFO_ENGINE_SEC NV_DEVICE_INFO(0x00000008)
76#define NV_DEVICE_INFO_ENGINE_MSVLD NV_DEVICE_INFO(0x00000009)
77#define NV_DEVICE_INFO_ENGINE_MSPDEC NV_DEVICE_INFO(0x0000000a)
78#define NV_DEVICE_INFO_ENGINE_MSPPP NV_DEVICE_INFO(0x0000000b)
79#define NV_DEVICE_INFO_ENGINE_MSENC NV_DEVICE_INFO(0x0000000c)
80#define NV_DEVICE_INFO_ENGINE_VIC NV_DEVICE_INFO(0x0000000d)
81#define NV_DEVICE_INFO_ENGINE_SEC2 NV_DEVICE_INFO(0x0000000e)
82#define NV_DEVICE_INFO_ENGINE_NVDEC NV_DEVICE_INFO(0x0000000f)
83#define NV_DEVICE_INFO_ENGINE_NVENC NV_DEVICE_INFO(0x00000010)
84
85/* Returns the number of available channels. */
86#define NV_DEVICE_FIFO_CHANNELS NV_DEVICE_FIFO(0x00000000)
87
88/* Returns a mask of available runlists. */
89#define NV_DEVICE_FIFO_RUNLISTS NV_DEVICE_FIFO(0x00000001)
90
91/* These return a mask of engines available on a particular runlist. */
92#define NV_DEVICE_FIFO_RUNLIST_ENGINES(n) ((n) + NV_DEVICE_FIFO(0x00000010))
93#define NV_DEVICE_FIFO_RUNLIST_ENGINES__SIZE 64
47#endif 94#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
index 56f5bd81e480..fbfcffc5feb2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
@@ -4,25 +4,11 @@
4 4
5struct kepler_channel_gpfifo_a_v0 { 5struct kepler_channel_gpfifo_a_v0 {
6 __u8 version; 6 __u8 version;
7 __u8 pad01[5]; 7 __u8 pad01[1];
8 __u16 chid; 8 __u16 chid;
9#define NVA06F_V0_ENGINE_SW 0x00000001
10#define NVA06F_V0_ENGINE_GR 0x00000002
11#define NVA06F_V0_ENGINE_SEC 0x00000004
12#define NVA06F_V0_ENGINE_MSVLD 0x00000010
13#define NVA06F_V0_ENGINE_MSPDEC 0x00000020
14#define NVA06F_V0_ENGINE_MSPPP 0x00000040
15#define NVA06F_V0_ENGINE_MSENC 0x00000080
16#define NVA06F_V0_ENGINE_VIC 0x00000100
17#define NVA06F_V0_ENGINE_NVDEC 0x00000200
18#define NVA06F_V0_ENGINE_NVENC0 0x00000400
19#define NVA06F_V0_ENGINE_NVENC1 0x00000800
20#define NVA06F_V0_ENGINE_CE0 0x00010000
21#define NVA06F_V0_ENGINE_CE1 0x00020000
22#define NVA06F_V0_ENGINE_CE2 0x00040000
23 __u32 engines;
24 __u32 ilength; 9 __u32 ilength;
25 __u64 ioffset; 10 __u64 ioffset;
11 __u64 runlist;
26 __u64 vmm; 12 __u64 vmm;
27}; 13};
28 14
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index a7c5bf572788..6db56bd7d67e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -52,6 +52,8 @@
52 52
53#define NV04_DISP /* cl0046.h */ 0x00000046 53#define NV04_DISP /* cl0046.h */ 0x00000046
54 54
55#define VOLTA_USERMODE_A 0x0000c361
56
55#define NV03_CHANNEL_DMA /* cl506b.h */ 0x0000006b 57#define NV03_CHANNEL_DMA /* cl506b.h */ 0x0000006b
56#define NV10_CHANNEL_DMA /* cl506b.h */ 0x0000006e 58#define NV10_CHANNEL_DMA /* cl506b.h */ 0x0000006e
57#define NV17_CHANNEL_DMA /* cl506b.h */ 0x0000176e 59#define NV17_CHANNEL_DMA /* cl506b.h */ 0x0000176e
@@ -66,6 +68,7 @@
66#define KEPLER_CHANNEL_GPFIFO_B /* cla06f.h */ 0x0000a16f 68#define KEPLER_CHANNEL_GPFIFO_B /* cla06f.h */ 0x0000a16f
67#define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f 69#define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f
68#define PASCAL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c06f 70#define PASCAL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c06f
71#define VOLTA_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c36f
69 72
70#define NV50_DISP /* cl5070.h */ 0x00005070 73#define NV50_DISP /* cl5070.h */ 0x00005070
71#define G82_DISP /* cl5070.h */ 0x00008270 74#define G82_DISP /* cl5070.h */ 0x00008270
@@ -79,6 +82,7 @@
79#define GM200_DISP /* cl5070.h */ 0x00009570 82#define GM200_DISP /* cl5070.h */ 0x00009570
80#define GP100_DISP /* cl5070.h */ 0x00009770 83#define GP100_DISP /* cl5070.h */ 0x00009770
81#define GP102_DISP /* cl5070.h */ 0x00009870 84#define GP102_DISP /* cl5070.h */ 0x00009870
85#define GV100_DISP /* cl5070.h */ 0x0000c370
82 86
83#define NV31_MPEG 0x00003174 87#define NV31_MPEG 0x00003174
84#define G82_MPEG 0x00008274 88#define G82_MPEG 0x00008274
@@ -90,6 +94,7 @@
90#define GT214_DISP_CURSOR /* cl507a.h */ 0x0000857a 94#define GT214_DISP_CURSOR /* cl507a.h */ 0x0000857a
91#define GF110_DISP_CURSOR /* cl507a.h */ 0x0000907a 95#define GF110_DISP_CURSOR /* cl507a.h */ 0x0000907a
92#define GK104_DISP_CURSOR /* cl507a.h */ 0x0000917a 96#define GK104_DISP_CURSOR /* cl507a.h */ 0x0000917a
97#define GV100_DISP_CURSOR /* cl507a.h */ 0x0000c37a
93 98
94#define NV50_DISP_OVERLAY /* cl507b.h */ 0x0000507b 99#define NV50_DISP_OVERLAY /* cl507b.h */ 0x0000507b
95#define G82_DISP_OVERLAY /* cl507b.h */ 0x0000827b 100#define G82_DISP_OVERLAY /* cl507b.h */ 0x0000827b
@@ -97,6 +102,8 @@
97#define GF110_DISP_OVERLAY /* cl507b.h */ 0x0000907b 102#define GF110_DISP_OVERLAY /* cl507b.h */ 0x0000907b
98#define GK104_DISP_OVERLAY /* cl507b.h */ 0x0000917b 103#define GK104_DISP_OVERLAY /* cl507b.h */ 0x0000917b
99 104
105#define GV100_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c37b
106
100#define NV50_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000507c 107#define NV50_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000507c
101#define G82_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000827c 108#define G82_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000827c
102#define GT200_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000837c 109#define GT200_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000837c
@@ -117,6 +124,7 @@
117#define GM200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d 124#define GM200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
118#define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d 125#define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d
119#define GP102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d 126#define GP102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
127#define GV100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c37d
120 128
121#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e 129#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
122#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e 130#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
@@ -125,6 +133,8 @@
125#define GF110_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000907e 133#define GF110_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000907e
126#define GK104_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000917e 134#define GK104_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000917e
127 135
136#define GV100_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c37e
137
128#define NV50_TESLA 0x00005097 138#define NV50_TESLA 0x00005097
129#define G82_TESLA 0x00008297 139#define G82_TESLA 0x00008297
130#define GT200_TESLA 0x00008397 140#define GT200_TESLA 0x00008397
@@ -145,6 +155,8 @@
145#define PASCAL_A /* cl9097.h */ 0x0000c097 155#define PASCAL_A /* cl9097.h */ 0x0000c097
146#define PASCAL_B /* cl9097.h */ 0x0000c197 156#define PASCAL_B /* cl9097.h */ 0x0000c197
147 157
158#define VOLTA_A /* cl9097.h */ 0x0000c397
159
148#define NV74_BSP 0x000074b0 160#define NV74_BSP 0x000074b0
149 161
150#define GT212_MSVLD 0x000085b1 162#define GT212_MSVLD 0x000085b1
@@ -170,6 +182,7 @@
170#define MAXWELL_DMA_COPY_A 0x0000b0b5 182#define MAXWELL_DMA_COPY_A 0x0000b0b5
171#define PASCAL_DMA_COPY_A 0x0000c0b5 183#define PASCAL_DMA_COPY_A 0x0000c0b5
172#define PASCAL_DMA_COPY_B 0x0000c1b5 184#define PASCAL_DMA_COPY_B 0x0000c1b5
185#define VOLTA_DMA_COPY_A 0x0000c3b5
173 186
174#define FERMI_DECOMPRESS 0x000090b8 187#define FERMI_DECOMPRESS 0x000090b8
175 188
@@ -183,6 +196,7 @@
183#define MAXWELL_COMPUTE_B 0x0000b1c0 196#define MAXWELL_COMPUTE_B 0x0000b1c0
184#define PASCAL_COMPUTE_A 0x0000c0c0 197#define PASCAL_COMPUTE_A 0x0000c0c0
185#define PASCAL_COMPUTE_B 0x0000c1c0 198#define PASCAL_COMPUTE_B 0x0000c1c0
199#define VOLTA_COMPUTE_A 0x0000c3c0
186 200
187#define NV74_CIPHER 0x000074c1 201#define NV74_CIPHER 0x000074c1
188#endif 202#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/clc37b.h b/drivers/gpu/drm/nouveau/include/nvif/clc37b.h
new file mode 100644
index 000000000000..89b18189d43b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/clc37b.h
@@ -0,0 +1,11 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __NVIF_CLC37B_H__
3#define __NVIF_CLC37B_H__
4
5struct nvc37b_window_imm_channel_dma_v0 {
6 __u8 version;
7 __u8 index;
8 __u8 pad02[6];
9 __u64 pushbuf;
10};
11#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/clc37e.h b/drivers/gpu/drm/nouveau/include/nvif/clc37e.h
new file mode 100644
index 000000000000..899db9e915ef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/clc37e.h
@@ -0,0 +1,13 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __NVIF_CLC37E_H__
3#define __NVIF_CLC37E_H__
4
5struct nvc37e_window_channel_dma_v0 {
6 __u8 version;
7 __u8 index;
8 __u8 pad02[6];
9 __u64 pushbuf;
10};
11
12#define NVC37E_WINDOW_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
13#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index 6edb6266857e..ef839bd1d37e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -4,10 +4,18 @@
4 4
5#include <nvif/object.h> 5#include <nvif/object.h>
6#include <nvif/cl0080.h> 6#include <nvif/cl0080.h>
7#include <nvif/user.h>
7 8
8struct nvif_device { 9struct nvif_device {
9 struct nvif_object object; 10 struct nvif_object object;
10 struct nv_device_info_v0 info; 11 struct nv_device_info_v0 info;
12
13 struct nvif_fifo_runlist {
14 u64 engines;
15 } *runlist;
16 int runlists;
17
18 struct nvif_user user;
11}; 19};
12 20
13int nvif_device_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32, 21int nvif_device_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
@@ -67,6 +75,5 @@ u64 nvif_device_time(struct nvif_device *);
67#include <engine/fifo.h> 75#include <engine/fifo.h>
68#include <engine/gr.h> 76#include <engine/gr.h>
69 77
70#define nvxx_fifo(a) nvxx_device(a)->fifo
71#define nvxx_gr(a) nvxx_device(a)->gr 78#define nvxx_gr(a) nvxx_device(a)->gr
72#endif 79#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/disp.h b/drivers/gpu/drm/nouveau/include/nvif/disp.h
new file mode 100644
index 000000000000..7c0eda375c01
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/disp.h
@@ -0,0 +1,12 @@
1#ifndef __NVIF_DISP_H__
2#define __NVIF_DISP_H__
3#include <nvif/object.h>
4struct nvif_device;
5
6struct nvif_disp {
7 struct nvif_object object;
8};
9
10int nvif_disp_ctor(struct nvif_device *, s32 oclass, struct nvif_disp *);
11void nvif_disp_dtor(struct nvif_disp *);
12#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/fifo.h b/drivers/gpu/drm/nouveau/include/nvif/fifo.h
new file mode 100644
index 000000000000..e9468c9f9abf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/fifo.h
@@ -0,0 +1,18 @@
1#ifndef __NVIF_FIFO_H__
2#define __NVIF_FIFO_H__
3#include <nvif/device.h>
4
5/* Returns mask of runlists that support a NV_DEVICE_INFO_ENGINE_* type. */
6u64 nvif_fifo_runlist(struct nvif_device *, u64 engine);
7
8/* CE-supporting runlists (excluding GRCE, if others exist). */
9static inline u64
10nvif_fifo_runlist_ce(struct nvif_device *device)
11{
12 u64 runmgr = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);
13 u64 runmce = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_CE);
14 if (runmce && !(runmce &= ~runmgr))
15 runmce = runmgr;
16 return runmce;
17}
18#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/mem.h b/drivers/gpu/drm/nouveau/include/nvif/mem.h
index b542fe38398e..80ee4ab0f016 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/mem.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/mem.h
@@ -15,4 +15,6 @@ int nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
15int nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page, 15int nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
16 u64 size, void *argv, u32 argc, struct nvif_mem *); 16 u64 size, void *argv, u32 argc, struct nvif_mem *);
17void nvif_mem_fini(struct nvif_mem *); 17void nvif_mem_fini(struct nvif_mem *);
18
19int nvif_mem_init_map(struct nvif_mmu *, u8 type, u64 size, struct nvif_mem *);
18#endif 20#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/mmu.h b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
index c8cd5b5b0688..747ecf67e403 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
@@ -8,6 +8,7 @@ struct nvif_mmu {
8 u8 heap_nr; 8 u8 heap_nr;
9 u8 type_nr; 9 u8 type_nr;
10 u16 kind_nr; 10 u16 kind_nr;
11 s32 mem;
11 12
12 struct { 13 struct {
13 u64 size; 14 u64 size;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index a2d5244ff2b7..20754d9e6883 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -99,6 +99,22 @@ struct nvif_mclass {
99 ret; \ 99 ret; \
100}) 100})
101 101
102#define nvif_sclass(o,m,u) ({ \
103 const typeof(m[0]) *_mclass = (m); \
104 s32 _oclass = (u); \
105 int _cid; \
106 if (_oclass) { \
107 for (_cid = 0; _mclass[_cid].oclass; _cid++) { \
108 if (_mclass[_cid].oclass == _oclass) \
109 break; \
110 } \
111 _cid = _mclass[_cid].oclass ? _cid : -ENOSYS; \
112 } else { \
113 _cid = nvif_mclass((o), _mclass); \
114 } \
115 _cid; \
116})
117
102/*XXX*/ 118/*XXX*/
103#include <core/object.h> 119#include <core/object.h>
104#define nvxx_object(a) ({ \ 120#define nvxx_object(a) ({ \
diff --git a/drivers/gpu/drm/nouveau/include/nvif/user.h b/drivers/gpu/drm/nouveau/include/nvif/user.h
new file mode 100644
index 000000000000..03c11826b693
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/user.h
@@ -0,0 +1,19 @@
1#ifndef __NVIF_USER_H__
2#define __NVIF_USER_H__
3#include <nvif/object.h>
4struct nvif_device;
5
6struct nvif_user {
7 const struct nvif_user_func *func;
8 struct nvif_object object;
9};
10
11struct nvif_user_func {
12 void (*doorbell)(struct nvif_user *, u32 token);
13};
14
15int nvif_user_init(struct nvif_device *);
16void nvif_user_fini(struct nvif_device *);
17
18extern const struct nvif_user_func nvif_userc361;
19#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 560265b15ec2..d83d834b7452 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -22,6 +22,7 @@ enum nvkm_devidx {
22 NVKM_SUBDEV_LTC, 22 NVKM_SUBDEV_LTC,
23 NVKM_SUBDEV_MMU, 23 NVKM_SUBDEV_MMU,
24 NVKM_SUBDEV_BAR, 24 NVKM_SUBDEV_BAR,
25 NVKM_SUBDEV_FAULT,
25 NVKM_SUBDEV_PMU, 26 NVKM_SUBDEV_PMU,
26 NVKM_SUBDEV_VOLT, 27 NVKM_SUBDEV_VOLT,
27 NVKM_SUBDEV_ICCSENSE, 28 NVKM_SUBDEV_ICCSENSE,
@@ -37,7 +38,10 @@ enum nvkm_devidx {
37 NVKM_ENGINE_CE3, 38 NVKM_ENGINE_CE3,
38 NVKM_ENGINE_CE4, 39 NVKM_ENGINE_CE4,
39 NVKM_ENGINE_CE5, 40 NVKM_ENGINE_CE5,
40 NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE5, 41 NVKM_ENGINE_CE6,
42 NVKM_ENGINE_CE7,
43 NVKM_ENGINE_CE8,
44 NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE8,
41 45
42 NVKM_ENGINE_CIPHER, 46 NVKM_ENGINE_CIPHER,
43 NVKM_ENGINE_DISP, 47 NVKM_ENGINE_DISP,
@@ -109,6 +113,7 @@ struct nvkm_device {
109 NV_E0 = 0xe0, 113 NV_E0 = 0xe0,
110 GM100 = 0x110, 114 GM100 = 0x110,
111 GP100 = 0x130, 115 GP100 = 0x130,
116 GV100 = 0x140,
112 } card_type; 117 } card_type;
113 u32 chipset; 118 u32 chipset;
114 u8 chiprev; 119 u8 chiprev;
@@ -123,6 +128,7 @@ struct nvkm_device {
123 struct nvkm_bus *bus; 128 struct nvkm_bus *bus;
124 struct nvkm_clk *clk; 129 struct nvkm_clk *clk;
125 struct nvkm_devinit *devinit; 130 struct nvkm_devinit *devinit;
131 struct nvkm_fault *fault;
126 struct nvkm_fb *fb; 132 struct nvkm_fb *fb;
127 struct nvkm_fuse *fuse; 133 struct nvkm_fuse *fuse;
128 struct nvkm_gpio *gpio; 134 struct nvkm_gpio *gpio;
@@ -143,7 +149,7 @@ struct nvkm_device {
143 struct nvkm_volt *volt; 149 struct nvkm_volt *volt;
144 150
145 struct nvkm_engine *bsp; 151 struct nvkm_engine *bsp;
146 struct nvkm_engine *ce[6]; 152 struct nvkm_engine *ce[9];
147 struct nvkm_engine *cipher; 153 struct nvkm_engine *cipher;
148 struct nvkm_disp *disp; 154 struct nvkm_disp *disp;
149 struct nvkm_dma *dma; 155 struct nvkm_dma *dma;
@@ -194,6 +200,7 @@ struct nvkm_device_chip {
194 int (*bus )(struct nvkm_device *, int idx, struct nvkm_bus **); 200 int (*bus )(struct nvkm_device *, int idx, struct nvkm_bus **);
195 int (*clk )(struct nvkm_device *, int idx, struct nvkm_clk **); 201 int (*clk )(struct nvkm_device *, int idx, struct nvkm_clk **);
196 int (*devinit )(struct nvkm_device *, int idx, struct nvkm_devinit **); 202 int (*devinit )(struct nvkm_device *, int idx, struct nvkm_devinit **);
203 int (*fault )(struct nvkm_device *, int idx, struct nvkm_fault **);
197 int (*fb )(struct nvkm_device *, int idx, struct nvkm_fb **); 204 int (*fb )(struct nvkm_device *, int idx, struct nvkm_fb **);
198 int (*fuse )(struct nvkm_device *, int idx, struct nvkm_fuse **); 205 int (*fuse )(struct nvkm_device *, int idx, struct nvkm_fuse **);
199 int (*gpio )(struct nvkm_device *, int idx, struct nvkm_gpio **); 206 int (*gpio )(struct nvkm_device *, int idx, struct nvkm_gpio **);
@@ -214,7 +221,7 @@ struct nvkm_device_chip {
214 int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **); 221 int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **);
215 222
216 int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **); 223 int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **);
217 int (*ce[6] )(struct nvkm_device *, int idx, struct nvkm_engine **); 224 int (*ce[9] )(struct nvkm_device *, int idx, struct nvkm_engine **);
218 int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **); 225 int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **);
219 int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **); 226 int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **);
220 int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **); 227 int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index ebf8473a39fe..8a2be5b635e2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -18,6 +18,7 @@ struct nvkm_engine_func {
18 void *(*dtor)(struct nvkm_engine *); 18 void *(*dtor)(struct nvkm_engine *);
19 void (*preinit)(struct nvkm_engine *); 19 void (*preinit)(struct nvkm_engine *);
20 int (*oneinit)(struct nvkm_engine *); 20 int (*oneinit)(struct nvkm_engine *);
21 int (*info)(struct nvkm_engine *, u64 mthd, u64 *data);
21 int (*init)(struct nvkm_engine *); 22 int (*init)(struct nvkm_engine *);
22 int (*fini)(struct nvkm_engine *, bool suspend); 23 int (*fini)(struct nvkm_engine *, bool suspend);
23 void (*intr)(struct nvkm_engine *); 24 void (*intr)(struct nvkm_engine *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 63df2290177f..85a0777c2ce4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -17,6 +17,7 @@ struct nvkm_subdev_func {
17 void *(*dtor)(struct nvkm_subdev *); 17 void *(*dtor)(struct nvkm_subdev *);
18 int (*preinit)(struct nvkm_subdev *); 18 int (*preinit)(struct nvkm_subdev *);
19 int (*oneinit)(struct nvkm_subdev *); 19 int (*oneinit)(struct nvkm_subdev *);
20 int (*info)(struct nvkm_subdev *, u64 mthd, u64 *data);
20 int (*init)(struct nvkm_subdev *); 21 int (*init)(struct nvkm_subdev *);
21 int (*fini)(struct nvkm_subdev *, bool suspend); 22 int (*fini)(struct nvkm_subdev *, bool suspend);
22 void (*intr)(struct nvkm_subdev *); 23 void (*intr)(struct nvkm_subdev *);
@@ -29,6 +30,7 @@ void nvkm_subdev_del(struct nvkm_subdev **);
29int nvkm_subdev_preinit(struct nvkm_subdev *); 30int nvkm_subdev_preinit(struct nvkm_subdev *);
30int nvkm_subdev_init(struct nvkm_subdev *); 31int nvkm_subdev_init(struct nvkm_subdev *);
31int nvkm_subdev_fini(struct nvkm_subdev *, bool suspend); 32int nvkm_subdev_fini(struct nvkm_subdev *, bool suspend);
33int nvkm_subdev_info(struct nvkm_subdev *, u64, u64 *);
32void nvkm_subdev_intr(struct nvkm_subdev *); 34void nvkm_subdev_intr(struct nvkm_subdev *);
33 35
34/* subdev logging */ 36/* subdev logging */
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index 553245994450..fc295e1faa19 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -10,4 +10,5 @@ int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
10int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **); 10int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
11int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **); 11int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
12int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **); 12int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
13int gv100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
13#endif 14#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index e83193d3ccab..ef7dc0844d26 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -35,4 +35,5 @@ int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
35int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **); 35int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
36int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **); 36int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
37int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **); 37int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
38int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
38#endif 39#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
index 0f9c1c702ed6..f0c1b2c8c78c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -27,4 +27,5 @@ int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
27int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **); 27int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
28int gf100_dma_new(struct nvkm_device *, int, struct nvkm_dma **); 28int gf100_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
29int gf119_dma_new(struct nvkm_device *, int, struct nvkm_dma **); 29int gf119_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
30int gv100_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
30#endif 31#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index c17b3a9bf8fb..7e39fbed2519 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -4,6 +4,7 @@
4#include <core/engine.h> 4#include <core/engine.h>
5#include <core/object.h> 5#include <core/object.h>
6#include <core/event.h> 6#include <core/event.h>
7struct nvkm_fault_data;
7 8
8#define NVKM_FIFO_CHID_NR 4096 9#define NVKM_FIFO_CHID_NR 4096
9 10
@@ -45,6 +46,7 @@ struct nvkm_fifo {
45 struct nvkm_event kevent; /* channel killed */ 46 struct nvkm_event kevent; /* channel killed */
46}; 47};
47 48
49void nvkm_fifo_fault(struct nvkm_fifo *, struct nvkm_fault_data *);
48void nvkm_fifo_pause(struct nvkm_fifo *, unsigned long *); 50void nvkm_fifo_pause(struct nvkm_fifo *, unsigned long *);
49void nvkm_fifo_start(struct nvkm_fifo *, unsigned long *); 51void nvkm_fifo_start(struct nvkm_fifo *, unsigned long *);
50 52
@@ -71,4 +73,5 @@ int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
71int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); 73int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
72int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); 74int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
73int gp10b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); 75int gp10b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
76int gv100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
74#endif 77#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index fb18f105fc43..ba1518ff8b66 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -45,6 +45,8 @@ int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
45int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **); 45int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
46int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **); 46int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
47int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **); 47int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
48int gp104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
48int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **); 49int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
49int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **); 50int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
51int gv100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
50#endif 52#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
index df34b41838d6..512e25a41803 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
@@ -1,6 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __NVBIOS_DP_H__ 2#ifndef __NVBIOS_DP_H__
3#define __NVBIOS_DP_H__ 3#define __NVBIOS_DP_H__
4
5u16
6nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
7
4struct nvbios_dpout { 8struct nvbios_dpout {
5 u16 type; 9 u16 type;
6 u16 mask; 10 u16 mask;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 40558064d589..486e7635c29d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -30,4 +30,5 @@ int mcp89_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
30int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); 30int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
31int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); 31int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
32int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); 32int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
33int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
33#endif 34#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
new file mode 100644
index 000000000000..5a77498fe6a0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -0,0 +1,33 @@
1#ifndef __NVKM_FAULT_H__
2#define __NVKM_FAULT_H__
3#include <core/subdev.h>
4#include <core/notify.h>
5
6struct nvkm_fault {
7 const struct nvkm_fault_func *func;
8 struct nvkm_subdev subdev;
9
10 struct nvkm_fault_buffer *buffer[2];
11 int buffer_nr;
12
13 struct nvkm_event event;
14
15 struct nvkm_notify nrpfb;
16};
17
18struct nvkm_fault_data {
19 u64 addr;
20 u64 inst;
21 u64 time;
22 u8 engine;
23 u8 valid;
24 u8 gpc;
25 u8 hub;
26 u8 access;
27 u8 client;
28 u8 reason;
29};
30
31int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
32int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
33#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 92be0e5269c6..96ccc624ee81 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -83,6 +83,7 @@ int gm20b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
83int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **); 83int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
84int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **); 84int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
85int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **); 85int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
86int gv100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
86 87
87#include <subdev/bios.h> 88#include <subdev/bios.h>
88#include <subdev/bios/ramcfg.h> 89#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index 95b611554d53..9db5f8293198 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -21,12 +21,14 @@ struct nvkm_ltc {
21 int zbc_max; 21 int zbc_max;
22 u32 zbc_color[NVKM_LTC_MAX_ZBC_CNT][4]; 22 u32 zbc_color[NVKM_LTC_MAX_ZBC_CNT][4];
23 u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT]; 23 u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
24 u32 zbc_stencil[NVKM_LTC_MAX_ZBC_CNT];
24}; 25};
25 26
26void nvkm_ltc_tags_clear(struct nvkm_device *, u32 first, u32 count); 27void nvkm_ltc_tags_clear(struct nvkm_device *, u32 first, u32 count);
27 28
28int nvkm_ltc_zbc_color_get(struct nvkm_ltc *, int index, const u32[4]); 29int nvkm_ltc_zbc_color_get(struct nvkm_ltc *, int index, const u32[4]);
29int nvkm_ltc_zbc_depth_get(struct nvkm_ltc *, int index, const u32); 30int nvkm_ltc_zbc_depth_get(struct nvkm_ltc *, int index, const u32);
31int nvkm_ltc_zbc_stencil_get(struct nvkm_ltc *, int index, const u32);
30 32
31void nvkm_ltc_invalidate(struct nvkm_ltc *); 33void nvkm_ltc_invalidate(struct nvkm_ltc *);
32void nvkm_ltc_flush(struct nvkm_ltc *); 34void nvkm_ltc_flush(struct nvkm_ltc *);
@@ -37,4 +39,5 @@ int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
37int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); 39int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
38int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); 40int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
39int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); 41int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
42int gp102_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
40#endif 43#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index baab93398e54..688595545e21 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -129,4 +129,5 @@ int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
129int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); 129int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
130int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); 130int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
131int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); 131int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
132int gv100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
132#endif 133#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index ece650a0c5f9..e2211bb2cf79 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -23,6 +23,7 @@
23 23
24#include <nvif/client.h> 24#include <nvif/client.h>
25#include <nvif/driver.h> 25#include <nvif/driver.h>
26#include <nvif/fifo.h>
26#include <nvif/ioctl.h> 27#include <nvif/ioctl.h>
27#include <nvif/class.h> 28#include <nvif/class.h>
28#include <nvif/cl0002.h> 29#include <nvif/cl0002.h>
@@ -102,6 +103,7 @@ nouveau_abi16_swclass(struct nouveau_drm *drm)
102 case NV_DEVICE_INFO_V0_KEPLER: 103 case NV_DEVICE_INFO_V0_KEPLER:
103 case NV_DEVICE_INFO_V0_MAXWELL: 104 case NV_DEVICE_INFO_V0_MAXWELL:
104 case NV_DEVICE_INFO_V0_PASCAL: 105 case NV_DEVICE_INFO_V0_PASCAL:
106 case NV_DEVICE_INFO_V0_VOLTA:
105 return NVIF_CLASS_SW_GF100; 107 return NVIF_CLASS_SW_GF100;
106 } 108 }
107 109
@@ -256,6 +258,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
256 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv); 258 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
257 struct nouveau_abi16_chan *chan; 259 struct nouveau_abi16_chan *chan;
258 struct nvif_device *device; 260 struct nvif_device *device;
261 u64 engine;
259 int ret; 262 int ret;
260 263
261 if (unlikely(!abi16)) 264 if (unlikely(!abi16))
@@ -268,25 +271,26 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
268 271
269 /* hack to allow channel engine type specification on kepler */ 272 /* hack to allow channel engine type specification on kepler */
270 if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) { 273 if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
271 if (init->fb_ctxdma_handle != ~0) 274 if (init->fb_ctxdma_handle == ~0) {
272 init->fb_ctxdma_handle = NVA06F_V0_ENGINE_GR; 275 switch (init->tt_ctxdma_handle) {
273 else { 276 case 0x01: engine = NV_DEVICE_INFO_ENGINE_GR ; break;
274 init->fb_ctxdma_handle = 0; 277 case 0x02: engine = NV_DEVICE_INFO_ENGINE_MSPDEC; break;
275#define _(A,B) if (init->tt_ctxdma_handle & (A)) init->fb_ctxdma_handle |= (B) 278 case 0x04: engine = NV_DEVICE_INFO_ENGINE_MSPPP ; break;
276 _(0x01, NVA06F_V0_ENGINE_GR); 279 case 0x08: engine = NV_DEVICE_INFO_ENGINE_MSVLD ; break;
277 _(0x02, NVA06F_V0_ENGINE_MSPDEC); 280 case 0x30: engine = NV_DEVICE_INFO_ENGINE_CE ; break;
278 _(0x04, NVA06F_V0_ENGINE_MSPPP); 281 default:
279 _(0x08, NVA06F_V0_ENGINE_MSVLD); 282 return nouveau_abi16_put(abi16, -ENOSYS);
280 _(0x10, NVA06F_V0_ENGINE_CE0); 283 }
281 _(0x20, NVA06F_V0_ENGINE_CE1); 284 } else {
282 _(0x40, NVA06F_V0_ENGINE_MSENC); 285 engine = NV_DEVICE_INFO_ENGINE_GR;
283#undef _
284 } 286 }
285 287
286 /* allow flips to be executed if this is a graphics channel */ 288 if (engine != NV_DEVICE_INFO_ENGINE_CE)
289 engine = nvif_fifo_runlist(device, engine);
290 else
291 engine = nvif_fifo_runlist_ce(device);
292 init->fb_ctxdma_handle = engine;
287 init->tt_ctxdma_handle = 0; 293 init->tt_ctxdma_handle = 0;
288 if (init->fb_ctxdma_handle == NVA06F_V0_ENGINE_GR)
289 init->tt_ctxdma_handle = 1;
290 } 294 }
291 295
292 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) 296 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 5ffcb6683776..ffb195850314 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -193,7 +193,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
193 return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); 193 return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
194} 194}
195 195
196static int nouveau_dsm_get_client_id(struct pci_dev *pdev) 196static enum vga_switcheroo_client_id nouveau_dsm_get_client_id(struct pci_dev *pdev)
197{ 197{
198 /* easy option one - intel vendor ID means Integrated */ 198 /* easy option one - intel vendor ID means Integrated */
199 if (pdev->vendor == PCI_VENDOR_ID_INTEL) 199 if (pdev->vendor == PCI_VENDOR_ID_INTEL)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index ab61c038f42c..7214022dfb91 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1141,6 +1141,8 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
1141 struct ttm_mem_reg *, struct ttm_mem_reg *); 1141 struct ttm_mem_reg *, struct ttm_mem_reg *);
1142 int (*init)(struct nouveau_channel *, u32 handle); 1142 int (*init)(struct nouveau_channel *, u32 handle);
1143 } _methods[] = { 1143 } _methods[] = {
1144 { "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
1145 { "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
1144 { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init }, 1146 { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
1145 { "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init }, 1147 { "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
1146 { "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init }, 1148 { "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index af1116655910..92d3115f96b5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -163,12 +163,15 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
163 return ret; 163 return ret;
164 } 164 }
165 165
166 chan->push.addr = chan->push.vma->addr;
167
168 if (device->info.family >= NV_DEVICE_INFO_V0_FERMI)
169 return 0;
170
166 args.target = NV_DMA_V0_TARGET_VM; 171 args.target = NV_DMA_V0_TARGET_VM;
167 args.access = NV_DMA_V0_ACCESS_VM; 172 args.access = NV_DMA_V0_ACCESS_VM;
168 args.start = 0; 173 args.start = 0;
169 args.limit = cli->vmm.vmm.limit - 1; 174 args.limit = cli->vmm.vmm.limit - 1;
170
171 chan->push.addr = chan->push.vma->addr;
172 } else 175 } else
173 if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) { 176 if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
174 if (device->info.family == NV_DEVICE_INFO_V0_TNT) { 177 if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
@@ -214,10 +217,11 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
214 217
215static int 218static int
216nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device, 219nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
217 u32 engine, struct nouveau_channel **pchan) 220 u64 runlist, struct nouveau_channel **pchan)
218{ 221{
219 struct nouveau_cli *cli = (void *)device->object.client; 222 struct nouveau_cli *cli = (void *)device->object.client;
220 static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A, 223 static const u16 oclasses[] = { VOLTA_CHANNEL_GPFIFO_A,
224 PASCAL_CHANNEL_GPFIFO_A,
221 MAXWELL_CHANNEL_GPFIFO_A, 225 MAXWELL_CHANNEL_GPFIFO_A,
222 KEPLER_CHANNEL_GPFIFO_B, 226 KEPLER_CHANNEL_GPFIFO_B,
223 KEPLER_CHANNEL_GPFIFO_A, 227 KEPLER_CHANNEL_GPFIFO_A,
@@ -245,9 +249,9 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
245 do { 249 do {
246 if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) { 250 if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
247 args.kepler.version = 0; 251 args.kepler.version = 0;
248 args.kepler.engines = engine;
249 args.kepler.ilength = 0x02000; 252 args.kepler.ilength = 0x02000;
250 args.kepler.ioffset = 0x10000 + chan->push.addr; 253 args.kepler.ioffset = 0x10000 + chan->push.addr;
254 args.kepler.runlist = runlist;
251 args.kepler.vmm = nvif_handle(&cli->vmm.vmm.object); 255 args.kepler.vmm = nvif_handle(&cli->vmm.vmm.object);
252 size = sizeof(args.kepler); 256 size = sizeof(args.kepler);
253 } else 257 } else
@@ -474,3 +478,28 @@ done:
474 cli->base.super = super; 478 cli->base.super = super;
475 return ret; 479 return ret;
476} 480}
481
482int
483nouveau_channels_init(struct nouveau_drm *drm)
484{
485 struct {
486 struct nv_device_info_v1 m;
487 struct {
488 struct nv_device_info_v1_data channels;
489 } v;
490 } args = {
491 .m.version = 1,
492 .m.count = sizeof(args.v) / sizeof(args.v.channels),
493 .v.channels.mthd = NV_DEVICE_FIFO_CHANNELS,
494 };
495 struct nvif_object *device = &drm->client.device.object;
496 int ret;
497
498 ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
499 if (ret || args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
500 return -ENODEV;
501
502 drm->chan.nr = args.v.channels.data;
503 drm->chan.context_base = dma_fence_context_alloc(drm->chan.nr);
504 return 0;
505}
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 14607c16a2bd..64454c2ebd90 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -45,6 +45,7 @@ struct nouveau_channel {
45 atomic_t killed; 45 atomic_t killed;
46}; 46};
47 47
48int nouveau_channels_init(struct nouveau_drm *);
48 49
49int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *, 50int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *,
50 u32 arg0, u32 arg1, struct nouveau_channel **); 51 u32 arg0, u32 arg1, struct nouveau_channel **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 6ed9cb053dfa..7b557c354307 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -151,7 +151,7 @@ nouveau_conn_atomic_set_property(struct drm_connector *connector,
151 /* ... except prior to G80, where the code 151 /* ... except prior to G80, where the code
152 * doesn't support such things. 152 * doesn't support such things.
153 */ 153 */
154 if (disp->disp.oclass < NV50_DISP) 154 if (disp->disp.object.oclass < NV50_DISP)
155 return -EINVAL; 155 return -EINVAL;
156 break; 156 break;
157 default: 157 default:
@@ -260,7 +260,7 @@ nouveau_conn_reset(struct drm_connector *connector)
260 asyc->procamp.color_vibrance = 150; 260 asyc->procamp.color_vibrance = 150;
261 asyc->procamp.vibrant_hue = 90; 261 asyc->procamp.vibrant_hue = 90;
262 262
263 if (nouveau_display(connector->dev)->disp.oclass < NV50_DISP) { 263 if (nouveau_display(connector->dev)->disp.object.oclass < NV50_DISP) {
264 switch (connector->connector_type) { 264 switch (connector->connector_type) {
265 case DRM_MODE_CONNECTOR_LVDS: 265 case DRM_MODE_CONNECTOR_LVDS:
266 /* See note in nouveau_conn_atomic_set_property(). */ 266 /* See note in nouveau_conn_atomic_set_property(). */
@@ -314,7 +314,7 @@ nouveau_conn_attach_properties(struct drm_connector *connector)
314 case DRM_MODE_CONNECTOR_TV: 314 case DRM_MODE_CONNECTOR_TV:
315 break; 315 break;
316 case DRM_MODE_CONNECTOR_VGA: 316 case DRM_MODE_CONNECTOR_VGA:
317 if (disp->disp.oclass < NV50_DISP) 317 if (disp->disp.object.oclass < NV50_DISP)
318 break; /* Can only scale on DFPs. */ 318 break; /* Can only scale on DFPs. */
319 /* Fall-through. */ 319 /* Fall-through. */
320 default: 320 default:
@@ -1005,7 +1005,7 @@ get_tmds_link_bandwidth(struct drm_connector *connector, bool hdmi)
1005 return 112000; 1005 return 112000;
1006} 1006}
1007 1007
1008static int 1008static enum drm_mode_status
1009nouveau_connector_mode_valid(struct drm_connector *connector, 1009nouveau_connector_mode_valid(struct drm_connector *connector,
1010 struct drm_display_mode *mode) 1010 struct drm_display_mode *mode)
1011{ 1011{
@@ -1321,7 +1321,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
1321 } 1321 }
1322 1322
1323 /* HDMI 3D support */ 1323 /* HDMI 3D support */
1324 if ((disp->disp.oclass >= G82_DISP) 1324 if ((disp->disp.object.oclass >= G82_DISP)
1325 && ((type == DRM_MODE_CONNECTOR_DisplayPort) 1325 && ((type == DRM_MODE_CONNECTOR_DisplayPort)
1326 || (type == DRM_MODE_CONNECTOR_eDP) 1326 || (type == DRM_MODE_CONNECTOR_eDP)
1327 || (type == DRM_MODE_CONNECTOR_HDMIA))) 1327 || (type == DRM_MODE_CONNECTOR_HDMIA)))
@@ -1343,7 +1343,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
1343 case DCB_CONNECTOR_LVDS_SPWG: 1343 case DCB_CONNECTOR_LVDS_SPWG:
1344 case DCB_CONNECTOR_eDP: 1344 case DCB_CONNECTOR_eDP:
1345 /* see note in nouveau_connector_set_property() */ 1345 /* see note in nouveau_connector_set_property() */
1346 if (disp->disp.oclass < NV50_DISP) { 1346 if (disp->disp.object.oclass < NV50_DISP) {
1347 nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; 1347 nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
1348 break; 1348 break;
1349 } 1349 }
@@ -1366,8 +1366,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
1366 break; 1366 break;
1367 } 1367 }
1368 1368
1369 ret = nvif_notify_init(&disp->disp, nouveau_connector_hotplug, true, 1369 ret = nvif_notify_init(&disp->disp.object, nouveau_connector_hotplug,
1370 NV04_DISP_NTFY_CONN, 1370 true, NV04_DISP_NTFY_CONN,
1371 &(struct nvif_notify_conn_req_v0) { 1371 &(struct nvif_notify_conn_req_v0) {
1372 .mask = NVIF_NOTIFY_CONN_V0_ANY, 1372 .mask = NVIF_NOTIFY_CONN_V0_ANY,
1373 .conn = index, 1373 .conn = index,
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7d0bec8dd03d..774b429142bc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -116,7 +116,7 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
116 bool ret = false; 116 bool ret = false;
117 117
118 do { 118 do {
119 ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args)); 119 ret = nvif_mthd(&disp->disp.object, 0, &args, sizeof(args));
120 if (ret != 0) 120 if (ret != 0)
121 return false; 121 return false;
122 122
@@ -175,7 +175,7 @@ nouveau_display_vblank_init(struct drm_device *dev)
175 175
176 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 176 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
177 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 177 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
178 ret = nvif_notify_init(&disp->disp, 178 ret = nvif_notify_init(&disp->disp.object,
179 nouveau_display_vblank_handler, false, 179 nouveau_display_vblank_handler, false,
180 NV04_DISP_NTFY_VBLANK, 180 NV04_DISP_NTFY_VBLANK,
181 &(struct nvif_notify_head_req_v0) { 181 &(struct nvif_notify_head_req_v0) {
@@ -454,10 +454,10 @@ nouveau_display_create_properties(struct drm_device *dev)
454 struct nouveau_display *disp = nouveau_display(dev); 454 struct nouveau_display *disp = nouveau_display(dev);
455 int gen; 455 int gen;
456 456
457 if (disp->disp.oclass < NV50_DISP) 457 if (disp->disp.object.oclass < NV50_DISP)
458 gen = 0; 458 gen = 0;
459 else 459 else
460 if (disp->disp.oclass < GF110_DISP) 460 if (disp->disp.object.oclass < GF110_DISP)
461 gen = 1; 461 gen = 1;
462 else 462 else
463 gen = 2; 463 gen = 2;
@@ -533,31 +533,10 @@ nouveau_display_create(struct drm_device *dev)
533 drm_kms_helper_poll_disable(dev); 533 drm_kms_helper_poll_disable(dev);
534 534
535 if (nouveau_modeset != 2 && drm->vbios.dcb.entries) { 535 if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
536 static const u16 oclass[] = { 536 ret = nvif_disp_ctor(&drm->client.device, 0, &disp->disp);
537 GP102_DISP,
538 GP100_DISP,
539 GM200_DISP,
540 GM107_DISP,
541 GK110_DISP,
542 GK104_DISP,
543 GF110_DISP,
544 GT214_DISP,
545 GT206_DISP,
546 GT200_DISP,
547 G82_DISP,
548 NV50_DISP,
549 NV04_DISP,
550 };
551 int i;
552
553 for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
554 ret = nvif_object_init(&drm->client.device.object, 0,
555 oclass[i], NULL, 0, &disp->disp);
556 }
557
558 if (ret == 0) { 537 if (ret == 0) {
559 nouveau_display_create_properties(dev); 538 nouveau_display_create_properties(dev);
560 if (disp->disp.oclass < NV50_DISP) 539 if (disp->disp.object.oclass < NV50_DISP)
561 ret = nv04_display_create(dev); 540 ret = nv04_display_create(dev);
562 else 541 else
563 ret = nv50_display_create(dev); 542 ret = nv50_display_create(dev);
@@ -611,7 +590,7 @@ nouveau_display_destroy(struct drm_device *dev)
611 if (disp->dtor) 590 if (disp->dtor)
612 disp->dtor(dev); 591 disp->dtor(dev);
613 592
614 nvif_object_fini(&disp->disp); 593 nvif_disp_dtor(&disp->disp);
615 594
616 nouveau_drm(dev)->display = NULL; 595 nouveau_drm(dev)->display = NULL;
617 kfree(disp); 596 kfree(disp);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 270ba56f2756..54aa7c3fa42d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -2,6 +2,7 @@
2#ifndef __NOUVEAU_DISPLAY_H__ 2#ifndef __NOUVEAU_DISPLAY_H__
3#define __NOUVEAU_DISPLAY_H__ 3#define __NOUVEAU_DISPLAY_H__
4#include "nouveau_drv.h" 4#include "nouveau_drv.h"
5#include <nvif/disp.h>
5 6
6struct nouveau_framebuffer { 7struct nouveau_framebuffer {
7 struct drm_framebuffer base; 8 struct drm_framebuffer base;
@@ -38,7 +39,7 @@ struct nouveau_display {
38 int (*init)(struct drm_device *); 39 int (*init)(struct drm_device *);
39 void (*fini)(struct drm_device *); 40 void (*fini)(struct drm_device *);
40 41
41 struct nvif_object disp; 42 struct nvif_disp disp;
42 43
43 struct drm_property *dithering_mode; 44 struct drm_property *dithering_mode;
44 struct drm_property *dithering_depth; 45 struct drm_property *dithering_depth;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 10e84f6ca2b7..945afd34138e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -28,6 +28,8 @@
28#include "nouveau_dma.h" 28#include "nouveau_dma.h"
29#include "nouveau_vmm.h" 29#include "nouveau_vmm.h"
30 30
31#include <nvif/user.h>
32
31void 33void
32OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords) 34OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
33{ 35{
@@ -80,18 +82,11 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
80} 82}
81 83
82void 84void
83nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, 85nv50_dma_push(struct nouveau_channel *chan, u64 offset, int length)
84 int delta, int length)
85{ 86{
86 struct nouveau_cli *cli = (void *)chan->user.client; 87 struct nvif_user *user = &chan->drm->client.device.user;
87 struct nouveau_bo *pb = chan->push.buffer; 88 struct nouveau_bo *pb = chan->push.buffer;
88 struct nouveau_vma *vma;
89 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; 89 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
90 u64 offset;
91
92 vma = nouveau_vma_find(bo, &cli->vmm);
93 BUG_ON(!vma);
94 offset = vma->addr + delta;
95 90
96 BUG_ON(chan->dma.ib_free < 1); 91 BUG_ON(chan->dma.ib_free < 1);
97 92
@@ -105,6 +100,8 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
105 nouveau_bo_rd32(pb, 0); 100 nouveau_bo_rd32(pb, 0);
106 101
107 nvif_wr32(&chan->user, 0x8c, chan->dma.ib_put); 102 nvif_wr32(&chan->user, 0x8c, chan->dma.ib_put);
103 if (user->func && user->func->doorbell)
104 user->func->doorbell(user, chan->chid);
108 chan->dma.ib_free--; 105 chan->dma.ib_free--;
109} 106}
110 107
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 74e10b14a7da..fc5e3f41282d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -31,8 +31,7 @@
31#include "nouveau_chan.h" 31#include "nouveau_chan.h"
32 32
33int nouveau_dma_wait(struct nouveau_channel *, int slots, int size); 33int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
34void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *, 34void nv50_dma_push(struct nouveau_channel *, u64 addr, int length);
35 int delta, int length);
36 35
37/* 36/*
38 * There's a hw race condition where you can't jump to your PUT offset, 37 * There's a hw race condition where you can't jump to your PUT offset,
@@ -55,7 +54,6 @@ enum {
55 54
56 NvSub2D = 3, /* DO NOT CHANGE - hardcoded for kepler gr fifo */ 55 NvSub2D = 3, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
57 NvSubCopy = 4, /* DO NOT CHANGE - hardcoded for kepler gr fifo */ 56 NvSubCopy = 4, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
58 FermiSw = 5, /* DO NOT CHANGE (well.. 6/7 will work...) */
59}; 57};
60 58
61/* Object handles - for stuff that's doesn't use handle == oclass. */ 59/* Object handles - for stuff that's doesn't use handle == oclass. */
@@ -151,7 +149,7 @@ FIRE_RING(struct nouveau_channel *chan)
151 chan->accel_done = true; 149 chan->accel_done = true;
152 150
153 if (chan->dma.ib_max) { 151 if (chan->dma.ib_max) {
154 nv50_dma_push(chan, chan->push.buffer, chan->dma.put << 2, 152 nv50_dma_push(chan, chan->push.addr + (chan->dma.put << 2),
155 (chan->dma.cur - chan->dma.put) << 2); 153 (chan->dma.cur - chan->dma.put) << 2);
156 } else { 154 } else {
157 WRITE_PUT(chan->dma.cur); 155 WRITE_PUT(chan->dma.cur);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index bbbf353682e1..775443c9af94 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -38,6 +38,8 @@
38#include <core/tegra.h> 38#include <core/tegra.h>
39 39
40#include <nvif/driver.h> 40#include <nvif/driver.h>
41#include <nvif/fifo.h>
42#include <nvif/user.h>
41 43
42#include <nvif/class.h> 44#include <nvif/class.h>
43#include <nvif/cl0002.h> 45#include <nvif/cl0002.h>
@@ -112,24 +114,22 @@ nouveau_name(struct drm_device *dev)
112} 114}
113 115
114static inline bool 116static inline bool
115nouveau_cli_work_ready(struct dma_fence *fence, bool wait) 117nouveau_cli_work_ready(struct dma_fence *fence)
116{ 118{
117 if (!dma_fence_is_signaled(fence)) { 119 if (!dma_fence_is_signaled(fence))
118 if (!wait) 120 return false;
119 return false;
120 WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
121 }
122 dma_fence_put(fence); 121 dma_fence_put(fence);
123 return true; 122 return true;
124} 123}
125 124
126static void 125static void
127nouveau_cli_work_flush(struct nouveau_cli *cli, bool wait) 126nouveau_cli_work(struct work_struct *w)
128{ 127{
128 struct nouveau_cli *cli = container_of(w, typeof(*cli), work);
129 struct nouveau_cli_work *work, *wtmp; 129 struct nouveau_cli_work *work, *wtmp;
130 mutex_lock(&cli->lock); 130 mutex_lock(&cli->lock);
131 list_for_each_entry_safe(work, wtmp, &cli->worker, head) { 131 list_for_each_entry_safe(work, wtmp, &cli->worker, head) {
132 if (!work->fence || nouveau_cli_work_ready(work->fence, wait)) { 132 if (!work->fence || nouveau_cli_work_ready(work->fence)) {
133 list_del(&work->head); 133 list_del(&work->head);
134 work->func(work); 134 work->func(work);
135 } 135 }
@@ -158,16 +158,16 @@ nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
158} 158}
159 159
160static void 160static void
161nouveau_cli_work(struct work_struct *w)
162{
163 struct nouveau_cli *cli = container_of(w, typeof(*cli), work);
164 nouveau_cli_work_flush(cli, false);
165}
166
167static void
168nouveau_cli_fini(struct nouveau_cli *cli) 161nouveau_cli_fini(struct nouveau_cli *cli)
169{ 162{
170 nouveau_cli_work_flush(cli, true); 163 /* All our channels are dead now, which means all the fences they
164 * own are signalled, and all callback functions have been called.
165 *
166 * So, after flushing the workqueue, there should be nothing left.
167 */
168 flush_work(&cli->work);
169 WARN_ON(!list_empty(&cli->worker));
170
171 usif_client_fini(cli); 171 usif_client_fini(cli);
172 nouveau_vmm_fini(&cli->vmm); 172 nouveau_vmm_fini(&cli->vmm);
173 nvif_mmu_fini(&cli->mmu); 173 nvif_mmu_fini(&cli->mmu);
@@ -307,6 +307,16 @@ nouveau_accel_init(struct nouveau_drm *drm)
307 if (nouveau_noaccel) 307 if (nouveau_noaccel)
308 return; 308 return;
309 309
310 ret = nouveau_channels_init(drm);
311 if (ret)
312 return;
313
314 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_VOLTA) {
315 ret = nvif_user_init(device);
316 if (ret)
317 return;
318 }
319
310 /* initialise synchronisation routines */ 320 /* initialise synchronisation routines */
311 /*XXX: this is crap, but the fence/channel stuff is a little 321 /*XXX: this is crap, but the fence/channel stuff is a little
312 * backwards in some places. this will be fixed. 322 * backwards in some places. this will be fixed.
@@ -338,6 +348,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
338 case KEPLER_CHANNEL_GPFIFO_B: 348 case KEPLER_CHANNEL_GPFIFO_B:
339 case MAXWELL_CHANNEL_GPFIFO_A: 349 case MAXWELL_CHANNEL_GPFIFO_A:
340 case PASCAL_CHANNEL_GPFIFO_A: 350 case PASCAL_CHANNEL_GPFIFO_A:
351 case VOLTA_CHANNEL_GPFIFO_A:
341 ret = nvc0_fence_create(drm); 352 ret = nvc0_fence_create(drm);
342 break; 353 break;
343 default: 354 default:
@@ -354,13 +365,12 @@ nouveau_accel_init(struct nouveau_drm *drm)
354 365
355 if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) { 366 if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
356 ret = nouveau_channel_new(drm, &drm->client.device, 367 ret = nouveau_channel_new(drm, &drm->client.device,
357 NVA06F_V0_ENGINE_CE0 | 368 nvif_fifo_runlist_ce(device), 0,
358 NVA06F_V0_ENGINE_CE1, 369 &drm->cechan);
359 0, &drm->cechan);
360 if (ret) 370 if (ret)
361 NV_ERROR(drm, "failed to create ce channel, %d\n", ret); 371 NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
362 372
363 arg0 = NVA06F_V0_ENGINE_GR; 373 arg0 = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);
364 arg1 = 1; 374 arg1 = 1;
365 } else 375 } else
366 if (device->info.chipset >= 0xa3 && 376 if (device->info.chipset >= 0xa3 &&
@@ -386,38 +396,36 @@ nouveau_accel_init(struct nouveau_drm *drm)
386 return; 396 return;
387 } 397 }
388 398
389 ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW, 399 if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
390 nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw); 400 ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
391 if (ret == 0) { 401 nouveau_abi16_swclass(drm), NULL, 0,
392 ret = RING_SPACE(drm->channel, 2); 402 &drm->nvsw);
393 if (ret == 0) { 403 if (ret == 0) {
394 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { 404 ret = RING_SPACE(drm->channel, 2);
405 if (ret == 0) {
395 BEGIN_NV04(drm->channel, NvSubSw, 0, 1); 406 BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
396 OUT_RING (drm->channel, NVDRM_NVSW); 407 OUT_RING (drm->channel, drm->nvsw.handle);
397 } else 408 }
398 if (device->info.family < NV_DEVICE_INFO_V0_KEPLER) { 409
399 BEGIN_NVC0(drm->channel, FermiSw, 0, 1); 410 ret = nvif_notify_init(&drm->nvsw,
400 OUT_RING (drm->channel, 0x001f0000); 411 nouveau_flip_complete,
412 false, NV04_NVSW_NTFY_UEVENT,
413 NULL, 0, 0, &drm->flip);
414 if (ret == 0)
415 ret = nvif_notify_get(&drm->flip);
416 if (ret) {
417 nouveau_accel_fini(drm);
418 return;
401 } 419 }
402 } 420 }
403 421
404 ret = nvif_notify_init(&drm->nvsw, nouveau_flip_complete,
405 false, NV04_NVSW_NTFY_UEVENT,
406 NULL, 0, 0, &drm->flip);
407 if (ret == 0)
408 ret = nvif_notify_get(&drm->flip);
409 if (ret) { 422 if (ret) {
423 NV_ERROR(drm, "failed to allocate sw class, %d\n", ret);
410 nouveau_accel_fini(drm); 424 nouveau_accel_fini(drm);
411 return; 425 return;
412 } 426 }
413 } 427 }
414 428
415 if (ret) {
416 NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
417 nouveau_accel_fini(drm);
418 return;
419 }
420
421 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { 429 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
422 ret = nvkm_gpuobj_new(nvxx_device(&drm->client.device), 32, 0, 430 ret = nvkm_gpuobj_new(nvxx_device(&drm->client.device), 32, 0,
423 false, NULL, &drm->notify); 431 false, NULL, &drm->notify);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 881b44b89a01..6e1acaec3400 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -170,6 +170,12 @@ struct nouveau_drm {
170 /* synchronisation */ 170 /* synchronisation */
171 void *fence; 171 void *fence;
172 172
173 /* Global channel management. */
174 struct {
175 int nr;
176 u64 context_base;
177 } chan;
178
173 /* context for accelerated drm-internal operations */ 179 /* context for accelerated drm-internal operations */
174 struct nouveau_channel *cechan; 180 struct nouveau_channel *cechan;
175 struct nouveau_channel *channel; 181 struct nouveau_channel *channel;
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index e28d966946a1..3517f920bf89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -32,6 +32,7 @@
32#include <drm/drm_encoder_slave.h> 32#include <drm/drm_encoder_slave.h>
33#include <drm/drm_dp_mst_helper.h> 33#include <drm/drm_dp_mst_helper.h>
34#include "dispnv04/disp.h" 34#include "dispnv04/disp.h"
35struct nv50_head_atom;
35 36
36#define NV_DPMS_CLEARED 0x80 37#define NV_DPMS_CLEARED 0x80
37 38
@@ -68,7 +69,7 @@ struct nouveau_encoder {
68 void (*enc_save)(struct drm_encoder *encoder); 69 void (*enc_save)(struct drm_encoder *encoder);
69 void (*enc_restore)(struct drm_encoder *encoder); 70 void (*enc_restore)(struct drm_encoder *encoder);
70 void (*update)(struct nouveau_encoder *, u8 head, 71 void (*update)(struct nouveau_encoder *, u8 head,
71 struct drm_display_mode *, u8 proto, u8 depth); 72 struct nv50_head_atom *, u8 proto, u8 depth);
72}; 73};
73 74
74struct nouveau_encoder * 75struct nouveau_encoder *
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 503fa94dc06d..412d49bc6e56 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -74,15 +74,14 @@ nouveau_fence_signal(struct nouveau_fence *fence)
74} 74}
75 75
76static struct nouveau_fence * 76static struct nouveau_fence *
77nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm) { 77nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
78 struct nouveau_fence_priv *priv = (void*)drm->fence; 78{
79
80 if (fence->ops != &nouveau_fence_ops_legacy && 79 if (fence->ops != &nouveau_fence_ops_legacy &&
81 fence->ops != &nouveau_fence_ops_uevent) 80 fence->ops != &nouveau_fence_ops_uevent)
82 return NULL; 81 return NULL;
83 82
84 if (fence->context < priv->context_base || 83 if (fence->context < drm->chan.context_base ||
85 fence->context >= priv->context_base + priv->contexts) 84 fence->context >= drm->chan.context_base + drm->chan.nr)
86 return NULL; 85 return NULL;
87 86
88 return from_fence(fence); 87 return from_fence(fence);
@@ -176,7 +175,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
176 INIT_LIST_HEAD(&fctx->flip); 175 INIT_LIST_HEAD(&fctx->flip);
177 INIT_LIST_HEAD(&fctx->pending); 176 INIT_LIST_HEAD(&fctx->pending);
178 spin_lock_init(&fctx->lock); 177 spin_lock_init(&fctx->lock);
179 fctx->context = priv->context_base + chan->chid; 178 fctx->context = chan->drm->chan.context_base + chan->chid;
180 179
181 if (chan == chan->drm->cechan) 180 if (chan == chan->drm->cechan)
182 strcpy(fctx->name, "copy engine channel"); 181 strcpy(fctx->name, "copy engine channel");
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 5bd8d30d1657..b999e6058046 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -55,8 +55,6 @@ struct nouveau_fence_priv {
55 int (*context_new)(struct nouveau_channel *); 55 int (*context_new)(struct nouveau_channel *);
56 void (*context_del)(struct nouveau_channel *); 56 void (*context_del)(struct nouveau_channel *);
57 57
58 u32 contexts;
59 u64 context_base;
60 bool uevent; 58 bool uevent;
61}; 59};
62 60
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index e72a7e37eb0a..300daee74209 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -99,6 +99,7 @@ struct nouveau_gem_object_unmap {
99static void 99static void
100nouveau_gem_object_delete(struct nouveau_vma *vma) 100nouveau_gem_object_delete(struct nouveau_vma *vma)
101{ 101{
102 nouveau_fence_unref(&vma->fence);
102 nouveau_vma_del(&vma); 103 nouveau_vma_del(&vma);
103} 104}
104 105
@@ -114,25 +115,12 @@ nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
114static void 115static void
115nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma) 116nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
116{ 117{
117 const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM; 118 struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
118 struct reservation_object *resv = nvbo->bo.resv;
119 struct reservation_object_list *fobj;
120 struct nouveau_gem_object_unmap *work; 119 struct nouveau_gem_object_unmap *work;
121 struct dma_fence *fence = NULL;
122
123 fobj = reservation_object_get_list(resv);
124 120
125 list_del_init(&vma->head); 121 list_del_init(&vma->head);
126 122
127 if (fobj && fobj->shared_count > 1) 123 if (!fence) {
128 ttm_bo_wait(&nvbo->bo, false, false);
129 else if (fobj && fobj->shared_count == 1)
130 fence = rcu_dereference_protected(fobj->shared[0],
131 reservation_object_held(resv));
132 else
133 fence = reservation_object_get_excl(nvbo->bo.resv);
134
135 if (!fence || !mapped) {
136 nouveau_gem_object_delete(vma); 124 nouveau_gem_object_delete(vma);
137 return; 125 return;
138 } 126 }
@@ -344,9 +332,20 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
344 nvbo = list_entry(op->list.next, struct nouveau_bo, entry); 332 nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
345 b = &pbbo[nvbo->pbbo_index]; 333 b = &pbbo[nvbo->pbbo_index];
346 334
347 if (likely(fence)) 335 if (likely(fence)) {
336 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
337 struct nouveau_vma *vma;
338
348 nouveau_bo_fence(nvbo, fence, !!b->write_domains); 339 nouveau_bo_fence(nvbo, fence, !!b->write_domains);
349 340
341 if (drm->client.vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
342 vma = (void *)(unsigned long)b->user_priv;
343 nouveau_fence_unref(&vma->fence);
344 dma_fence_get(&fence->base);
345 vma->fence = fence;
346 }
347 }
348
350 if (unlikely(nvbo->validate_mapped)) { 349 if (unlikely(nvbo->validate_mapped)) {
351 ttm_bo_kunmap(&nvbo->kmap); 350 ttm_bo_kunmap(&nvbo->kmap);
352 nvbo->validate_mapped = false; 351 nvbo->validate_mapped = false;
@@ -432,7 +431,20 @@ retry:
432 } 431 }
433 } 432 }
434 433
435 b->user_priv = (uint64_t)(unsigned long)nvbo; 434 if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
435 struct nouveau_vmm *vmm = &cli->vmm;
436 struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
437 if (!vma) {
438 NV_PRINTK(err, cli, "vma not found!\n");
439 ret = -EINVAL;
440 break;
441 }
442
443 b->user_priv = (uint64_t)(unsigned long)vma;
444 } else {
445 b->user_priv = (uint64_t)(unsigned long)nvbo;
446 }
447
436 nvbo->reserved_by = file_priv; 448 nvbo->reserved_by = file_priv;
437 nvbo->pbbo_index = i; 449 nvbo->pbbo_index = i;
438 if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && 450 if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
@@ -763,10 +775,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
763 } 775 }
764 776
765 for (i = 0; i < req->nr_push; i++) { 777 for (i = 0; i < req->nr_push; i++) {
766 struct nouveau_bo *nvbo = (void *)(unsigned long) 778 struct nouveau_vma *vma = (void *)(unsigned long)
767 bo[push[i].bo_index].user_priv; 779 bo[push[i].bo_index].user_priv;
768 780
769 nv50_dma_push(chan, nvbo, push[i].offset, 781 nv50_dma_push(chan, vma->addr + push[i].offset,
770 push[i].length); 782 push[i].length);
771 } 783 }
772 } else 784 } else
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 7c965648df80..44178b4c3599 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -327,7 +327,7 @@ nouveau_temp_is_visible(const void *data, u32 attr, int channel)
327 struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data); 327 struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
328 struct nvkm_therm *therm = nvxx_therm(&drm->client.device); 328 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
329 329
330 if (therm && therm->attr_get && nvkm_therm_temp_get(therm) < 0) 330 if (!therm || !therm->attr_get || nvkm_therm_temp_get(therm) < 0)
331 return 0; 331 return 0;
332 332
333 switch (attr) { 333 switch (attr) {
@@ -351,8 +351,8 @@ nouveau_pwm_is_visible(const void *data, u32 attr, int channel)
351 struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data); 351 struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
352 struct nvkm_therm *therm = nvxx_therm(&drm->client.device); 352 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
353 353
354 if (therm && therm->attr_get && therm->fan_get && 354 if (!therm || !therm->attr_get || !therm->fan_get ||
355 therm->fan_get(therm) < 0) 355 therm->fan_get(therm) < 0)
356 return 0; 356 return 0;
357 357
358 switch (attr) { 358 switch (attr) {
@@ -707,13 +707,20 @@ nouveau_hwmon_init(struct drm_device *dev)
707{ 707{
708#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) 708#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
709 struct nouveau_drm *drm = nouveau_drm(dev); 709 struct nouveau_drm *drm = nouveau_drm(dev);
710 struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
710 struct nvkm_therm *therm = nvxx_therm(&drm->client.device); 711 struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
712 struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
711 const struct attribute_group *special_groups[N_ATTR_GROUPS]; 713 const struct attribute_group *special_groups[N_ATTR_GROUPS];
712 struct nouveau_hwmon *hwmon; 714 struct nouveau_hwmon *hwmon;
713 struct device *hwmon_dev; 715 struct device *hwmon_dev;
714 int ret = 0; 716 int ret = 0;
715 int i = 0; 717 int i = 0;
716 718
719 if (!iccsense && !therm && !volt) {
720 NV_DEBUG(drm, "Skipping hwmon registration\n");
721 return 0;
722 }
723
717 hwmon = drm->hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL); 724 hwmon = drm->hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
718 if (!hwmon) 725 if (!hwmon)
719 return -ENOMEM; 726 return -ENOMEM;
@@ -749,6 +756,9 @@ nouveau_hwmon_fini(struct drm_device *dev)
749#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) 756#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
750 struct nouveau_hwmon *hwmon = nouveau_hwmon(dev); 757 struct nouveau_hwmon *hwmon = nouveau_hwmon(dev);
751 758
759 if (!hwmon)
760 return;
761
752 if (hwmon->hwmon) 762 if (hwmon->hwmon)
753 hwmon_device_unregister(hwmon->hwmon); 763 hwmon_device_unregister(hwmon->hwmon);
754 764
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index f5371d96b003..2032c3e4f6e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -92,6 +92,7 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
92 vma->refs = 1; 92 vma->refs = 1;
93 vma->addr = ~0ULL; 93 vma->addr = ~0ULL;
94 vma->mem = NULL; 94 vma->mem = NULL;
95 vma->fence = NULL;
95 list_add_tail(&vma->head, &nvbo->vma_list); 96 list_add_tail(&vma->head, &nvbo->vma_list);
96 97
97 if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM && 98 if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.h b/drivers/gpu/drm/nouveau/nouveau_vmm.h
index 5c31f43678d3..7e3b118cf7c4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.h
@@ -11,6 +11,8 @@ struct nouveau_vma {
11 u64 addr; 11 u64 addr;
12 12
13 struct nouveau_mem *mem; 13 struct nouveau_mem *mem;
14
15 struct nouveau_fence *fence;
14}; 16};
15 17
16struct nouveau_vma *nouveau_vma_find(struct nouveau_bo *, struct nouveau_vmm *); 18struct nouveau_vma *nouveau_vma_find(struct nouveau_bo *, struct nouveau_vmm *);
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index fa8f2375c398..c41e82be4893 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -109,7 +109,5 @@ nv04_fence_create(struct nouveau_drm *drm)
109 priv->base.dtor = nv04_fence_destroy; 109 priv->base.dtor = nv04_fence_destroy;
110 priv->base.context_new = nv04_fence_context_new; 110 priv->base.context_new = nv04_fence_context_new;
111 priv->base.context_del = nv04_fence_context_del; 111 priv->base.context_del = nv04_fence_context_del;
112 priv->base.contexts = 15;
113 priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
114 return 0; 112 return 0;
115} 113}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 2998bde29211..4476b712dc84 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -103,8 +103,6 @@ nv10_fence_create(struct nouveau_drm *drm)
103 priv->base.dtor = nv10_fence_destroy; 103 priv->base.dtor = nv10_fence_destroy;
104 priv->base.context_new = nv10_fence_context_new; 104 priv->base.context_new = nv10_fence_context_new;
105 priv->base.context_del = nv10_fence_context_del; 105 priv->base.context_del = nv10_fence_context_del;
106 priv->base.contexts = 31;
107 priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
108 spin_lock_init(&priv->lock); 106 spin_lock_init(&priv->lock);
109 return 0; 107 return 0;
110} 108}
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 6477b7069e14..5d613d43b84d 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -125,8 +125,6 @@ nv17_fence_create(struct nouveau_drm *drm)
125 priv->base.resume = nv17_fence_resume; 125 priv->base.resume = nv17_fence_resume;
126 priv->base.context_new = nv17_fence_context_new; 126 priv->base.context_new = nv17_fence_context_new;
127 priv->base.context_del = nv10_fence_context_del; 127 priv->base.context_del = nv10_fence_context_del;
128 priv->base.contexts = 31;
129 priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
130 spin_lock_init(&priv->lock); 128 spin_lock_init(&priv->lock);
131 129
132 ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, 130 ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
deleted file mode 100644
index 2b3ccd850750..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ /dev/null
@@ -1,4558 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/dma-mapping.h>
26#include <linux/hdmi.h>
27
28#include <drm/drmP.h>
29#include <drm/drm_atomic.h>
30#include <drm/drm_atomic_helper.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/drm_dp_helper.h>
33#include <drm/drm_fb_helper.h>
34#include <drm/drm_plane_helper.h>
35#include <drm/drm_edid.h>
36
37#include <nvif/class.h>
38#include <nvif/cl0002.h>
39#include <nvif/cl5070.h>
40#include <nvif/cl507a.h>
41#include <nvif/cl507b.h>
42#include <nvif/cl507c.h>
43#include <nvif/cl507d.h>
44#include <nvif/cl507e.h>
45#include <nvif/event.h>
46
47#include "nouveau_drv.h"
48#include "nouveau_dma.h"
49#include "nouveau_gem.h"
50#include "nouveau_connector.h"
51#include "nouveau_encoder.h"
52#include "nouveau_crtc.h"
53#include "nouveau_fence.h"
54#include "nouveau_fbcon.h"
55#include "nv50_display.h"
56
57#define EVO_DMA_NR 9
58
59#define EVO_MASTER (0x00)
60#define EVO_FLIP(c) (0x01 + (c))
61#define EVO_OVLY(c) (0x05 + (c))
62#define EVO_OIMM(c) (0x09 + (c))
63#define EVO_CURS(c) (0x0d + (c))
64
65/* offsets in shared sync bo of various structures */
66#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
67#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
68#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00)
69#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10)
70#define EVO_FLIP_NTFY0(c) EVO_SYNC((c) + 1, 0x20)
71#define EVO_FLIP_NTFY1(c) EVO_SYNC((c) + 1, 0x30)
72
73/******************************************************************************
74 * Atomic state
75 *****************************************************************************/
76#define nv50_atom(p) container_of((p), struct nv50_atom, state)
77
78struct nv50_atom {
79 struct drm_atomic_state state;
80
81 struct list_head outp;
82 bool lock_core;
83 bool flush_disable;
84};
85
86struct nv50_outp_atom {
87 struct list_head head;
88
89 struct drm_encoder *encoder;
90 bool flush_disable;
91
92 union {
93 struct {
94 bool ctrl:1;
95 };
96 u8 mask;
97 } clr;
98
99 union {
100 struct {
101 bool ctrl:1;
102 };
103 u8 mask;
104 } set;
105};
106
107#define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)
108
109struct nv50_head_atom {
110 struct drm_crtc_state state;
111
112 struct {
113 u16 iW;
114 u16 iH;
115 u16 oW;
116 u16 oH;
117 } view;
118
119 struct nv50_head_mode {
120 bool interlace;
121 u32 clock;
122 struct {
123 u16 active;
124 u16 synce;
125 u16 blanke;
126 u16 blanks;
127 } h;
128 struct {
129 u32 active;
130 u16 synce;
131 u16 blanke;
132 u16 blanks;
133 u16 blank2s;
134 u16 blank2e;
135 u16 blankus;
136 } v;
137 } mode;
138
139 struct {
140 bool visible;
141 u32 handle;
142 u64 offset:40;
143 u8 mode:4;
144 } lut;
145
146 struct {
147 bool visible;
148 u32 handle;
149 u64 offset:40;
150 u8 format;
151 u8 kind:7;
152 u8 layout:1;
153 u8 block:4;
154 u32 pitch:20;
155 u16 x;
156 u16 y;
157 u16 w;
158 u16 h;
159 } core;
160
161 struct {
162 bool visible;
163 u32 handle;
164 u64 offset:40;
165 u8 layout:1;
166 u8 format:1;
167 } curs;
168
169 struct {
170 u8 depth;
171 u8 cpp;
172 u16 x;
173 u16 y;
174 u16 w;
175 u16 h;
176 } base;
177
178 struct {
179 u8 cpp;
180 } ovly;
181
182 struct {
183 bool enable:1;
184 u8 bits:2;
185 u8 mode:4;
186 } dither;
187
188 struct {
189 struct {
190 u16 cos:12;
191 u16 sin:12;
192 } sat;
193 } procamp;
194
195 union {
196 struct {
197 bool ilut:1;
198 bool core:1;
199 bool curs:1;
200 };
201 u8 mask;
202 } clr;
203
204 union {
205 struct {
206 bool ilut:1;
207 bool core:1;
208 bool curs:1;
209 bool view:1;
210 bool mode:1;
211 bool base:1;
212 bool ovly:1;
213 bool dither:1;
214 bool procamp:1;
215 };
216 u16 mask;
217 } set;
218};
219
220static inline struct nv50_head_atom *
221nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
222{
223 struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
224 if (IS_ERR(statec))
225 return (void *)statec;
226 return nv50_head_atom(statec);
227}
228
229#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)
230
231struct nv50_wndw_atom {
232 struct drm_plane_state state;
233 u8 interval;
234
235 struct {
236 u32 handle;
237 u16 offset:12;
238 bool awaken:1;
239 } ntfy;
240
241 struct {
242 u32 handle;
243 u16 offset:12;
244 u32 acquire;
245 u32 release;
246 } sema;
247
248 struct {
249 u8 enable:2;
250 } lut;
251
252 struct {
253 u8 mode:2;
254 u8 interval:4;
255
256 u8 format;
257 u8 kind:7;
258 u8 layout:1;
259 u8 block:4;
260 u32 pitch:20;
261 u16 w;
262 u16 h;
263
264 u32 handle;
265 u64 offset;
266 } image;
267
268 struct {
269 u16 x;
270 u16 y;
271 } point;
272
273 union {
274 struct {
275 bool ntfy:1;
276 bool sema:1;
277 bool image:1;
278 };
279 u8 mask;
280 } clr;
281
282 union {
283 struct {
284 bool ntfy:1;
285 bool sema:1;
286 bool image:1;
287 bool lut:1;
288 bool point:1;
289 };
290 u8 mask;
291 } set;
292};
293
294/******************************************************************************
295 * EVO channel
296 *****************************************************************************/
297
298struct nv50_chan {
299 struct nvif_object user;
300 struct nvif_device *device;
301};
302
303static int
304nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
305 const s32 *oclass, u8 head, void *data, u32 size,
306 struct nv50_chan *chan)
307{
308 struct nvif_sclass *sclass;
309 int ret, i, n;
310
311 chan->device = device;
312
313 ret = n = nvif_object_sclass_get(disp, &sclass);
314 if (ret < 0)
315 return ret;
316
317 while (oclass[0]) {
318 for (i = 0; i < n; i++) {
319 if (sclass[i].oclass == oclass[0]) {
320 ret = nvif_object_init(disp, 0, oclass[0],
321 data, size, &chan->user);
322 if (ret == 0)
323 nvif_object_map(&chan->user, NULL, 0);
324 nvif_object_sclass_put(&sclass);
325 return ret;
326 }
327 }
328 oclass++;
329 }
330
331 nvif_object_sclass_put(&sclass);
332 return -ENOSYS;
333}
334
335static void
336nv50_chan_destroy(struct nv50_chan *chan)
337{
338 nvif_object_fini(&chan->user);
339}
340
341/******************************************************************************
342 * PIO EVO channel
343 *****************************************************************************/
344
345struct nv50_pioc {
346 struct nv50_chan base;
347};
348
349static void
350nv50_pioc_destroy(struct nv50_pioc *pioc)
351{
352 nv50_chan_destroy(&pioc->base);
353}
354
355static int
356nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
357 const s32 *oclass, u8 head, void *data, u32 size,
358 struct nv50_pioc *pioc)
359{
360 return nv50_chan_create(device, disp, oclass, head, data, size,
361 &pioc->base);
362}
363
364/******************************************************************************
365 * Overlay Immediate
366 *****************************************************************************/
367
368struct nv50_oimm {
369 struct nv50_pioc base;
370};
371
372static int
373nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
374 int head, struct nv50_oimm *oimm)
375{
376 struct nv50_disp_cursor_v0 args = {
377 .head = head,
378 };
379 static const s32 oclass[] = {
380 GK104_DISP_OVERLAY,
381 GF110_DISP_OVERLAY,
382 GT214_DISP_OVERLAY,
383 G82_DISP_OVERLAY,
384 NV50_DISP_OVERLAY,
385 0
386 };
387
388 return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
389 &oimm->base);
390}
391
392/******************************************************************************
393 * DMA EVO channel
394 *****************************************************************************/
395
396struct nv50_dmac_ctxdma {
397 struct list_head head;
398 struct nvif_object object;
399};
400
401struct nv50_dmac {
402 struct nv50_chan base;
403 dma_addr_t handle;
404 u32 *ptr;
405
406 struct nvif_object sync;
407 struct nvif_object vram;
408 struct list_head ctxdma;
409
410 /* Protects against concurrent pushbuf access to this channel, lock is
411 * grabbed by evo_wait (if the pushbuf reservation is successful) and
412 * dropped again by evo_kick. */
413 struct mutex lock;
414};
415
416static void
417nv50_dmac_ctxdma_del(struct nv50_dmac_ctxdma *ctxdma)
418{
419 nvif_object_fini(&ctxdma->object);
420 list_del(&ctxdma->head);
421 kfree(ctxdma);
422}
423
424static struct nv50_dmac_ctxdma *
425nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
426{
427 struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
428 struct nv50_dmac_ctxdma *ctxdma;
429 const u8 kind = fb->nvbo->kind;
430 const u32 handle = 0xfb000000 | kind;
431 struct {
432 struct nv_dma_v0 base;
433 union {
434 struct nv50_dma_v0 nv50;
435 struct gf100_dma_v0 gf100;
436 struct gf119_dma_v0 gf119;
437 };
438 } args = {};
439 u32 argc = sizeof(args.base);
440 int ret;
441
442 list_for_each_entry(ctxdma, &dmac->ctxdma, head) {
443 if (ctxdma->object.handle == handle)
444 return ctxdma;
445 }
446
447 if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
448 return ERR_PTR(-ENOMEM);
449 list_add(&ctxdma->head, &dmac->ctxdma);
450
451 args.base.target = NV_DMA_V0_TARGET_VRAM;
452 args.base.access = NV_DMA_V0_ACCESS_RDWR;
453 args.base.start = 0;
454 args.base.limit = drm->client.device.info.ram_user - 1;
455
456 if (drm->client.device.info.chipset < 0x80) {
457 args.nv50.part = NV50_DMA_V0_PART_256;
458 argc += sizeof(args.nv50);
459 } else
460 if (drm->client.device.info.chipset < 0xc0) {
461 args.nv50.part = NV50_DMA_V0_PART_256;
462 args.nv50.kind = kind;
463 argc += sizeof(args.nv50);
464 } else
465 if (drm->client.device.info.chipset < 0xd0) {
466 args.gf100.kind = kind;
467 argc += sizeof(args.gf100);
468 } else {
469 args.gf119.page = GF119_DMA_V0_PAGE_LP;
470 args.gf119.kind = kind;
471 argc += sizeof(args.gf119);
472 }
473
474 ret = nvif_object_init(&dmac->base.user, handle, NV_DMA_IN_MEMORY,
475 &args, argc, &ctxdma->object);
476 if (ret) {
477 nv50_dmac_ctxdma_del(ctxdma);
478 return ERR_PTR(ret);
479 }
480
481 return ctxdma;
482}
483
484static void
485nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
486{
487 struct nvif_device *device = dmac->base.device;
488 struct nv50_dmac_ctxdma *ctxdma, *ctxtmp;
489
490 list_for_each_entry_safe(ctxdma, ctxtmp, &dmac->ctxdma, head) {
491 nv50_dmac_ctxdma_del(ctxdma);
492 }
493
494 nvif_object_fini(&dmac->vram);
495 nvif_object_fini(&dmac->sync);
496
497 nv50_chan_destroy(&dmac->base);
498
499 if (dmac->ptr) {
500 struct device *dev = nvxx_device(device)->dev;
501 dma_free_coherent(dev, PAGE_SIZE, dmac->ptr, dmac->handle);
502 }
503}
504
505static int
506nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
507 const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
508 struct nv50_dmac *dmac)
509{
510 struct nv50_disp_core_channel_dma_v0 *args = data;
511 struct nvif_object pushbuf;
512 int ret;
513
514 mutex_init(&dmac->lock);
515 INIT_LIST_HEAD(&dmac->ctxdma);
516
517 dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
518 &dmac->handle, GFP_KERNEL);
519 if (!dmac->ptr)
520 return -ENOMEM;
521
522 ret = nvif_object_init(&device->object, 0, NV_DMA_FROM_MEMORY,
523 &(struct nv_dma_v0) {
524 .target = NV_DMA_V0_TARGET_PCI_US,
525 .access = NV_DMA_V0_ACCESS_RD,
526 .start = dmac->handle + 0x0000,
527 .limit = dmac->handle + 0x0fff,
528 }, sizeof(struct nv_dma_v0), &pushbuf);
529 if (ret)
530 return ret;
531
532 args->pushbuf = nvif_handle(&pushbuf);
533
534 ret = nv50_chan_create(device, disp, oclass, head, data, size,
535 &dmac->base);
536 nvif_object_fini(&pushbuf);
537 if (ret)
538 return ret;
539
540 ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
541 &(struct nv_dma_v0) {
542 .target = NV_DMA_V0_TARGET_VRAM,
543 .access = NV_DMA_V0_ACCESS_RDWR,
544 .start = syncbuf + 0x0000,
545 .limit = syncbuf + 0x0fff,
546 }, sizeof(struct nv_dma_v0),
547 &dmac->sync);
548 if (ret)
549 return ret;
550
551 ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
552 &(struct nv_dma_v0) {
553 .target = NV_DMA_V0_TARGET_VRAM,
554 .access = NV_DMA_V0_ACCESS_RDWR,
555 .start = 0,
556 .limit = device->info.ram_user - 1,
557 }, sizeof(struct nv_dma_v0),
558 &dmac->vram);
559 if (ret)
560 return ret;
561
562 return ret;
563}
564
565/******************************************************************************
566 * Core
567 *****************************************************************************/
568
569struct nv50_mast {
570 struct nv50_dmac base;
571};
572
573static int
574nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
575 u64 syncbuf, struct nv50_mast *core)
576{
577 struct nv50_disp_core_channel_dma_v0 args = {
578 .pushbuf = 0xb0007d00,
579 };
580 static const s32 oclass[] = {
581 GP102_DISP_CORE_CHANNEL_DMA,
582 GP100_DISP_CORE_CHANNEL_DMA,
583 GM200_DISP_CORE_CHANNEL_DMA,
584 GM107_DISP_CORE_CHANNEL_DMA,
585 GK110_DISP_CORE_CHANNEL_DMA,
586 GK104_DISP_CORE_CHANNEL_DMA,
587 GF110_DISP_CORE_CHANNEL_DMA,
588 GT214_DISP_CORE_CHANNEL_DMA,
589 GT206_DISP_CORE_CHANNEL_DMA,
590 GT200_DISP_CORE_CHANNEL_DMA,
591 G82_DISP_CORE_CHANNEL_DMA,
592 NV50_DISP_CORE_CHANNEL_DMA,
593 0
594 };
595
596 return nv50_dmac_create(device, disp, oclass, 0, &args, sizeof(args),
597 syncbuf, &core->base);
598}
599
600/******************************************************************************
601 * Base
602 *****************************************************************************/
603
604struct nv50_sync {
605 struct nv50_dmac base;
606 u32 addr;
607 u32 data;
608};
609
610static int
611nv50_base_create(struct nvif_device *device, struct nvif_object *disp,
612 int head, u64 syncbuf, struct nv50_sync *base)
613{
614 struct nv50_disp_base_channel_dma_v0 args = {
615 .pushbuf = 0xb0007c00 | head,
616 .head = head,
617 };
618 static const s32 oclass[] = {
619 GK110_DISP_BASE_CHANNEL_DMA,
620 GK104_DISP_BASE_CHANNEL_DMA,
621 GF110_DISP_BASE_CHANNEL_DMA,
622 GT214_DISP_BASE_CHANNEL_DMA,
623 GT200_DISP_BASE_CHANNEL_DMA,
624 G82_DISP_BASE_CHANNEL_DMA,
625 NV50_DISP_BASE_CHANNEL_DMA,
626 0
627 };
628
629 return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
630 syncbuf, &base->base);
631}
632
633/******************************************************************************
634 * Overlay
635 *****************************************************************************/
636
637struct nv50_ovly {
638 struct nv50_dmac base;
639};
640
641static int
642nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
643 int head, u64 syncbuf, struct nv50_ovly *ovly)
644{
645 struct nv50_disp_overlay_channel_dma_v0 args = {
646 .pushbuf = 0xb0007e00 | head,
647 .head = head,
648 };
649 static const s32 oclass[] = {
650 GK104_DISP_OVERLAY_CONTROL_DMA,
651 GF110_DISP_OVERLAY_CONTROL_DMA,
652 GT214_DISP_OVERLAY_CHANNEL_DMA,
653 GT200_DISP_OVERLAY_CHANNEL_DMA,
654 G82_DISP_OVERLAY_CHANNEL_DMA,
655 NV50_DISP_OVERLAY_CHANNEL_DMA,
656 0
657 };
658
659 return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
660 syncbuf, &ovly->base);
661}
662
663struct nv50_head {
664 struct nouveau_crtc base;
665 struct {
666 struct nouveau_bo *nvbo[2];
667 int next;
668 } lut;
669 struct nv50_ovly ovly;
670 struct nv50_oimm oimm;
671};
672
673#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
674#define nv50_ovly(c) (&nv50_head(c)->ovly)
675#define nv50_oimm(c) (&nv50_head(c)->oimm)
676#define nv50_chan(c) (&(c)->base.base)
677#define nv50_vers(c) nv50_chan(c)->user.oclass
678
679struct nv50_disp {
680 struct nvif_object *disp;
681 struct nv50_mast mast;
682
683 struct nouveau_bo *sync;
684
685 struct mutex mutex;
686};
687
688static struct nv50_disp *
689nv50_disp(struct drm_device *dev)
690{
691 return nouveau_display(dev)->priv;
692}
693
694#define nv50_mast(d) (&nv50_disp(d)->mast)
695
696/******************************************************************************
697 * EVO channel helpers
698 *****************************************************************************/
699static u32 *
700evo_wait(void *evoc, int nr)
701{
702 struct nv50_dmac *dmac = evoc;
703 struct nvif_device *device = dmac->base.device;
704 u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
705
706 mutex_lock(&dmac->lock);
707 if (put + nr >= (PAGE_SIZE / 4) - 8) {
708 dmac->ptr[put] = 0x20000000;
709
710 nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
711 if (nvif_msec(device, 2000,
712 if (!nvif_rd32(&dmac->base.user, 0x0004))
713 break;
714 ) < 0) {
715 mutex_unlock(&dmac->lock);
716 pr_err("nouveau: evo channel stalled\n");
717 return NULL;
718 }
719
720 put = 0;
721 }
722
723 return dmac->ptr + put;
724}
725
726static void
727evo_kick(u32 *push, void *evoc)
728{
729 struct nv50_dmac *dmac = evoc;
730 nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
731 mutex_unlock(&dmac->lock);
732}
733
734#define evo_mthd(p, m, s) do { \
735 const u32 _m = (m), _s = (s); \
736 if (drm_debug & DRM_UT_KMS) \
737 pr_err("%04x %d %s\n", _m, _s, __func__); \
738 *((p)++) = ((_s << 18) | _m); \
739} while(0)
740
741#define evo_data(p, d) do { \
742 const u32 _d = (d); \
743 if (drm_debug & DRM_UT_KMS) \
744 pr_err("\t%08x\n", _d); \
745 *((p)++) = _d; \
746} while(0)
747
748/******************************************************************************
749 * Plane
750 *****************************************************************************/
751#define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)
752
753struct nv50_wndw {
754 const struct nv50_wndw_func *func;
755 struct nv50_dmac *dmac;
756
757 struct drm_plane plane;
758
759 struct nvif_notify notify;
760 u16 ntfy;
761 u16 sema;
762 u32 data;
763};
764
765struct nv50_wndw_func {
766 void *(*dtor)(struct nv50_wndw *);
767 int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
768 struct nv50_head_atom *asyh);
769 void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
770 struct nv50_head_atom *asyh);
771 void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
772 struct nv50_wndw_atom *asyw);
773
774 void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
775 void (*sema_clr)(struct nv50_wndw *);
776 void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
777 void (*ntfy_clr)(struct nv50_wndw *);
778 int (*ntfy_wait_begun)(struct nv50_wndw *, struct nv50_wndw_atom *);
779 void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
780 void (*image_clr)(struct nv50_wndw *);
781 void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);
782 void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
783
784 u32 (*update)(struct nv50_wndw *, u32 interlock);
785};
786
787static int
788nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
789{
790 if (asyw->set.ntfy)
791 return wndw->func->ntfy_wait_begun(wndw, asyw);
792 return 0;
793}
794
795static u32
796nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
797 struct nv50_wndw_atom *asyw)
798{
799 if (asyw->clr.sema && (!asyw->set.sema || flush))
800 wndw->func->sema_clr(wndw);
801 if (asyw->clr.ntfy && (!asyw->set.ntfy || flush))
802 wndw->func->ntfy_clr(wndw);
803 if (asyw->clr.image && (!asyw->set.image || flush))
804 wndw->func->image_clr(wndw);
805
806 return flush ? wndw->func->update(wndw, interlock) : 0;
807}
808
809static u32
810nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
811 struct nv50_wndw_atom *asyw)
812{
813 if (interlock) {
814 asyw->image.mode = 0;
815 asyw->image.interval = 1;
816 }
817
818 if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
819 if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
820 if (asyw->set.image) wndw->func->image_set(wndw, asyw);
821 if (asyw->set.lut ) wndw->func->lut (wndw, asyw);
822 if (asyw->set.point) wndw->func->point (wndw, asyw);
823
824 return wndw->func->update(wndw, interlock);
825}
826
827static void
828nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
829 struct nv50_wndw_atom *asyw,
830 struct nv50_head_atom *asyh)
831{
832 struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
833 NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
834 wndw->func->release(wndw, asyw, asyh);
835 asyw->ntfy.handle = 0;
836 asyw->sema.handle = 0;
837}
838
839static int
840nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
841 struct nv50_wndw_atom *asyw,
842 struct nv50_head_atom *asyh)
843{
844 struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
845 struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
846 int ret;
847
848 NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
849
850 asyw->image.w = fb->base.width;
851 asyw->image.h = fb->base.height;
852 asyw->image.kind = fb->nvbo->kind;
853
854 if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
855 asyw->interval = 0;
856 else
857 asyw->interval = 1;
858
859 if (asyw->image.kind) {
860 asyw->image.layout = 0;
861 if (drm->client.device.info.chipset >= 0xc0)
862 asyw->image.block = fb->nvbo->mode >> 4;
863 else
864 asyw->image.block = fb->nvbo->mode;
865 asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
866 } else {
867 asyw->image.layout = 1;
868 asyw->image.block = 0;
869 asyw->image.pitch = fb->base.pitches[0];
870 }
871
872 ret = wndw->func->acquire(wndw, asyw, asyh);
873 if (ret)
874 return ret;
875
876 if (asyw->set.image) {
877 if (!(asyw->image.mode = asyw->interval ? 0 : 1))
878 asyw->image.interval = asyw->interval;
879 else
880 asyw->image.interval = 0;
881 }
882
883 return 0;
884}
885
886static int
887nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
888{
889 struct nouveau_drm *drm = nouveau_drm(plane->dev);
890 struct nv50_wndw *wndw = nv50_wndw(plane);
891 struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
892 struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
893 struct nv50_head_atom *harm = NULL, *asyh = NULL;
894 bool varm = false, asyv = false, asym = false;
895 int ret;
896
897 NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
898 if (asyw->state.crtc) {
899 asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
900 if (IS_ERR(asyh))
901 return PTR_ERR(asyh);
902 asym = drm_atomic_crtc_needs_modeset(&asyh->state);
903 asyv = asyh->state.active;
904 }
905
906 if (armw->state.crtc) {
907 harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
908 if (IS_ERR(harm))
909 return PTR_ERR(harm);
910 varm = harm->state.crtc->state->active;
911 }
912
913 if (asyv) {
914 asyw->point.x = asyw->state.crtc_x;
915 asyw->point.y = asyw->state.crtc_y;
916 if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
917 asyw->set.point = true;
918
919 ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
920 if (ret)
921 return ret;
922 } else
923 if (varm) {
924 nv50_wndw_atomic_check_release(wndw, asyw, harm);
925 } else {
926 return 0;
927 }
928
929 if (!asyv || asym) {
930 asyw->clr.ntfy = armw->ntfy.handle != 0;
931 asyw->clr.sema = armw->sema.handle != 0;
932 if (wndw->func->image_clr)
933 asyw->clr.image = armw->image.handle != 0;
934 asyw->set.lut = wndw->func->lut && asyv;
935 }
936
937 return 0;
938}
939
940static void
941nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
942{
943 struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
944 struct nouveau_drm *drm = nouveau_drm(plane->dev);
945
946 NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
947 if (!old_state->fb)
948 return;
949
950 nouveau_bo_unpin(fb->nvbo);
951}
952
953static int
954nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
955{
956 struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
957 struct nouveau_drm *drm = nouveau_drm(plane->dev);
958 struct nv50_wndw *wndw = nv50_wndw(plane);
959 struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
960 struct nv50_head_atom *asyh;
961 struct nv50_dmac_ctxdma *ctxdma;
962 int ret;
963
964 NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
965 if (!asyw->state.fb)
966 return 0;
967
968 ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
969 if (ret)
970 return ret;
971
972 ctxdma = nv50_dmac_ctxdma_new(wndw->dmac, fb);
973 if (IS_ERR(ctxdma)) {
974 nouveau_bo_unpin(fb->nvbo);
975 return PTR_ERR(ctxdma);
976 }
977
978 asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
979 asyw->image.handle = ctxdma->object.handle;
980 asyw->image.offset = fb->nvbo->bo.offset;
981
982 if (wndw->func->prepare) {
983 asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
984 if (IS_ERR(asyh))
985 return PTR_ERR(asyh);
986
987 wndw->func->prepare(wndw, asyh, asyw);
988 }
989
990 return 0;
991}
992
993static const struct drm_plane_helper_funcs
994nv50_wndw_helper = {
995 .prepare_fb = nv50_wndw_prepare_fb,
996 .cleanup_fb = nv50_wndw_cleanup_fb,
997 .atomic_check = nv50_wndw_atomic_check,
998};
999
1000static void
1001nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
1002 struct drm_plane_state *state)
1003{
1004 struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
1005 __drm_atomic_helper_plane_destroy_state(&asyw->state);
1006 kfree(asyw);
1007}
1008
1009static struct drm_plane_state *
1010nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
1011{
1012 struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
1013 struct nv50_wndw_atom *asyw;
1014 if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
1015 return NULL;
1016 __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
1017 asyw->interval = 1;
1018 asyw->sema = armw->sema;
1019 asyw->ntfy = armw->ntfy;
1020 asyw->image = armw->image;
1021 asyw->point = armw->point;
1022 asyw->lut = armw->lut;
1023 asyw->clr.mask = 0;
1024 asyw->set.mask = 0;
1025 return &asyw->state;
1026}
1027
1028static void
1029nv50_wndw_reset(struct drm_plane *plane)
1030{
1031 struct nv50_wndw_atom *asyw;
1032
1033 if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
1034 return;
1035
1036 if (plane->state)
1037 plane->funcs->atomic_destroy_state(plane, plane->state);
1038 plane->state = &asyw->state;
1039 plane->state->plane = plane;
1040 plane->state->rotation = DRM_MODE_ROTATE_0;
1041}
1042
1043static void
1044nv50_wndw_destroy(struct drm_plane *plane)
1045{
1046 struct nv50_wndw *wndw = nv50_wndw(plane);
1047 void *data;
1048 nvif_notify_fini(&wndw->notify);
1049 data = wndw->func->dtor(wndw);
1050 drm_plane_cleanup(&wndw->plane);
1051 kfree(data);
1052}
1053
1054static const struct drm_plane_funcs
1055nv50_wndw = {
1056 .update_plane = drm_atomic_helper_update_plane,
1057 .disable_plane = drm_atomic_helper_disable_plane,
1058 .destroy = nv50_wndw_destroy,
1059 .reset = nv50_wndw_reset,
1060 .atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
1061 .atomic_destroy_state = nv50_wndw_atomic_destroy_state,
1062};
1063
1064static void
1065nv50_wndw_fini(struct nv50_wndw *wndw)
1066{
1067 nvif_notify_put(&wndw->notify);
1068}
1069
1070static void
1071nv50_wndw_init(struct nv50_wndw *wndw)
1072{
1073 nvif_notify_get(&wndw->notify);
1074}
1075
1076static int
1077nv50_wndw_ctor(const struct nv50_wndw_func *func, struct drm_device *dev,
1078 enum drm_plane_type type, const char *name, int index,
1079 struct nv50_dmac *dmac, const u32 *format, int nformat,
1080 struct nv50_wndw *wndw)
1081{
1082 int ret;
1083
1084 wndw->func = func;
1085 wndw->dmac = dmac;
1086
1087 ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw,
1088 format, nformat, NULL,
1089 type, "%s-%d", name, index);
1090 if (ret)
1091 return ret;
1092
1093 drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
1094 return 0;
1095}
1096
1097/******************************************************************************
1098 * Cursor plane
1099 *****************************************************************************/
1100#define nv50_curs(p) container_of((p), struct nv50_curs, wndw)
1101
1102struct nv50_curs {
1103 struct nv50_wndw wndw;
1104 struct nvif_object chan;
1105};
1106
1107static u32
1108nv50_curs_update(struct nv50_wndw *wndw, u32 interlock)
1109{
1110 struct nv50_curs *curs = nv50_curs(wndw);
1111 nvif_wr32(&curs->chan, 0x0080, 0x00000000);
1112 return 0;
1113}
1114
1115static void
1116nv50_curs_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
1117{
1118 struct nv50_curs *curs = nv50_curs(wndw);
1119 nvif_wr32(&curs->chan, 0x0084, (asyw->point.y << 16) | asyw->point.x);
1120}
1121
1122static void
1123nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
1124 struct nv50_wndw_atom *asyw)
1125{
1126 u32 handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
1127 u32 offset = asyw->image.offset;
1128 if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
1129 asyh->curs.handle = handle;
1130 asyh->curs.offset = offset;
1131 asyh->set.curs = asyh->curs.visible;
1132 }
1133}
1134
1135static void
1136nv50_curs_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
1137 struct nv50_head_atom *asyh)
1138{
1139 asyh->curs.visible = false;
1140}
1141
1142static int
1143nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
1144 struct nv50_head_atom *asyh)
1145{
1146 int ret;
1147
1148 ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
1149 DRM_PLANE_HELPER_NO_SCALING,
1150 DRM_PLANE_HELPER_NO_SCALING,
1151 true, true);
1152 asyh->curs.visible = asyw->state.visible;
1153 if (ret || !asyh->curs.visible)
1154 return ret;
1155
1156 switch (asyw->state.fb->width) {
1157 case 32: asyh->curs.layout = 0; break;
1158 case 64: asyh->curs.layout = 1; break;
1159 default:
1160 return -EINVAL;
1161 }
1162
1163 if (asyw->state.fb->width != asyw->state.fb->height)
1164 return -EINVAL;
1165
1166 switch (asyw->state.fb->format->format) {
1167 case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break;
1168 default:
1169 WARN_ON(1);
1170 return -EINVAL;
1171 }
1172
1173 return 0;
1174}
1175
1176static void *
1177nv50_curs_dtor(struct nv50_wndw *wndw)
1178{
1179 struct nv50_curs *curs = nv50_curs(wndw);
1180 nvif_object_fini(&curs->chan);
1181 return curs;
1182}
1183
1184static const u32
1185nv50_curs_format[] = {
1186 DRM_FORMAT_ARGB8888,
1187};
1188
1189static const struct nv50_wndw_func
1190nv50_curs = {
1191 .dtor = nv50_curs_dtor,
1192 .acquire = nv50_curs_acquire,
1193 .release = nv50_curs_release,
1194 .prepare = nv50_curs_prepare,
1195 .point = nv50_curs_point,
1196 .update = nv50_curs_update,
1197};
1198
/* Create the cursor plane for a head: probe for the newest cursor class
 * the device exposes, construct the common window/plane state, then
 * allocate the per-head cursor immediate-use object.
 */
static int
nv50_curs_new(struct nouveau_drm *drm, struct nv50_head *head,
	      struct nv50_curs **pcurs)
{
	/* Ordered newest-first; nvif_mclass() returns the first match. */
	static const struct nvif_mclass curses[] = {
		{ GK104_DISP_CURSOR, 0 },
		{ GF110_DISP_CURSOR, 0 },
		{ GT214_DISP_CURSOR, 0 },
		{ G82_DISP_CURSOR, 0 },
		{ NV50_DISP_CURSOR, 0 },
		{}
	};
	struct nv50_disp_cursor_v0 args = {
		.head = head->base.index,
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_curs *curs;
	int cid, ret;

	cid = nvif_mclass(disp->disp, curses);
	if (cid < 0) {
		NV_ERROR(drm, "No supported cursor immediate class\n");
		return cid;
	}

	if (!(curs = *pcurs = kzalloc(sizeof(*curs), GFP_KERNEL)))
		return -ENOMEM;

	ret = nv50_wndw_ctor(&nv50_curs, drm->dev, DRM_PLANE_TYPE_CURSOR,
			     "curs", head->base.index, &disp->mast.base,
			     nv50_curs_format, ARRAY_SIZE(nv50_curs_format),
			     &curs->wndw);
	if (ret) {
		kfree(curs);
		return ret;
	}

	ret = nvif_object_init(disp->disp, 0, curses[cid].oclass, &args,
			       sizeof(args), &curs->chan);
	if (ret) {
		NV_ERROR(drm, "curs%04x allocation failed: %d\n",
			 curses[cid].oclass, ret);
		/* NOTE(review): curs is not freed here; presumably the
		 * registered plane is torn down via the normal destroy
		 * path (nv50_curs_dtor) — confirm against caller.
		 */
		return ret;
	}

	return 0;
}
1246
1247/******************************************************************************
1248 * Primary plane
1249 *****************************************************************************/
#define nv50_base(p) container_of((p), struct nv50_base, wndw)

/* Per-head primary plane: common window state plus the base channel
 * (and its flip-sync state) used to program it.
 */
struct nv50_base {
	struct nv50_wndw wndw;
	struct nv50_sync chan;
	int id;		/* head index this base channel belongs to */
};
1257
/* Base channel uevent handler: nothing to do, just keep the notifier
 * armed so future events are still delivered.
 */
static int
nv50_base_notify(struct nvif_notify *notify)
{
	return NVIF_NOTIFY_KEEP;
}
1263
/* Enable/disable LUT usage for the base image (method 0x00e0). */
static void
nv50_base_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 2))) {
		evo_mthd(push, 0x00e0, 1);
		evo_data(push, asyw->lut.enable << 30);
		evo_kick(push, &base->chan);
	}
}
1275
/* Remove the base image: zero the presentation control (0x0084) and
 * detach the image context DMA (0x00c0).
 */
static void
nv50_base_image_clr(struct nv50_wndw *wndw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 4))) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x00c0, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &base->chan);
	}
}
1289
/* Program the base image: presentation mode/interval, image context DMA,
 * and the surface description.  The surface method offset and field
 * layout differ per channel generation (keyed off the channel's oclass):
 * pre-G82 includes a memory "kind" field, pre-GF110 drops it, and GF110+
 * moves the methods to 0x0400 and the layout bit to bit 24.
 */
static void
nv50_base_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_base *base = nv50_base(wndw);
	const s32 oclass = base->chan.base.base.user.oclass;
	u32 *push;
	if ((push = evo_wait(&base->chan, 10))) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, (asyw->image.mode << 8) |
			       (asyw->image.interval << 4));
		evo_mthd(push, 0x00c0, 1);
		evo_data(push, asyw->image.handle);
		if (oclass < G82_DISP_BASE_CHANNEL_DMA) {
			evo_mthd(push, 0x0800, 5);
			evo_data(push, asyw->image.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
			evo_data(push, (asyw->image.layout << 20) |
					asyw->image.pitch |
					asyw->image.block);
			evo_data(push, (asyw->image.kind << 16) |
				       (asyw->image.format << 8));
		} else
		if (oclass < GF110_DISP_BASE_CHANNEL_DMA) {
			evo_mthd(push, 0x0800, 5);
			evo_data(push, asyw->image.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
			evo_data(push, (asyw->image.layout << 20) |
					asyw->image.pitch |
					asyw->image.block);
			evo_data(push, asyw->image.format << 8);
		} else {
			evo_mthd(push, 0x0400, 5);
			evo_data(push, asyw->image.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
			evo_data(push, (asyw->image.layout << 24) |
					asyw->image.pitch |
					asyw->image.block);
			evo_data(push, asyw->image.format << 8);
		}
		evo_kick(push, &base->chan);
	}
}
1335
/* Detach the completion notifier context DMA (method 0x00a4). */
static void
nv50_base_ntfy_clr(struct nv50_wndw *wndw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 2))) {
		evo_mthd(push, 0x00a4, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &base->chan);
	}
}
1347
/* Point the completion notifier at the given offset/handle; the awaken
 * bit (bit 30) requests an interrupt on completion (methods 0x00a0/a4).
 */
static void
nv50_base_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 3))) {
		evo_mthd(push, 0x00a0, 2);
		evo_data(push, (asyw->ntfy.awaken << 30) | asyw->ntfy.offset);
		evo_data(push, asyw->ntfy.handle);
		evo_kick(push, &base->chan);
	}
}
1360
/* Detach the flip semaphore context DMA (method 0x0094). */
static void
nv50_base_sema_clr(struct nv50_wndw *wndw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 2))) {
		evo_mthd(push, 0x0094, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &base->chan);
	}
}
1372
/* Program the flip semaphore: offset, acquire value, release value and
 * context DMA handle (methods 0x0088..0x0094).
 */
static void
nv50_base_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 5))) {
		evo_mthd(push, 0x0088, 4);
		evo_data(push, asyw->sema.offset);
		evo_data(push, asyw->sema.acquire);
		evo_data(push, asyw->sema.release);
		evo_data(push, asyw->sema.handle);
		evo_kick(push, &base->chan);
	}
}
1387
/* Kick an UPDATE (method 0x0080) on the base channel and return the
 * per-head interlock bit for the core channel to wait on.  The bit
 * spacing differs: 8 bits per head pre-GF110, 4 bits per head after.
 */
static u32
nv50_base_update(struct nv50_wndw *wndw, u32 interlock)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;

	if (!(push = evo_wait(&base->chan, 2)))
		return 0;
	evo_mthd(push, 0x0080, 1);
	evo_data(push, interlock);
	evo_kick(push, &base->chan);

	if (base->chan.base.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA)
		return interlock ? 2 << (base->id * 8) : 0;
	return interlock ? 2 << (base->id * 4) : 0;
}
1404
/* Poll the notifier word in the shared sync buffer (for up to 2s) until
 * the hardware marks the flip as begun (status field == 0x40000000).
 */
static int
nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
	if (nvif_msec(&drm->client.device, 2000ULL,
		u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
		if ((data & 0xc0000000) == 0x40000000)
			break;
		usleep_range(1, 2);
	) < 0)
		return -ETIMEDOUT;
	return 0;
}
1419
/* Release hook: zero cpp so the head state no longer accounts for a
 * visible base image.
 */
static void
nv50_base_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	asyh->base.cpp = 0;
}
1426
/* Validate the requested primary-plane state (no scaling permitted),
 * mirror the framebuffer geometry into the head state, and translate
 * the DRM fourcc into the hardware surface format code.
 */
static int
nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	const struct drm_framebuffer *fb = asyw->state.fb;
	int ret;

	if (!fb->format->depth)
		return -EINVAL;

	ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);
	if (ret)
		return ret;

	asyh->base.depth = fb->format->depth;
	asyh->base.cpp = fb->format->cpp[0];
	asyh->base.x = asyw->state.src.x1 >> 16;
	asyh->base.y = asyw->state.src.y1 >> 16;
	asyh->base.w = asyw->state.fb->width;
	asyh->base.h = asyw->state.fb->height;

	switch (fb->format->format) {
	case DRM_FORMAT_C8         : asyw->image.format = 0x1e; break;
	case DRM_FORMAT_RGB565     : asyw->image.format = 0xe8; break;
	case DRM_FORMAT_XRGB1555   :
	case DRM_FORMAT_ARGB1555   : asyw->image.format = 0xe9; break;
	case DRM_FORMAT_XRGB8888   :
	case DRM_FORMAT_ARGB8888   : asyw->image.format = 0xcf; break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
	case DRM_FORMAT_XBGR8888   :
	case DRM_FORMAT_ABGR8888   : asyw->image.format = 0xd5; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	asyw->lut.enable = 1;
	asyw->set.image = true;
	return 0;
}
1471
/* Plane destructor hook: destroy the base DMA channel and return the
 * containing structure so the common code can kfree() it.
 */
static void *
nv50_base_dtor(struct nv50_wndw *wndw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
	struct nv50_base *base = nv50_base(wndw);
	nv50_dmac_destroy(&base->chan.base, disp->disp);
	return base;
}
1480
/* Framebuffer formats supported by the primary plane (must stay in sync
 * with the fourcc switch in nv50_base_acquire).
 */
static const u32
nv50_base_format[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};
1494
/* Window hooks for the primary plane (full DMA channel: semaphore,
 * notifier, image and LUT programming plus update kick-off).
 */
static const struct nv50_wndw_func
nv50_base = {
	.dtor = nv50_base_dtor,
	.acquire = nv50_base_acquire,
	.release = nv50_base_release,
	.sema_set = nv50_base_sema_set,
	.sema_clr = nv50_base_sema_clr,
	.ntfy_set = nv50_base_ntfy_set,
	.ntfy_clr = nv50_base_ntfy_clr,
	.ntfy_wait_begun = nv50_base_ntfy_wait_begun,
	.image_set = nv50_base_image_set,
	.image_clr = nv50_base_image_clr,
	.lut = nv50_base_lut,
	.update = nv50_base_update,
};
1510
/* Create the primary plane for a head: construct the common window
 * state, allocate the base DMA channel, and hook up the channel's
 * uevent notifier for flip completion.
 */
static int
nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
	      struct nv50_base **pbase)
{
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_base *base;
	int ret;

	if (!(base = *pbase = kzalloc(sizeof(*base), GFP_KERNEL)))
		return -ENOMEM;
	base->id = head->base.index;
	/* Per-head notifier/semaphore slots in the shared sync buffer. */
	base->wndw.ntfy = EVO_FLIP_NTFY0(base->id);
	base->wndw.sema = EVO_FLIP_SEM0(base->id);
	base->wndw.data = 0x00000000;

	ret = nv50_wndw_ctor(&nv50_base, drm->dev, DRM_PLANE_TYPE_PRIMARY,
			     "base", base->id, &base->chan.base,
			     nv50_base_format, ARRAY_SIZE(nv50_base_format),
			     &base->wndw);
	if (ret) {
		kfree(base);
		return ret;
	}

	/* NOTE(review): from here on, failure relies on the registered
	 * plane being torn down via nv50_base_dtor — confirm in caller.
	 */
	ret = nv50_base_create(&drm->client.device, disp->disp, base->id,
			       disp->sync->bo.offset, &base->chan);
	if (ret)
		return ret;

	return nvif_notify_init(&base->chan.base.base.user, nv50_base_notify,
				false,
				NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
				&(struct nvif_notify_uevent_req) {},
				sizeof(struct nvif_notify_uevent_req),
				sizeof(struct nvif_notify_uevent_rep),
				&base->wndw.notify);
}
1548
1549/******************************************************************************
1550 * Head
1551 *****************************************************************************/
/* Program the head's procamp (saturation sin/cos fields) via the core
 * channel; the method offset/stride differs pre/post GF110.
 */
static void
nv50_head_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
		else
			evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
		evo_data(push, (asyh->procamp.sat.sin << 20) |
			       (asyh->procamp.sat.cos << 8));
		evo_kick(push, core);
	}
}
1567
/* Program the head's dithering control (mode/bits/enable packed as laid
 * out by nv50_head_atomic_check_dither); method offset varies with the
 * core channel generation.
 */
static void
nv50_head_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
		else
		if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
		else
			evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
		evo_data(push, (asyh->dither.mode << 3) |
			       (asyh->dither.bits << 1) |
			        asyh->dither.enable);
		evo_kick(push, core);
	}
}
1587
/* Program the overlay usage bounds for this head, derived from the base
 * plane's bytes-per-pixel (no 1-cpp case here, unlike nv50_head_base).
 */
static void
nv50_head_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 bounds = 0;
	u32 *push;

	if (asyh->base.cpp) {
		switch (asyh->base.cpp) {
		case 8: bounds |= 0x00000500; break;
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00000001;	/* usage-bounds enable */
	}

	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}
1616
/* Program the base-channel usage bounds for this head from the base
 * plane's bytes-per-pixel.
 */
static void
nv50_head_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 bounds = 0;
	u32 *push;

	if (asyh->base.cpp) {
		switch (asyh->base.cpp) {
		case 8: bounds |= 0x00000500; break;
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		case 1: bounds |= 0x00000000; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00000001;	/* usage-bounds enable */
	}

	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}
1646
/* Hide the hardware cursor for this head.  G82+ additionally detaches
 * the cursor context DMA; GF110+ uses the 0x300-stride method range.
 */
static void
nv50_head_curs_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
			evo_data(push, 0x05000000);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
			evo_data(push, 0x05000000);
			evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
			evo_data(push, 0x00000000);
		} else {
			evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
			evo_data(push, 0x05000000);
			evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
			evo_data(push, 0x00000000);
		}
		evo_kick(push, core);
	}
}
1671
1672static void
1673nv50_head_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
1674{
1675 struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
1676 u32 *push;
1677 if ((push = evo_wait(core, 5))) {
1678 if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) {
1679 evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
1680 evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
1681 (asyh->curs.format << 24));
1682 evo_data(push, asyh->curs.offset >> 8);
1683 } else
1684 if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) {
1685 evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
1686 evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
1687 (asyh->curs.format << 24));
1688 evo_data(push, asyh->curs.offset >> 8);
1689 evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
1690 evo_data(push, asyh->curs.handle);
1691 } else {
1692 evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
1693 evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
1694 (asyh->curs.format << 24));
1695 evo_data(push, asyh->curs.offset >> 8);
1696 evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
1697 evo_data(push, asyh->curs.handle);
1698 }
1699 evo_kick(push, core);
1700 }
1701}
1702
/* Detach the core (ISO) surface context DMA for this head. */
static void
nv50_head_core_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}
1717
/* Program the head's core (ISO) surface: offset, size, layout/pitch/
 * block, format, context DMA handle and position.  Field layout differs
 * per generation: pre-G82 carries a memory "kind" field, GF110+ moves
 * the methods and puts the layout bit at bit 24.
 */
static void
nv50_head_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 9))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 20 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.kind << 16 |
				       asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
			/* EVO will complain with INVALID_STATE if we have an
			 * active cursor and (re)specify HeadSetContextDmaIso
			 * without also updating HeadSetOffsetCursor.
			 */
			asyh->set.curs = asyh->curs.visible;
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 20 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
		} else {
			evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 24 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
		}
		evo_kick(push, core);
	}
}
1771
/* Disable the head's input LUT.  G82+ additionally detaches the LUT
 * context DMA; the GF110+ disable value differs (0x03000000).
 */
static void
nv50_head_lut_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
			evo_data(push, 0x40000000);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
			evo_data(push, 0x40000000);
			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
			evo_data(push, 0x00000000);
		} else {
			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
			evo_data(push, 0x03000000);
			evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
			evo_data(push, 0x00000000);
		}
		evo_kick(push, core);
	}
}
1796
/* Fill the LUT buffer object from a 256-entry DRM gamma blob.  Entries
 * are packed as 16-bit R/G/B at 8-byte stride; precision and bias depend
 * on the hardware LUT mode being programmed.
 */
static void
nv50_head_lut_load(struct drm_property_blob *blob, int mode,
		   struct nouveau_bo *nvbo)
{
	struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
	void __iomem *lut = (u8 *)nvbo_kmap_obj_iovirtual(nvbo);
	const int size = blob->length / sizeof(*in);
	int bits, shift, i;
	u16 zero, r, g, b;

	/* This can't happen.. But it shuts the compiler up. */
	if (WARN_ON(size != 256))
		return;

	switch (mode) {
	case 0: /* LORES. */
	case 1: /* HIRES. */
		bits = 11;
		shift = 3;
		zero = 0x0000;
		break;
	case 7: /* INTERPOLATE_257_UNITY_RANGE. */
		bits = 14;
		shift = 0;
		zero = 0x6000;	/* bias added to each component */
		break;
	default:
		WARN_ON(1);
		return;
	}

	for (i = 0; i < size; i++) {
		r = (drm_color_lut_extract(in[i].  red, bits) + zero) << shift;
		g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
		b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
		writew(r, lut + (i * 0x08) + 0);
		writew(g, lut + (i * 0x08) + 2);
		writew(b, lut + (i * 0x08) + 4);
	}

	/* INTERPOLATE modes require a "next" entry to interpolate with,
	 * so we replicate the last entry to deal with this for now.
	 */
	writew(r, lut + (i * 0x08) + 0);
	writew(g, lut + (i * 0x08) + 2);
	writew(b, lut + (i * 0x08) + 4);
}
1844
/* Enable the head's input LUT: enable bit + mode, buffer offset, and on
 * G82+ the LUT context DMA handle.  The GF110+ method packs the mode at
 * bit 24 and takes two extra (zero) parameters.
 */
static void
nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 7))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
			evo_data(push, 0x80000000 | asyh->lut.mode << 30);
			evo_data(push, asyh->lut.offset >> 8);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
			evo_data(push, 0x80000000 | asyh->lut.mode << 30);
			evo_data(push, asyh->lut.offset >> 8);
			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
			evo_data(push, asyh->lut.handle);
		} else {
			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
			evo_data(push, 0x80000000 | asyh->lut.mode << 24);
			evo_data(push, asyh->lut.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
			evo_data(push, asyh->lut.handle);
		}
		evo_kick(push, core);
	}
}
1874
/* Program the head's raster timings (precomputed by
 * nv50_head_atomic_check_mode).  Pre-GF110 takes the pixel clock in kHz
 * with an interlace flag; GF110+ programs the clock in Hz separately.
 */
static void
nv50_head_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	struct nv50_head_mode *m = &asyh->mode;
	u32 *push;
	if ((push = evo_wait(core, 14))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
			evo_data(push, 0x00800000 | m->clock);
			evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
			evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
			evo_data(push, 0x00000000);
			evo_data(push, (m->v.active  << 16) | m->h.active );
			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
			evo_data(push, asyh->mode.v.blankus);
			evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
			evo_data(push, 0x00000000);
		} else {
			evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
			evo_data(push, 0x00000000);
			evo_data(push, (m->v.active  << 16) | m->h.active );
			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
			evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
			evo_data(push, 0x00000000); /* ??? */
			evo_data(push, 0xffffff00);
			evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
			evo_data(push, m->clock * 1000);
			evo_data(push, 0x00200000); /* ??? */
			evo_data(push, m->clock * 1000);
		}
		evo_kick(push, core);
	}
}
1915
/* Program the head's scaler: input (iW/iH) and output (oW/oH) viewport
 * sizes computed by nv50_head_atomic_check_view.  GF110+ repeats the
 * output size in a third parameter.
 */
static void
nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 10))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
			evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
			evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
		} else {
			evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
			evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
			evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
		}
		evo_kick(push, core);
	}
}
1943
/* Disable head resources flagged for clearing.  A clear is skipped when
 * the same resource is about to be re-programmed, unless "y" forces the
 * clear regardless.
 */
static void
nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
{
	if (asyh->clr.ilut && (!asyh->set.ilut || y))
		nv50_head_lut_clr(head);
	if (asyh->clr.core && (!asyh->set.core || y))
		nv50_head_core_clr(head);
	if (asyh->clr.curs && (!asyh->set.curs || y))
		nv50_head_curs_clr(head);
}
1954
/* Push all head state flagged in asyh->set to the hardware.  The ILUT
 * path double-buffers: load the gamma blob into the "next" LUT buffer,
 * program its offset, then flip next for the following update.
 */
static void
nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	if (asyh->set.view   ) nv50_head_view    (head, asyh);
	if (asyh->set.mode   ) nv50_head_mode    (head, asyh);
	if (asyh->set.ilut   ) {
		struct nouveau_bo *nvbo = head->lut.nvbo[head->lut.next];
		struct drm_property_blob *blob = asyh->state.gamma_lut;
		if (blob)
			nv50_head_lut_load(blob, asyh->lut.mode, nvbo);
		asyh->lut.offset = nvbo->bo.offset;
		head->lut.next ^= 1;
		nv50_head_lut_set(head, asyh);
	}
	if (asyh->set.core   ) nv50_head_core_set(head, asyh);
	if (asyh->set.curs   ) nv50_head_curs_set(head, asyh);
	if (asyh->set.base   ) nv50_head_base    (head, asyh);
	if (asyh->set.ovly   ) nv50_head_ovly    (head, asyh);
	if (asyh->set.dither ) nv50_head_dither  (head, asyh);
	if (asyh->set.procamp) nv50_head_procamp (head, asyh);
}
1976
1977static void
1978nv50_head_atomic_check_procamp(struct nv50_head_atom *armh,
1979 struct nv50_head_atom *asyh,
1980 struct nouveau_conn_atom *asyc)
1981{
1982 const int vib = asyc->procamp.color_vibrance - 100;
1983 const int hue = asyc->procamp.vibrant_hue - 90;
1984 const int adj = (vib > 0) ? 50 : 0;
1985 asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
1986 asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff;
1987 asyh->set.procamp = true;
1988}
1989
1990static void
1991nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
1992 struct nv50_head_atom *asyh,
1993 struct nouveau_conn_atom *asyc)
1994{
1995 struct drm_connector *connector = asyc->state.connector;
1996 u32 mode = 0x00;
1997
1998 if (asyc->dither.mode == DITHERING_MODE_AUTO) {
1999 if (asyh->base.depth > connector->display_info.bpc * 3)
2000 mode = DITHERING_MODE_DYNAMIC2X2;
2001 } else {
2002 mode = asyc->dither.mode;
2003 }
2004
2005 if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
2006 if (connector->display_info.bpc >= 8)
2007 mode |= DITHERING_DEPTH_8BPC;
2008 } else {
2009 mode |= asyc->dither.depth;
2010 }
2011
2012 asyh->dither.enable = mode;
2013 asyh->dither.bits = mode >> 1;
2014 asyh->dither.mode = mode >> 3;
2015 asyh->set.dither = true;
2016}
2017
/* Compute the head's scaler input/output viewports from the user mode,
 * the adjusted output mode, the scaling-mode property, and any
 * underscan compensation.
 */
static void
nv50_head_atomic_check_view(struct nv50_head_atom *armh,
			    struct nv50_head_atom *asyh,
			    struct nouveau_conn_atom *asyc)
{
	struct drm_connector *connector = asyc->state.connector;
	struct drm_display_mode *omode = &asyh->state.adjusted_mode;
	struct drm_display_mode *umode = &asyh->state.mode;
	int mode = asyc->scaler.mode;
	struct edid *edid;
	int umode_vdisplay, omode_hdisplay, omode_vdisplay;

	if (connector->edid_blob_ptr)
		edid = (struct edid *)connector->edid_blob_ptr->data;
	else
		edid = NULL;

	if (!asyc->scaler.full) {
		/* SCALE_NONE: output viewport is simply the user mode. */
		if (mode == DRM_MODE_SCALE_NONE)
			omode = umode;
	} else {
		/* Non-EDID LVDS/eDP mode. */
		mode = DRM_MODE_SCALE_FULLSCREEN;
	}

	/* For the user-specified mode, we must ignore doublescan and
	 * the like, but honor frame packing.
	 */
	umode_vdisplay = umode->vdisplay;
	if ((umode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
		umode_vdisplay += umode->vtotal;
	asyh->view.iW = umode->hdisplay;
	asyh->view.iH = umode_vdisplay;
	/* For the output mode, we can just use the stock helper. */
	drm_mode_get_hv_timing(omode, &omode_hdisplay, &omode_vdisplay);
	asyh->view.oW = omode_hdisplay;
	asyh->view.oH = omode_vdisplay;

	/* Add overscan compensation if necessary, will keep the aspect
	 * ratio the same as the backend mode unless overridden by the
	 * user setting both hborder and vborder properties.
	 */
	if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
	    (asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
	     drm_detect_hdmi_monitor(edid)))) {
		u32 bX = asyc->scaler.underscan.hborder;
		u32 bY = asyc->scaler.underscan.vborder;
		u32 r = (asyh->view.oH << 19) / asyh->view.oW;	/* 19.13 fixed-point aspect */

		if (bX) {
			asyh->view.oW -= (bX * 2);
			if (bY) asyh->view.oH -= (bY * 2);
			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
		} else {
			/* Default border: ~6.25% of width plus 32 pixels. */
			asyh->view.oW -= (asyh->view.oW >> 4) + 32;
			if (bY) asyh->view.oH -= (bY * 2);
			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
		}
	}

	/* Handle CENTER/ASPECT scaling, taking into account the areas
	 * removed already for overscan compensation.
	 */
	switch (mode) {
	case DRM_MODE_SCALE_CENTER:
		asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
		asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH);
		/* fall-through */
	case DRM_MODE_SCALE_ASPECT:
		/* Shrink the larger axis to preserve the input aspect. */
		if (asyh->view.oH < asyh->view.oW) {
			u32 r = (asyh->view.iW << 19) / asyh->view.iH;
			asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
		} else {
			u32 r = (asyh->view.iH << 19) / asyh->view.iW;
			asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
		}
		break;
	default:
		break;
	}

	asyh->set.view = true;
}
2101
/* Decide whether/how the head's input LUT should be programmed, picking
 * the hardware LUT mode for this display generation.
 */
static void
nv50_head_atomic_check_lut(struct nv50_head *head,
			   struct nv50_head_atom *armh,
			   struct nv50_head_atom *asyh)
{
	struct nv50_disp *disp = nv50_disp(head->base.base.dev);

	/* An I8 surface without an input LUT makes no sense, and
	 * EVO will throw an error if you try.
	 *
	 * Legacy clients actually cause this due to the order in
	 * which they call ioctls, so we will enable the LUT with
	 * whatever contents the buffer already contains to avoid
	 * triggering the error check.
	 */
	if (!asyh->state.gamma_lut && asyh->base.cpp != 1) {
		asyh->lut.handle = 0;
		asyh->clr.ilut = armh->lut.visible;
		return;
	}

	if (disp->disp->oclass < GF110_DISP) {
		/* Pre-GF110: LORES for I8 surfaces, HIRES otherwise;
		 * always reprogram.
		 */
		asyh->lut.mode = (asyh->base.cpp == 1) ? 0 : 1;
		asyh->set.ilut = true;
	} else {
		/* GF110+: INTERPOLATE_257_UNITY_RANGE; only reprogram
		 * when colour management actually changed.
		 */
		asyh->lut.mode = 7;
		asyh->set.ilut = asyh->state.color_mgmt_changed;
	}
	asyh->lut.handle = disp->mast.base.vram.handle;
}
2132
/* Translate the DRM adjusted mode into the hardware raster timings
 * consumed by nv50_head_mode.
 */
static void
nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
	struct nv50_head_mode *m = &asyh->mode;
	u32 blankus;

	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V | CRTC_STEREO_DOUBLE);

	/*
	 * DRM modes are defined in terms of a repeating interval
	 * starting with the active display area.  The hardware modes
	 * are defined in terms of a repeating interval starting one
	 * unit (pixel or line) into the sync pulse.  So, add bias.
	 */

	m->h.active = mode->crtc_htotal;
	m->h.synce  = mode->crtc_hsync_end - mode->crtc_hsync_start - 1;
	m->h.blanke = mode->crtc_hblank_end - mode->crtc_hsync_start - 1;
	m->h.blanks = m->h.blanke + mode->crtc_hdisplay;

	m->v.active = mode->crtc_vtotal;
	m->v.synce  = mode->crtc_vsync_end - mode->crtc_vsync_start - 1;
	m->v.blanke = mode->crtc_vblank_end - mode->crtc_vsync_start - 1;
	m->v.blanks = m->v.blanke + mode->crtc_vdisplay;

	/*XXX: Safe underestimate, even "0" works */
	/* Vertical blanking period in microseconds (lines * line-time). */
	blankus = (m->v.active - mode->crtc_vdisplay - 2) * m->h.active;
	blankus *= 1000;
	blankus /= mode->crtc_clock;
	m->v.blankus = blankus;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* Second-field blanking window; total lines become odd. */
		m->v.blank2e =  m->v.active + m->v.blanke;
		m->v.blank2s =  m->v.blank2e + mode->crtc_vdisplay;
		m->v.active  = (m->v.active * 2) + 1;
		m->interlace = true;
	} else {
		m->v.blank2e = 0;
		m->v.blank2s = 1;
		m->interlace = false;
	}
	m->clock = mode->crtc_clock;

	asyh->set.mode = true;
}
2179
/* Validate the new head state and compute the set/clr masks that tell the
 * commit path which hardware channels (core/base/ovly/curs/ilut) need to be
 * (re)programmed or torn down.  armh is the currently-armed state, asyh the
 * state being checked.
 */
static int
nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
	struct nv50_disp *disp = nv50_disp(crtc->dev);
	struct nv50_head *head = nv50_head(crtc);
	struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
	struct nv50_head_atom *asyh = nv50_head_atom(state);
	struct nouveau_conn_atom *asyc = NULL;
	struct drm_connector_state *conns;
	struct drm_connector *conn;
	int i;

	NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
	if (asyh->state.active) {
		/* Find the connector state driving this CRTC, if any. */
		for_each_new_connector_in_state(asyh->state.state, conn, conns, i) {
			if (conns->crtc == crtc) {
				asyc = nouveau_conn_atom(conns);
				break;
			}
		}

		if (armh->state.active) {
			/* Head stays active: only refresh what changed. */
			if (asyc) {
				if (asyh->state.mode_changed)
					asyc->set.scaler = true;
				if (armh->base.depth != asyh->base.depth)
					asyc->set.dither = true;
			}
		} else {
			/* Head is being turned on: program everything. */
			if (asyc)
				asyc->set.mask = ~0;
			asyh->set.mask = ~0;
		}

		if (asyh->state.mode_changed)
			nv50_head_atomic_check_mode(head, asyh);

		/* Re-evaluate the input LUT when the gamma ramp or the
		 * framebuffer pixel size changed.
		 */
		if (asyh->state.color_mgmt_changed ||
		    asyh->base.cpp != armh->base.cpp)
			nv50_head_atomic_check_lut(head, armh, asyh);
		asyh->lut.visible = asyh->lut.handle != 0;

		if (asyc) {
			if (asyc->set.scaler)
				nv50_head_atomic_check_view(armh, asyh, asyc);
			if (asyc->set.dither)
				nv50_head_atomic_check_dither(armh, asyh, asyc);
			if (asyc->set.procamp)
				nv50_head_atomic_check_procamp(armh, asyh, asyc);
		}

		/* The core channel surface tracks the primary plane when one
		 * is visible (cpp != 0)...
		 */
		if ((asyh->core.visible = (asyh->base.cpp != 0))) {
			asyh->core.x = asyh->base.x;
			asyh->core.y = asyh->base.y;
			asyh->core.w = asyh->base.w;
			asyh->core.h = asyh->base.h;
		} else
		/* ...but must also stay visible if only the cursor or LUT
		 * is active, covering the whole mode.
		 */
		if ((asyh->core.visible = asyh->curs.visible) ||
		    (asyh->core.visible = asyh->lut.visible)) {
			/*XXX: We need to either find some way of having the
			 *     primary base layer appear black, while still
			 *     being able to display the other layers, or we
			 *     need to allocate a dummy black surface here.
			 */
			asyh->core.x = 0;
			asyh->core.y = 0;
			asyh->core.w = asyh->state.mode.hdisplay;
			asyh->core.h = asyh->state.mode.vdisplay;
		}
		/* Fixed core surface format/layout; format 0xcf is the value
		 * the hardware expects here — see the display class docs.
		 */
		asyh->core.handle = disp->mast.base.vram.handle;
		asyh->core.offset = 0;
		asyh->core.format = 0xcf;
		asyh->core.kind = 0;
		asyh->core.layout = 1;
		asyh->core.block = 0;
		asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
		asyh->set.base = armh->base.cpp != asyh->base.cpp;
		asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
	} else {
		/* Head inactive: nothing visible, planes disabled. */
		asyh->lut.visible = false;
		asyh->core.visible = false;
		asyh->curs.visible = false;
		asyh->base.cpp = 0;
		asyh->ovly.cpp = 0;
	}

	if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
		/* Fast path: only touch channels whose state differs. */
		if (asyh->core.visible) {
			if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
				asyh->set.core = true;
		} else
		if (armh->core.visible) {
			asyh->clr.core = true;
		}

		if (asyh->curs.visible) {
			if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
				asyh->set.curs = true;
		} else
		if (armh->curs.visible) {
			asyh->clr.curs = true;
		}
	} else {
		/* Full modeset: tear down what was armed, program what the
		 * new state wants.
		 */
		asyh->clr.ilut = armh->lut.visible;
		asyh->clr.core = armh->core.visible;
		asyh->clr.curs = armh->curs.visible;
		asyh->set.ilut = asyh->lut.visible;
		asyh->set.core = asyh->core.visible;
		asyh->set.curs = asyh->curs.visible;
	}

	/* Any channel change requires serialising against the core channel. */
	if (asyh->clr.mask || asyh->set.mask)
		nv50_atom(asyh->state.state)->lock_core = true;
	return 0;
}
2296
/* CRTC atomic helper hooks; all programming happens in the commit path. */
static const struct drm_crtc_helper_funcs
nv50_head_help = {
	.atomic_check = nv50_head_atomic_check,
};
2301
2302static void
2303nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
2304 struct drm_crtc_state *state)
2305{
2306 struct nv50_head_atom *asyh = nv50_head_atom(state);
2307 __drm_atomic_helper_crtc_destroy_state(&asyh->state);
2308 kfree(asyh);
2309}
2310
2311static struct drm_crtc_state *
2312nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
2313{
2314 struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
2315 struct nv50_head_atom *asyh;
2316 if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
2317 return NULL;
2318 __drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
2319 asyh->view = armh->view;
2320 asyh->mode = armh->mode;
2321 asyh->lut = armh->lut;
2322 asyh->core = armh->core;
2323 asyh->curs = armh->curs;
2324 asyh->base = armh->base;
2325 asyh->ovly = armh->ovly;
2326 asyh->dither = armh->dither;
2327 asyh->procamp = armh->procamp;
2328 asyh->clr.mask = 0;
2329 asyh->set.mask = 0;
2330 return &asyh->state;
2331}
2332
2333static void
2334__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
2335 struct drm_crtc_state *state)
2336{
2337 if (crtc->state)
2338 crtc->funcs->atomic_destroy_state(crtc, crtc->state);
2339 crtc->state = state;
2340 crtc->state->crtc = crtc;
2341}
2342
2343static void
2344nv50_head_reset(struct drm_crtc *crtc)
2345{
2346 struct nv50_head_atom *asyh;
2347
2348 if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
2349 return;
2350
2351 __drm_atomic_helper_crtc_reset(crtc, &asyh->state);
2352}
2353
2354static void
2355nv50_head_destroy(struct drm_crtc *crtc)
2356{
2357 struct nv50_disp *disp = nv50_disp(crtc->dev);
2358 struct nv50_head *head = nv50_head(crtc);
2359 int i;
2360
2361 nv50_dmac_destroy(&head->ovly.base, disp->disp);
2362 nv50_pioc_destroy(&head->oimm.base);
2363
2364 for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++)
2365 nouveau_bo_unmap_unpin_unref(&head->lut.nvbo[i]);
2366
2367 drm_crtc_cleanup(crtc);
2368 kfree(crtc);
2369}
2370
/* CRTC entry points; legacy modeset/page-flip paths go via atomic helpers. */
static const struct drm_crtc_funcs
nv50_head_func = {
	.reset = nv50_head_reset,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.destroy = nv50_head_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = nv50_head_atomic_duplicate_state,
	.atomic_destroy_state = nv50_head_atomic_destroy_state,
};
2381
/* Create head 'index': its primary/cursor planes, CRTC, LUT buffers and
 * overlay channels.  Returns 0 on success or a negative errno.
 */
static int
nv50_head_create(struct drm_device *dev, int index)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &drm->client.device;
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_head *head;
	struct nv50_base *base;
	struct nv50_curs *curs;
	struct drm_crtc *crtc;
	int ret, i;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOMEM;

	head->base.index = index;
	ret = nv50_base_new(drm, head, &base);
	if (ret == 0)
		ret = nv50_curs_new(drm, head, &curs);
	if (ret) {
		kfree(head);
		return ret;
	}

	crtc = &head->base.base;
	/* NOTE(review): drm_crtc_init_with_planes() can fail but its return
	 * value is ignored here — confirm whether a check should be added.
	 */
	drm_crtc_init_with_planes(dev, crtc, &base->wndw.plane,
				  &curs->wndw.plane, &nv50_head_func,
				  "head-%d", head->base.index);
	drm_crtc_helper_add(crtc, &nv50_head_help);
	drm_mode_crtc_set_gamma_size(crtc, 256);

	/* One LUT buffer per flip slot: 1025 entries of 8 bytes, 256-byte
	 * aligned, in VRAM.
	 */
	for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++) {
		ret = nouveau_bo_new_pin_map(&drm->client, 1025 * 8, 0x100,
					     TTM_PL_FLAG_VRAM,
					     &head->lut.nvbo[i]);
		if (ret)
			goto out;
	}

	/* allocate overlay resources */
	ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
	if (ret)
		goto out;

	ret = nv50_ovly_create(device, disp->disp, index, disp->sync->bo.offset,
			       &head->ovly);
	if (ret)
		goto out;

out:
	/* On any failure, nv50_head_destroy() unwinds everything created so
	 * far (presumably the partially-initialised members are safe to tear
	 * down — verify the helpers tolerate NULL/zeroed state).
	 */
	if (ret)
		nv50_head_destroy(crtc);
	return ret;
}
2437
2438/******************************************************************************
2439 * Output path helpers
2440 *****************************************************************************/
2441static void
2442nv50_outp_release(struct nouveau_encoder *nv_encoder)
2443{
2444 struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
2445 struct {
2446 struct nv50_disp_mthd_v1 base;
2447 } args = {
2448 .base.version = 1,
2449 .base.method = NV50_DISP_MTHD_V1_RELEASE,
2450 .base.hasht = nv_encoder->dcb->hasht,
2451 .base.hashm = nv_encoder->dcb->hashm,
2452 };
2453
2454 nvif_mthd(disp->disp, 0, &args, sizeof(args));
2455 nv_encoder->or = -1;
2456 nv_encoder->link = 0;
2457}
2458
2459static int
2460nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
2461{
2462 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
2463 struct nv50_disp *disp = nv50_disp(drm->dev);
2464 struct {
2465 struct nv50_disp_mthd_v1 base;
2466 struct nv50_disp_acquire_v0 info;
2467 } args = {
2468 .base.version = 1,
2469 .base.method = NV50_DISP_MTHD_V1_ACQUIRE,
2470 .base.hasht = nv_encoder->dcb->hasht,
2471 .base.hashm = nv_encoder->dcb->hashm,
2472 };
2473 int ret;
2474
2475 ret = nvif_mthd(disp->disp, 0, &args, sizeof(args));
2476 if (ret) {
2477 NV_ERROR(drm, "error acquiring output path: %d\n", ret);
2478 return ret;
2479 }
2480
2481 nv_encoder->or = args.info.or;
2482 nv_encoder->link = args.info.link;
2483 return 0;
2484}
2485
2486static int
2487nv50_outp_atomic_check_view(struct drm_encoder *encoder,
2488 struct drm_crtc_state *crtc_state,
2489 struct drm_connector_state *conn_state,
2490 struct drm_display_mode *native_mode)
2491{
2492 struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
2493 struct drm_display_mode *mode = &crtc_state->mode;
2494 struct drm_connector *connector = conn_state->connector;
2495 struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
2496 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
2497
2498 NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
2499 asyc->scaler.full = false;
2500 if (!native_mode)
2501 return 0;
2502
2503 if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
2504 switch (connector->connector_type) {
2505 case DRM_MODE_CONNECTOR_LVDS:
2506 case DRM_MODE_CONNECTOR_eDP:
2507 /* Force use of scaler for non-EDID modes. */
2508 if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
2509 break;
2510 mode = native_mode;
2511 asyc->scaler.full = true;
2512 break;
2513 default:
2514 break;
2515 }
2516 } else {
2517 mode = native_mode;
2518 }
2519
2520 if (!drm_mode_equal(adjusted_mode, mode)) {
2521 drm_mode_copy(adjusted_mode, mode);
2522 crtc_state->mode_changed = true;
2523 }
2524
2525 return 0;
2526}
2527
2528static int
2529nv50_outp_atomic_check(struct drm_encoder *encoder,
2530 struct drm_crtc_state *crtc_state,
2531 struct drm_connector_state *conn_state)
2532{
2533 struct nouveau_connector *nv_connector =
2534 nouveau_connector(conn_state->connector);
2535 return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
2536 nv_connector->native_mode);
2537}
2538
2539/******************************************************************************
2540 * DAC
2541 *****************************************************************************/
/* Detach the DAC from its head by writing 0 to the DAC's owner-mask method
 * on the core channel, then release the output path.
 */
static void
nv50_dac_disable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	const int or = nv_encoder->or;
	u32 *push;

	if (nv_encoder->crtc) {
		push = evo_wait(mast, 4);
		if (push) {
			/* Method layout differs pre/post-GF110 (Fermi). */
			if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
				evo_mthd(push, 0x0400 + (or * 0x080), 1);
				evo_data(push, 0x00000000);
			} else {
				evo_mthd(push, 0x0180 + (or * 0x020), 1);
				evo_data(push, 0x00000000);
			}
			evo_kick(push, mast);
		}
	}

	nv_encoder->crtc = NULL;
	nv50_outp_release(nv_encoder);
}
2567
/* Attach the DAC to its head and program sync polarity on the core channel.
 * The method offsets and magic values come from the proprietary driver
 * traces — see the display class documentation for the symbolic names.
 */
static void
nv50_dac_enable(struct drm_encoder *encoder)
{
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
	u32 *push;

	nv50_outp_acquire(nv_encoder);

	push = evo_wait(mast, 8);
	if (push) {
		if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
			/* Pre-Fermi: head owner mask + sync polarity bits. */
			u32 syncs = 0x00000000;

			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
				syncs |= 0x00000001;
			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
				syncs |= 0x00000002;

			evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
			evo_data(push, 1 << nv_crtc->index);
			evo_data(push, syncs);
		} else {
			/* Fermi+: per-head control word plus DAC owner mask. */
			u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
			u32 syncs = 0x00000001;

			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
				syncs |= 0x00000008;
			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
				syncs |= 0x00000010;

			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
				magic |= 0x00000001;

			evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
			evo_data(push, syncs);
			evo_data(push, magic);
			evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
			evo_data(push, 1 << nv_crtc->index);
		}

		evo_kick(push, mast);
	}

	nv_encoder->crtc = encoder->crtc;
}
2616
2617static enum drm_connector_status
2618nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
2619{
2620 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2621 struct nv50_disp *disp = nv50_disp(encoder->dev);
2622 struct {
2623 struct nv50_disp_mthd_v1 base;
2624 struct nv50_disp_dac_load_v0 load;
2625 } args = {
2626 .base.version = 1,
2627 .base.method = NV50_DISP_MTHD_V1_DAC_LOAD,
2628 .base.hasht = nv_encoder->dcb->hasht,
2629 .base.hashm = nv_encoder->dcb->hashm,
2630 };
2631 int ret;
2632
2633 args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval;
2634 if (args.load.data == 0)
2635 args.load.data = 340;
2636
2637 ret = nvif_mthd(disp->disp, 0, &args, sizeof(args));
2638 if (ret || !args.load.load)
2639 return connector_status_disconnected;
2640
2641 return connector_status_connected;
2642}
2643
/* DAC encoder helper hooks. */
static const struct drm_encoder_helper_funcs
nv50_dac_help = {
	.atomic_check = nv50_outp_atomic_check,
	.enable = nv50_dac_enable,
	.disable = nv50_dac_disable,
	.detect = nv50_dac_detect
};
2651
/* Unregister and free a DAC encoder. */
static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
2658
/* DAC encoder entry points. */
static const struct drm_encoder_funcs
nv50_dac_func = {
	.destroy = nv50_dac_destroy,
};
2663
2664static int
2665nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
2666{
2667 struct nouveau_drm *drm = nouveau_drm(connector->dev);
2668 struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
2669 struct nvkm_i2c_bus *bus;
2670 struct nouveau_encoder *nv_encoder;
2671 struct drm_encoder *encoder;
2672 int type = DRM_MODE_ENCODER_DAC;
2673
2674 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
2675 if (!nv_encoder)
2676 return -ENOMEM;
2677 nv_encoder->dcb = dcbe;
2678
2679 bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
2680 if (bus)
2681 nv_encoder->i2c = &bus->i2c;
2682
2683 encoder = to_drm_encoder(nv_encoder);
2684 encoder->possible_crtcs = dcbe->heads;
2685 encoder->possible_clones = 0;
2686 drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
2687 "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
2688 drm_encoder_helper_add(encoder, &nv50_dac_help);
2689
2690 drm_mode_connector_attach_encoder(connector, encoder);
2691 return 0;
2692}
2693
2694/******************************************************************************
2695 * Audio
2696 *****************************************************************************/
2697static void
2698nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
2699{
2700 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2701 struct nv50_disp *disp = nv50_disp(encoder->dev);
2702 struct {
2703 struct nv50_disp_mthd_v1 base;
2704 struct nv50_disp_sor_hda_eld_v0 eld;
2705 } args = {
2706 .base.version = 1,
2707 .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
2708 .base.hasht = nv_encoder->dcb->hasht,
2709 .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
2710 (0x0100 << nv_crtc->index),
2711 };
2712
2713 nvif_mthd(disp->disp, 0, &args, sizeof(args));
2714}
2715
2716static void
2717nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
2718{
2719 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2720 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
2721 struct nouveau_connector *nv_connector;
2722 struct nv50_disp *disp = nv50_disp(encoder->dev);
2723 struct __packed {
2724 struct {
2725 struct nv50_disp_mthd_v1 mthd;
2726 struct nv50_disp_sor_hda_eld_v0 eld;
2727 } base;
2728 u8 data[sizeof(nv_connector->base.eld)];
2729 } args = {
2730 .base.mthd.version = 1,
2731 .base.mthd.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
2732 .base.mthd.hasht = nv_encoder->dcb->hasht,
2733 .base.mthd.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
2734 (0x0100 << nv_crtc->index),
2735 };
2736
2737 nv_connector = nouveau_encoder_connector_get(nv_encoder);
2738 if (!drm_detect_monitor_audio(nv_connector->edid))
2739 return;
2740
2741 memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
2742
2743 nvif_mthd(disp->disp, 0, &args,
2744 sizeof(args.base) + drm_eld_size(args.data));
2745}
2746
2747/******************************************************************************
2748 * HDMI
2749 *****************************************************************************/
2750static void
2751nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
2752{
2753 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2754 struct nv50_disp *disp = nv50_disp(encoder->dev);
2755 struct {
2756 struct nv50_disp_mthd_v1 base;
2757 struct nv50_disp_sor_hdmi_pwr_v0 pwr;
2758 } args = {
2759 .base.version = 1,
2760 .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
2761 .base.hasht = nv_encoder->dcb->hasht,
2762 .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
2763 (0x0100 << nv_crtc->index),
2764 };
2765
2766 nvif_mthd(disp->disp, 0, &args, sizeof(args));
2767}
2768
2769static void
2770nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
2771{
2772 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2773 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
2774 struct nv50_disp *disp = nv50_disp(encoder->dev);
2775 struct {
2776 struct nv50_disp_mthd_v1 base;
2777 struct nv50_disp_sor_hdmi_pwr_v0 pwr;
2778 u8 infoframes[2 * 17]; /* two frames, up to 17 bytes each */
2779 } args = {
2780 .base.version = 1,
2781 .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
2782 .base.hasht = nv_encoder->dcb->hasht,
2783 .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
2784 (0x0100 << nv_crtc->index),
2785 .pwr.state = 1,
2786 .pwr.rekey = 56, /* binary driver, and tegra, constant */
2787 };
2788 struct nouveau_connector *nv_connector;
2789 u32 max_ac_packet;
2790 union hdmi_infoframe avi_frame;
2791 union hdmi_infoframe vendor_frame;
2792 int ret;
2793 int size;
2794
2795 nv_connector = nouveau_encoder_connector_get(nv_encoder);
2796 if (!drm_detect_hdmi_monitor(nv_connector->edid))
2797 return;
2798
2799 ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode,
2800 false);
2801 if (!ret) {
2802 /* We have an AVI InfoFrame, populate it to the display */
2803 args.pwr.avi_infoframe_length
2804 = hdmi_infoframe_pack(&avi_frame, args.infoframes, 17);
2805 }
2806
2807 ret = drm_hdmi_vendor_infoframe_from_display_mode(&vendor_frame.vendor.hdmi,
2808 &nv_connector->base, mode);
2809 if (!ret) {
2810 /* We have a Vendor InfoFrame, populate it to the display */
2811 args.pwr.vendor_infoframe_length
2812 = hdmi_infoframe_pack(&vendor_frame,
2813 args.infoframes
2814 + args.pwr.avi_infoframe_length,
2815 17);
2816 }
2817
2818 max_ac_packet = mode->htotal - mode->hdisplay;
2819 max_ac_packet -= args.pwr.rekey;
2820 max_ac_packet -= 18; /* constant from tegra */
2821 args.pwr.max_ac_packet = max_ac_packet / 32;
2822
2823 size = sizeof(args.base)
2824 + sizeof(args.pwr)
2825 + args.pwr.avi_infoframe_length
2826 + args.pwr.vendor_infoframe_length;
2827 nvif_mthd(disp->disp, 0, &args, size);
2828 nv50_audio_enable(encoder, mode);
2829}
2830
2831/******************************************************************************
2832 * MST
2833 *****************************************************************************/
2834#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
2835#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
2836#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
2837
2838struct nv50_mstm {
2839 struct nouveau_encoder *outp;
2840
2841 struct drm_dp_mst_topology_mgr mgr;
2842 struct nv50_msto *msto[4];
2843
2844 bool modified;
2845 bool disabled;
2846 int links;
2847};
2848
2849struct nv50_mstc {
2850 struct nv50_mstm *mstm;
2851 struct drm_dp_mst_port *port;
2852 struct drm_connector connector;
2853
2854 struct drm_display_mode *native;
2855 struct edid *edid;
2856
2857 int pbn;
2858};
2859
2860struct nv50_msto {
2861 struct drm_encoder encoder;
2862
2863 struct nv50_head *head;
2864 struct nv50_mstc *mstc;
2865 bool disabled;
2866};
2867
2868static struct drm_dp_payload *
2869nv50_msto_payload(struct nv50_msto *msto)
2870{
2871 struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
2872 struct nv50_mstc *mstc = msto->mstc;
2873 struct nv50_mstm *mstm = mstc->mstm;
2874 int vcpi = mstc->port->vcpi.vcpi, i;
2875
2876 NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
2877 for (i = 0; i < mstm->mgr.max_payloads; i++) {
2878 struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
2879 NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
2880 mstm->outp->base.base.name, i, payload->vcpi,
2881 payload->start_slot, payload->num_slots);
2882 }
2883
2884 for (i = 0; i < mstm->mgr.max_payloads; i++) {
2885 struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
2886 if (payload->vcpi == vcpi)
2887 return payload;
2888 }
2889
2890 return NULL;
2891}
2892
2893static void
2894nv50_msto_cleanup(struct nv50_msto *msto)
2895{
2896 struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
2897 struct nv50_mstc *mstc = msto->mstc;
2898 struct nv50_mstm *mstm = mstc->mstm;
2899
2900 NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
2901 if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto))
2902 drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
2903 if (msto->disabled) {
2904 msto->mstc = NULL;
2905 msto->head = NULL;
2906 msto->disabled = false;
2907 }
2908}
2909
/* Pre-commit programming for a stream: push the allocated VCPI timeslot
 * range (or zeros, disabling the stream) to NVKM for the head's SOR.
 */
static void
nv50_msto_prepare(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
		.base.hasht = mstm->outp->dcb->hasht,
		.base.hashm = (0xf0ff & mstm->outp->dcb->hashm) |
			      (0x0100 << msto->head->base.index),
	};

	NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
	/* If no VCPI is allocated, args.vcpi stays zeroed, which tells the
	 * hardware to stop transmitting this stream.
	 */
	if (mstc->port && mstc->port->vcpi.vcpi > 0) {
		struct drm_dp_payload *payload = nv50_msto_payload(msto);
		if (payload) {
			args.vcpi.start_slot = payload->start_slot;
			args.vcpi.num_slots = payload->num_slots;
			args.vcpi.pbn = mstc->port->vcpi.pbn;
			args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
		}
	}

	NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
		  msto->encoder.name, msto->head->base.base.name,
		  args.vcpi.start_slot, args.vcpi.num_slots,
		  args.vcpi.pbn, args.vcpi.aligned_pbn);
	nvif_mthd(&drm->display->disp, 0, &args, sizeof(args));
}
2944
2945static int
2946nv50_msto_atomic_check(struct drm_encoder *encoder,
2947 struct drm_crtc_state *crtc_state,
2948 struct drm_connector_state *conn_state)
2949{
2950 struct nv50_mstc *mstc = nv50_mstc(conn_state->connector);
2951 struct nv50_mstm *mstm = mstc->mstm;
2952 int bpp = conn_state->connector->display_info.bpc * 3;
2953 int slots;
2954
2955 mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);
2956
2957 slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
2958 if (slots < 0)
2959 return slots;
2960
2961 return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
2962 mstc->native);
2963}
2964
/* Enable an MST stream: find the connector routed to this virtual encoder,
 * allocate its VCPI, acquire the SOR on first use, and program the SOR for
 * the head's mode.
 */
static void
nv50_msto_enable(struct drm_encoder *encoder)
{
	struct nv50_head *head = nv50_head(encoder->crtc);
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_mstc *mstc = NULL;
	struct nv50_mstm *mstm = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 proto, depth;
	int slots;
	bool r;

	/* Locate the connector whose state points at this encoder. */
	drm_connector_list_iter_begin(encoder->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->best_encoder == &msto->encoder) {
			mstc = nv50_mstc(connector);
			mstm = mstc->mstm;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (WARN_ON(!mstc))
		return;

	slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
	r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, slots);
	WARN_ON(!r);

	/* First active stream on this SOR: grab the output resource. */
	if (!mstm->links++)
		nv50_outp_acquire(mstm->outp);

	/* SOR protocol selector depends on which DP link is in use. */
	if (mstm->outp->link & 1)
		proto = 0x8;
	else
		proto = 0x9;

	/* Hardware depth code from the connector's bits-per-component. */
	switch (mstc->connector.display_info.bpc) {
	case 6: depth = 0x2; break;
	case 8: depth = 0x5; break;
	case 10:
	default: depth = 0x6; break;
	}

	mstm->outp->update(mstm->outp, head->base.index,
			   &head->base.base.state->adjusted_mode, proto, depth);

	msto->head = head;
	msto->mstc = mstc;
	mstm->modified = true;
}
3017
3018static void
3019nv50_msto_disable(struct drm_encoder *encoder)
3020{
3021 struct nv50_msto *msto = nv50_msto(encoder);
3022 struct nv50_mstc *mstc = msto->mstc;
3023 struct nv50_mstm *mstm = mstc->mstm;
3024
3025 if (mstc->port)
3026 drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
3027
3028 mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
3029 mstm->modified = true;
3030 if (!--mstm->links)
3031 mstm->disabled = true;
3032 msto->disabled = true;
3033}
3034
/* Virtual MST encoder helper hooks. */
static const struct drm_encoder_helper_funcs
nv50_msto_help = {
	.disable = nv50_msto_disable,
	.enable = nv50_msto_enable,
	.atomic_check = nv50_msto_atomic_check,
};
3041
3042static void
3043nv50_msto_destroy(struct drm_encoder *encoder)
3044{
3045 struct nv50_msto *msto = nv50_msto(encoder);
3046 drm_encoder_cleanup(&msto->encoder);
3047 kfree(msto);
3048}
3049
/* Virtual MST encoder entry points. */
static const struct drm_encoder_funcs
nv50_msto = {
	.destroy = nv50_msto_destroy,
};
3054
3055static int
3056nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
3057 struct nv50_msto **pmsto)
3058{
3059 struct nv50_msto *msto;
3060 int ret;
3061
3062 if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
3063 return -ENOMEM;
3064
3065 ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
3066 DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
3067 if (ret) {
3068 kfree(*pmsto);
3069 *pmsto = NULL;
3070 return ret;
3071 }
3072
3073 drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
3074 msto->encoder.possible_crtcs = heads;
3075 return 0;
3076}
3077
3078static struct drm_encoder *
3079nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
3080 struct drm_connector_state *connector_state)
3081{
3082 struct nv50_head *head = nv50_head(connector_state->crtc);
3083 struct nv50_mstc *mstc = nv50_mstc(connector);
3084 if (mstc->port) {
3085 struct nv50_mstm *mstm = mstc->mstm;
3086 return &mstm->msto[head->base.index]->encoder;
3087 }
3088 return NULL;
3089}
3090
3091static struct drm_encoder *
3092nv50_mstc_best_encoder(struct drm_connector *connector)
3093{
3094 struct nv50_mstc *mstc = nv50_mstc(connector);
3095 if (mstc->port) {
3096 struct nv50_mstm *mstm = mstc->mstm;
3097 return &mstm->msto[0]->encoder;
3098 }
3099 return NULL;
3100}
3101
3102static enum drm_mode_status
3103nv50_mstc_mode_valid(struct drm_connector *connector,
3104 struct drm_display_mode *mode)
3105{
3106 return MODE_OK;
3107}
3108
3109static int
3110nv50_mstc_get_modes(struct drm_connector *connector)
3111{
3112 struct nv50_mstc *mstc = nv50_mstc(connector);
3113 int ret = 0;
3114
3115 mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
3116 drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
3117 if (mstc->edid)
3118 ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
3119
3120 if (!mstc->connector.display_info.bpc)
3121 mstc->connector.display_info.bpc = 8;
3122
3123 if (mstc->native)
3124 drm_mode_destroy(mstc->connector.dev, mstc->native);
3125 mstc->native = nouveau_conn_native_mode(&mstc->connector);
3126 return ret;
3127}
3128
/* MST connector helper hooks. */
static const struct drm_connector_helper_funcs
nv50_mstc_help = {
	.get_modes = nv50_mstc_get_modes,
	.mode_valid = nv50_mstc_mode_valid,
	.best_encoder = nv50_mstc_best_encoder,
	.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
};
3136
3137static enum drm_connector_status
3138nv50_mstc_detect(struct drm_connector *connector, bool force)
3139{
3140 struct nv50_mstc *mstc = nv50_mstc(connector);
3141 if (!mstc->port)
3142 return connector_status_disconnected;
3143 return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
3144}
3145
3146static void
3147nv50_mstc_destroy(struct drm_connector *connector)
3148{
3149 struct nv50_mstc *mstc = nv50_mstc(connector);
3150 drm_connector_cleanup(&mstc->connector);
3151 kfree(mstc);
3152}
3153
/* MST connector entry points; property handling is shared with the
 * physical nouveau connectors.
 */
static const struct drm_connector_funcs
nv50_mstc = {
	.reset = nouveau_conn_reset,
	.detect = nv50_mstc_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = nv50_mstc_destroy,
	.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
	.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
	.atomic_set_property = nouveau_conn_atomic_set_property,
	.atomic_get_property = nouveau_conn_atomic_get_property,
};
3165
3166static int
3167nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
3168 const char *path, struct nv50_mstc **pmstc)
3169{
3170 struct drm_device *dev = mstm->outp->base.base.dev;
3171 struct nv50_mstc *mstc;
3172 int ret, i;
3173
3174 if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
3175 return -ENOMEM;
3176 mstc->mstm = mstm;
3177 mstc->port = port;
3178
3179 ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
3180 DRM_MODE_CONNECTOR_DisplayPort);
3181 if (ret) {
3182 kfree(*pmstc);
3183 *pmstc = NULL;
3184 return ret;
3185 }
3186
3187 drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);
3188
3189 mstc->connector.funcs->reset(&mstc->connector);
3190 nouveau_conn_attach_properties(&mstc->connector);
3191
3192 for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
3193 drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
3194
3195 drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
3196 drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
3197 drm_mode_connector_set_path_property(&mstc->connector, path);
3198 return 0;
3199}
3200
3201static void
3202nv50_mstm_cleanup(struct nv50_mstm *mstm)
3203{
3204 struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
3205 struct drm_encoder *encoder;
3206 int ret;
3207
3208 NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
3209 ret = drm_dp_check_act_status(&mstm->mgr);
3210
3211 ret = drm_dp_update_payload_part2(&mstm->mgr);
3212
3213 drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
3214 if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
3215 struct nv50_msto *msto = nv50_msto(encoder);
3216 struct nv50_mstc *mstc = msto->mstc;
3217 if (mstc && mstc->mstm == mstm)
3218 nv50_msto_cleanup(msto);
3219 }
3220 }
3221
3222 mstm->modified = false;
3223}
3224
3225static void
3226nv50_mstm_prepare(struct nv50_mstm *mstm)
3227{
3228 struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
3229 struct drm_encoder *encoder;
3230 int ret;
3231
3232 NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
3233 ret = drm_dp_update_payload_part1(&mstm->mgr);
3234
3235 drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
3236 if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
3237 struct nv50_msto *msto = nv50_msto(encoder);
3238 struct nv50_mstc *mstc = msto->mstc;
3239 if (mstc && mstc->mstm == mstm)
3240 nv50_msto_prepare(msto);
3241 }
3242 }
3243
3244 if (mstm->disabled) {
3245 if (!mstm->links)
3246 nv50_outp_release(mstm->outp);
3247 mstm->disabled = false;
3248 }
3249}
3250
3251static void
3252nv50_mstm_hotplug(struct drm_dp_mst_topology_mgr *mgr)
3253{
3254 struct nv50_mstm *mstm = nv50_mstm(mgr);
3255 drm_kms_helper_hotplug_event(mstm->outp->base.base.dev);
3256}
3257
/* MST topology callback: an MST port went away, retire its connector.
 *
 * NOTE(review): fbdev/userspace may still hold references; only the
 * final unreference actually frees the connector.
 */
static void
nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_connector *connector)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nv50_mstc *mstc = nv50_mstc(connector);

	drm_connector_unregister(&mstc->connector);

	drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);

	/* Clear the port under connection_mutex so concurrent detect or
	 * modeset paths never observe a stale pointer.
	 */
	drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL);
	mstc->port = NULL;
	drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);

	drm_connector_unreference(&mstc->connector);
}
3275
3276static void
3277nv50_mstm_register_connector(struct drm_connector *connector)
3278{
3279 struct nouveau_drm *drm = nouveau_drm(connector->dev);
3280
3281 drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
3282
3283 drm_connector_register(connector);
3284}
3285
3286static struct drm_connector *
3287nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
3288 struct drm_dp_mst_port *port, const char *path)
3289{
3290 struct nv50_mstm *mstm = nv50_mstm(mgr);
3291 struct nv50_mstc *mstc;
3292 int ret;
3293
3294 ret = nv50_mstc_new(mstm, port, path, &mstc);
3295 if (ret) {
3296 if (mstc)
3297 mstc->connector.funcs->destroy(&mstc->connector);
3298 return NULL;
3299 }
3300
3301 return &mstc->connector;
3302}
3303
/* Topology-manager callbacks: connector lifecycle + hotplug routing. */
static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
	.add_connector = nv50_mstm_add_connector,
	.register_connector = nv50_mstm_register_connector,
	.destroy_connector = nv50_mstm_destroy_connector,
	.hotplug = nv50_mstm_hotplug,
};
3311
/* Service a DP short-pulse IRQ on an MST-capable output.
 *
 * Reads the ESI (event status indicator) block and feeds it to the MST
 * helper repeatedly until no further events are pending, acknowledging
 * each serviced batch.  If the sink stops answering the DPCD read, MST
 * mode is torn down entirely.
 */
void
nv50_mstm_service(struct nv50_mstm *mstm)
{
	struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
	bool handled = true;
	int ret;
	u8 esi[8] = {};

	if (!aux)
		return;

	while (handled) {
		ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
		if (ret != 8) {
			/* Sink not responding: disable MST. */
			drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
			return;
		}

		drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
		if (!handled)
			break;

		/* Ack the events we just serviced. */
		drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
	}
}
3337
3338void
3339nv50_mstm_remove(struct nv50_mstm *mstm)
3340{
3341 if (mstm)
3342 drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
3343}
3344
/* Switch the MST control link on/off, both sink-side (DP_MSTM_CTRL in
 * DPCD) and hardware-side (SOR_DP_MST_LINK display method).
 *
 * @dpcd:  cached DP_DPCD_REV of the sink; the DPCD write is only done
 *         for DPCD 1.2+ sinks.
 * @state: non-zero enables MST, zero disables it.
 *
 * Returns 0 on success or a negative errno.
 */
static int
nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
{
	struct nouveau_encoder *outp = mstm->outp;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_link_v0 mst;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
		.base.hasht = outp->dcb->hasht,
		.base.hashm = outp->dcb->hashm,
		.mst.state = state,
	};
	struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
	struct nvif_object *disp = &drm->display->disp;
	int ret;

	if (dpcd >= 0x12) {
		/* Read-modify-write just the MST-enable bit on the sink. */
		ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
		if (ret < 0)
			return ret;

		dpcd &= ~DP_MST_EN;
		if (state)
			dpcd |= DP_MST_EN;

		ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
		if (ret < 0)
			return ret;
	}

	return nvif_mthd(disp, 0, &args, sizeof(args));
}
3379
/* Probe MST capability of the sink and bring the topology manager into
 * the matching state.
 *
 * @dpcd:  dpcd[0] holds DP_DPCD_REV; dpcd[1] is overwritten here with
 *         the sink's DP_MSTM_CAP.  dpcd[0] is downgraded to 0x11 when
 *         the sink lacks MST so callers fall back to SST handling.
 * @allow: whether the caller permits enabling MST at all.
 *
 * Returns <0 on error, otherwise the resulting mgr.mst_state.
 */
int
nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
{
	int ret, state = 0;

	if (!mstm)
		return 0;

	if (dpcd[0] >= 0x12) {
		ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
		if (ret < 0)
			return ret;

		if (!(dpcd[1] & DP_MST_CAP))
			dpcd[0] = 0x11;
		else
			state = allow;
	}

	ret = nv50_mstm_enable(mstm, dpcd[0], state);
	if (ret)
		return ret;

	ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
	if (ret)
		/* Topology manager refused: undo the sink-side enable. */
		return nv50_mstm_enable(mstm, dpcd[0], 0);

	return mstm->mgr.mst_state;
}
3409
3410static void
3411nv50_mstm_fini(struct nv50_mstm *mstm)
3412{
3413 if (mstm && mstm->mgr.mst_state)
3414 drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
3415}
3416
3417static void
3418nv50_mstm_init(struct nv50_mstm *mstm)
3419{
3420 if (mstm && mstm->mgr.mst_state)
3421 drm_dp_mst_topology_mgr_resume(&mstm->mgr);
3422}
3423
3424static void
3425nv50_mstm_del(struct nv50_mstm **pmstm)
3426{
3427 struct nv50_mstm *mstm = *pmstm;
3428 if (mstm) {
3429 kfree(*pmstm);
3430 *pmstm = NULL;
3431 }
3432}
3433
/* Allocate MST state (topology manager + one stream encoder per head)
 * for DP output @outp.
 *
 * @aux_max:      forwarded to drm_dp_mst_topology_mgr_init().
 * @conn_base_id: base connector id used for MST connector naming.
 *
 * On error *pmstm may be partially initialised; the caller is expected
 * to dispose of it via nv50_mstm_del().
 */
static int
nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
	      int conn_base_id, struct nv50_mstm **pmstm)
{
	/* One payload (stream encoder) per head this output can drive. */
	const int max_payloads = hweight8(outp->dcb->heads);
	struct drm_device *dev = outp->base.base.dev;
	struct nv50_mstm *mstm;
	int ret, i;
	u8 dpcd;

	/* This is a workaround for some monitors not functioning
	 * correctly in MST mode on initial module load. I think
	 * some bad interaction with the VBIOS may be responsible.
	 *
	 * A good ol' off and on again seems to work here ;)
	 */
	ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd);
	if (ret >= 0 && dpcd >= 0x12)
		drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);

	if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
		return -ENOMEM;
	mstm->outp = outp;
	mstm->mgr.cbs = &nv50_mstm;

	ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
					   max_payloads, conn_base_id);
	if (ret)
		return ret;

	for (i = 0; i < max_payloads; i++) {
		ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
				    i, &mstm->msto[i]);
		if (ret)
			return ret;
	}

	return 0;
}
3473
3474/******************************************************************************
3475 * SOR
3476 *****************************************************************************/
/* Program the core channel's SOR control word for @head.
 *
 * @mode == NULL detaches the head.  nv_encoder->ctrl accumulates the
 * owner mask in the low bits (one per head) and the protocol in bits
 * 8+; it is cleared entirely once no heads remain attached.
 */
static void
nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
		struct drm_display_mode *mode, u8 proto, u8 depth)
{
	struct nv50_dmac *core = &nv50_mast(nv_encoder->base.base.dev)->base;
	u32 *push;

	if (!mode) {
		nv_encoder->ctrl &= ~BIT(head);
		/* Last head gone: clear protocol/flag bits too. */
		if (!(nv_encoder->ctrl & 0x0000000f))
			nv_encoder->ctrl = 0;
	} else {
		nv_encoder->ctrl |= proto << 8;
		nv_encoder->ctrl |= BIT(head);
	}

	if ((push = evo_wait(core, 6))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			/* Pre-GF110: sync polarity and depth are carried in
			 * the SOR control word itself.
			 */
			if (mode) {
				if (mode->flags & DRM_MODE_FLAG_NHSYNC)
					nv_encoder->ctrl |= 0x00001000;
				if (mode->flags & DRM_MODE_FLAG_NVSYNC)
					nv_encoder->ctrl |= 0x00002000;
				nv_encoder->ctrl |= depth << 16;
			}
			evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
		} else {
			/* GF110+: syncs/depth go through a per-head method
			 * instead; the SOR method takes only the ctrl word.
			 */
			if (mode) {
				u32 magic = 0x31ec6000 | (head << 25);
				u32 syncs = 0x00000001;
				if (mode->flags & DRM_MODE_FLAG_NHSYNC)
					syncs |= 0x00000008;
				if (mode->flags & DRM_MODE_FLAG_NVSYNC)
					syncs |= 0x00000010;
				if (mode->flags & DRM_MODE_FLAG_INTERLACE)
					magic |= 0x00000001;

				evo_mthd(push, 0x0404 + (head * 0x300), 2);
				evo_data(push, syncs | (depth << 6));
				evo_data(push, magic);
			}
			evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
		}
		evo_data(push, nv_encoder->ctrl);
		evo_kick(push, core);
	}
}
3524
/* Shut down a SOR: put a DP sink into D3, detach the head from the OR,
 * stop audio/HDMI, and release the OR back to the pool.
 */
static void
nv50_sor_disable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);

	nv_encoder->crtc = NULL;

	if (nv_crtc) {
		struct nvkm_i2c_aux *aux = nv_encoder->aux;
		u8 pwr;

		/* Best-effort: ask a DP sink to power down (D3). */
		if (aux) {
			int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1);
			if (ret == 0) {
				pwr &= ~DP_SET_POWER_MASK;
				pwr |= DP_SET_POWER_D3;
				nvkm_wraux(aux, DP_SET_POWER, &pwr, 1);
			}
		}

		/* mode == NULL detaches the head from this SOR. */
		nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
		nv50_audio_disable(encoder, nv_crtc);
		nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
		nv50_outp_release(nv_encoder);
	}
}
3552
/* Bring up a SOR for the adjusted mode in the CRTC's atomic state:
 * choose protocol/depth from the DCB output type and connector caps,
 * run the LVDS init script where applicable, then program the core
 * channel via nv_encoder->update.
 */
static void
nv50_sor_enable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_lvds_script_v0 lvds;
	} lvds = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
		.base.hasht = nv_encoder->dcb->hasht,
		.base.hashm = nv_encoder->dcb->hashm,
	};
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct drm_device *dev = encoder->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_connector *nv_connector;
	struct nvbios *bios = &drm->vbios;
	u8 proto = 0xf;	/* always overwritten below (BUG() on unknown type) */
	u8 depth = 0x0;

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	nv_encoder->crtc = encoder->crtc;
	nv50_outp_acquire(nv_encoder);

	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
		if (nv_encoder->link & 1) {
			proto = 0x1;
			/* Only enable dual-link if:
			 * - Need to (i.e. rate > 165MHz)
			 * - DCB says we can
			 * - Not an HDMI monitor, since there's no dual-link
			 * on HDMI.
			 */
			if (mode->clock >= 165000 &&
			    nv_encoder->dcb->duallink_possible &&
			    !drm_detect_hdmi_monitor(nv_connector->edid))
				proto |= 0x4;
		} else {
			proto = 0x2;
		}

		nv50_hdmi_enable(&nv_encoder->base.base, mode);
		break;
	case DCB_OUTPUT_LVDS:
		proto = 0x0;

		/* Build script flags: 0x0100 = dual link, 0x0200 = 24bpp. */
		if (bios->fp_no_ddc) {
			if (bios->fp.dual_link)
				lvds.lvds.script |= 0x0100;
			if (bios->fp.if_is_24bit)
				lvds.lvds.script |= 0x0200;
		} else {
			if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
				/* NOTE(review): assumes a valid SPWG EDID;
				 * byte 121 distinguishes dual-link panels.
				 */
				if (((u8 *)nv_connector->edid)[121] == 2)
					lvds.lvds.script |= 0x0100;
			} else
			if (mode->clock >= bios->fp.duallink_transition_clk) {
				lvds.lvds.script |= 0x0100;
			}

			if (lvds.lvds.script & 0x0100) {
				if (bios->fp.strapless_is_24bit & 2)
					lvds.lvds.script |= 0x0200;
			} else {
				if (bios->fp.strapless_is_24bit & 1)
					lvds.lvds.script |= 0x0200;
			}

			if (nv_connector->base.display_info.bpc == 8)
				lvds.lvds.script |= 0x0200;
		}

		nvif_mthd(disp->disp, 0, &lvds, sizeof(lvds));
		break;
	case DCB_OUTPUT_DP:
		if (nv_connector->base.display_info.bpc == 6)
			depth = 0x2;
		else
		if (nv_connector->base.display_info.bpc == 8)
			depth = 0x5;
		else
			depth = 0x6;

		if (nv_encoder->link & 1)
			proto = 0x8;
		else
			proto = 0x9;

		nv50_audio_enable(encoder, mode);
		break;
	default:
		BUG();
		break;
	}

	nv_encoder->update(nv_encoder, nv_crtc->index, mode, proto, depth);
}
3654
/* Atomic helper hooks shared by all SOR encoders. */
static const struct drm_encoder_helper_funcs
nv50_sor_help = {
	.atomic_check = nv50_outp_atomic_check,
	.enable = nv50_sor_enable,
	.disable = nv50_sor_disable,
};
3661
3662static void
3663nv50_sor_destroy(struct drm_encoder *encoder)
3664{
3665 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
3666 nv50_mstm_del(&nv_encoder->dp.mstm);
3667 drm_encoder_cleanup(encoder);
3668 kfree(encoder);
3669}
3670
/* Base encoder ops for SORs. */
static const struct drm_encoder_funcs
nv50_sor_func = {
	.destroy = nv50_sor_destroy,
};
3675
/* Create a DRM encoder for a DCB SOR entry (TMDS/LVDS/DP) and attach
 * it to @connector.  DP outputs additionally get an AUX channel, the
 * appropriate DDC adapter, and (GF110+) MST support.
 */
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type, ret;

	switch (dcbe->type) {
	case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
	case DCB_OUTPUT_TMDS:
	case DCB_OUTPUT_DP:
	default:
		type = DRM_MODE_ENCODER_TMDS;
		break;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	nv_encoder->update = nv50_sor_update;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
			 "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_sor_help);

	drm_mode_connector_attach_encoder(connector, encoder);

	if (dcbe->type == DCB_OUTPUT_DP) {
		struct nv50_disp *disp = nv50_disp(encoder->dev);
		struct nvkm_i2c_aux *aux =
			nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
		if (aux) {
			if (disp->disp->oclass < GF110_DISP) {
				/* HW has no support for address-only
				 * transactions, so we're required to
				 * use custom I2C-over-AUX code.
				 */
				nv_encoder->i2c = &aux->i2c;
			} else {
				nv_encoder->i2c = &nv_connector->aux.ddc;
			}
			nv_encoder->aux = aux;
		}

		/*TODO: Use DP Info Table to check for support. */
		if (disp->disp->oclass >= GF110_DISP) {
			ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
					    nv_connector->base.base.id,
					    &nv_encoder->dp.mstm);
			if (ret)
				return ret;
		}
	} else {
		/* Non-DP outputs talk plain I2C for DDC. */
		struct nvkm_i2c_bus *bus =
			nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
		if (bus)
			nv_encoder->i2c = &bus->i2c;
	}

	return 0;
}
3744
3745/******************************************************************************
3746 * PIOR
3747 *****************************************************************************/
3748static int
3749nv50_pior_atomic_check(struct drm_encoder *encoder,
3750 struct drm_crtc_state *crtc_state,
3751 struct drm_connector_state *conn_state)
3752{
3753 int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
3754 if (ret)
3755 return ret;
3756 crtc_state->adjusted_mode.clock *= 2;
3757 return 0;
3758}
3759
/* Shut down a PIOR: zero its control method (pre-GF110 core channel
 * only) and release the OR.
 */
static void
nv50_pior_disable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	const int or = nv_encoder->or;
	u32 *push;

	if (nv_encoder->crtc) {
		push = evo_wait(mast, 4);
		if (push) {
			if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
				evo_mthd(push, 0x0700 + (or * 0x040), 1);
				evo_data(push, 0x00000000);
			}
			evo_kick(push, mast);
		}
	}

	nv_encoder->crtc = NULL;
	nv50_outp_release(nv_encoder);
}
3782
/* Bring up a PIOR: compute the control word (depth from connector bpc,
 * protocol from DCB type, owner from the head) and push it on the core
 * channel (pre-GF110 layout only).
 */
static void
nv50_pior_enable(struct drm_encoder *encoder)
{
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_connector *nv_connector;
	struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
	u8 owner = 1 << nv_crtc->index;
	u8 proto, depth;
	u32 *push;

	nv50_outp_acquire(nv_encoder);

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	switch (nv_connector->base.display_info.bpc) {
	case 10: depth = 0x6; break;
	case 8: depth = 0x5; break;
	case 6: depth = 0x2; break;
	default: depth = 0x0; break;
	}

	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
	case DCB_OUTPUT_DP:
		proto = 0x0;
		break;
	default:
		BUG();
		break;
	}

	push = evo_wait(mast, 8);
	if (push) {
		if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
			u32 ctrl = (depth << 16) | (proto << 8) | owner;
			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
				ctrl |= 0x00001000;
			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
				ctrl |= 0x00002000;
			evo_mthd(push, 0x0700 + (nv_encoder->or * 0x040), 1);
			evo_data(push, ctrl);
		}

		evo_kick(push, mast);
	}

	nv_encoder->crtc = encoder->crtc;
}
3832
/* Atomic helper hooks shared by all PIOR encoders. */
static const struct drm_encoder_helper_funcs
nv50_pior_help = {
	.atomic_check = nv50_pior_atomic_check,
	.enable = nv50_pior_enable,
	.disable = nv50_pior_disable,
};
3839
/* Encoder .destroy: no extra state beyond the embedded encoder. */
static void
nv50_pior_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
3846
/* Base encoder ops for PIORs. */
static const struct drm_encoder_funcs
nv50_pior_func = {
	.destroy = nv50_pior_destroy,
};
3851
/* Create a DRM encoder for a DCB PIOR (external TMDS/DP transmitter)
 * entry and attach it to @connector.  The DDC/AUX channel comes from
 * the external-device index in the DCB.
 */
static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nvkm_i2c_bus *bus = NULL;
	struct nvkm_i2c_aux *aux = NULL;
	struct i2c_adapter *ddc;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type;

	switch (dcbe->type) {
	case DCB_OUTPUT_TMDS:
		bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
		ddc = bus ? &bus->i2c : NULL;
		type = DRM_MODE_ENCODER_TMDS;
		break;
	case DCB_OUTPUT_DP:
		aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
		ddc = aux ? &nv_connector->aux.ddc : NULL;
		type = DRM_MODE_ENCODER_TMDS;
		break;
	default:
		return -ENODEV;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	nv_encoder->i2c = ddc;
	nv_encoder->aux = aux;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
			 "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_pior_help);

	drm_mode_connector_attach_encoder(connector, encoder);
	return 0;
}
3897
3898/******************************************************************************
3899 * Atomic
3900 *****************************************************************************/
3901
/* Kick an update on the core channel and wait for it to complete.
 *
 * MST payloads are (re)programmed around the flush: prepare before,
 * cleanup after.  @interlock selects which satellite channels the core
 * update must synchronise with.
 */
static void
nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
{
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_dmac *core = &disp->mast.base;
	struct nv50_mstm *mstm;
	struct drm_encoder *encoder;
	u32 *push;

	NV_ATOMIC(drm, "commit core %08x\n", interlock);

	/* MST stream encoders carry no mstm of their own; only walk the
	 * real output encoders.
	 */
	drm_for_each_encoder(encoder, drm->dev) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			mstm = nouveau_encoder(encoder)->dp.mstm;
			if (mstm && mstm->modified)
				nv50_mstm_prepare(mstm);
		}
	}

	if ((push = evo_wait(core, 5))) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, 0x80000000);
		evo_mthd(push, 0x0080, 2);
		evo_data(push, interlock);
		evo_data(push, 0x00000000);
		/* Clear the completion semaphore, then poll (up to 2s)
		 * for the channel to write it back non-zero.
		 */
		nouveau_bo_wr32(disp->sync, 0, 0x00000000);
		evo_kick(push, core);
		if (nvif_msec(&drm->client.device, 2000ULL,
			if (nouveau_bo_rd32(disp->sync, 0))
				break;
			usleep_range(1, 2);
		) < 0)
			NV_ERROR(drm, "EVO timeout\n");
	}

	drm_for_each_encoder(encoder, drm->dev) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			mstm = nouveau_encoder(encoder)->dp.mstm;
			if (mstm && mstm->modified)
				nv50_mstm_cleanup(mstm);
		}
	}
}
3945
/* Tail half of an atomic commit (runs inline for blocking commits or
 * from the commit worker for nonblocking ones).
 *
 * Ordering is significant: disable heads/planes/output paths first and
 * flush that to hardware, then program the new state and flush again.
 * interlock_core/interlock_chan accumulate which channels have pending
 * state the next core-channel update must synchronise with.
 */
static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct drm_crtc *crtc;
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_atom *atom = nv50_atom(state);
	struct nv50_outp_atom *outp, *outt;
	u32 interlock_core = 0;
	u32 interlock_chan = 0;
	int i;

	NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
	drm_atomic_helper_wait_for_fences(dev, state, false);
	drm_atomic_helper_wait_for_dependencies(state);
	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	if (atom->lock_core)
		mutex_lock(&disp->mutex);

	/* Disable head(s). */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
			  asyh->clr.mask, asyh->set.mask);
		if (old_crtc_state->active && !new_crtc_state->active)
			drm_crtc_vblank_off(crtc);

		if (asyh->clr.mask) {
			nv50_head_flush_clr(head, asyh, atom->flush_disable);
			interlock_core |= 1;
		}
	}

	/* Disable plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
			  asyw->clr.mask, asyw->set.mask);
		if (!asyw->clr.mask)
			continue;

		interlock_chan |= nv50_wndw_flush_clr(wndw, interlock_core,
						      atom->flush_disable,
						      asyw);
	}

	/* Disable output path(s). */
	list_for_each_entry(outp, &atom->outp, head) {
		const struct drm_encoder_helper_funcs *help;
		struct drm_encoder *encoder;

		encoder = outp->encoder;
		help = encoder->helper_private;

		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
			  outp->clr.mask, outp->set.mask);

		if (outp->clr.mask) {
			help->disable(encoder);
			interlock_core |= 1;
			/* Some outputs (MST) need their disable flushed
			 * to hardware before anything else proceeds.
			 */
			if (outp->flush_disable) {
				nv50_disp_atomic_commit_core(drm, interlock_chan);
				interlock_core = 0;
				interlock_chan = 0;
			}
		}
	}

	/* Flush disable. */
	if (interlock_core) {
		if (atom->flush_disable) {
			nv50_disp_atomic_commit_core(drm, interlock_chan);
			interlock_core = 0;
			interlock_chan = 0;
		}
	}

	/* Update output path(s). */
	list_for_each_entry_safe(outp, outt, &atom->outp, head) {
		const struct drm_encoder_helper_funcs *help;
		struct drm_encoder *encoder;

		encoder = outp->encoder;
		help = encoder->helper_private;

		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
			  outp->set.mask, outp->clr.mask);

		if (outp->set.mask) {
			help->enable(encoder);
			interlock_core = 1;
		}

		/* The outp atoms are consumed by this commit. */
		list_del(&outp->head);
		kfree(outp);
	}

	/* Update head(s). */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
			  asyh->set.mask, asyh->clr.mask);

		if (asyh->set.mask) {
			nv50_head_flush_set(head, asyh);
			interlock_core = 1;
		}

		if (new_crtc_state->active) {
			if (!old_crtc_state->active)
				drm_crtc_vblank_on(crtc);
			if (new_crtc_state->event)
				drm_crtc_vblank_get(crtc);
		}
	}

	/* Update plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
			  asyw->set.mask, asyw->clr.mask);
		if ( !asyw->set.mask &&
		    (!asyw->clr.mask || atom->flush_disable))
			continue;

		interlock_chan |= nv50_wndw_flush_set(wndw, interlock_core, asyw);
	}

	/* Flush update. */
	if (interlock_core) {
		/* Core-only legacy cursor moves get a cheap kick without
		 * the full interlocked update.
		 */
		if (!interlock_chan && atom->state.legacy_cursor_update) {
			u32 *push = evo_wait(&disp->mast, 2);
			if (push) {
				evo_mthd(push, 0x0080, 1);
				evo_data(push, 0x00000000);
				evo_kick(push, &disp->mast);
			}
		} else {
			nv50_disp_atomic_commit_core(drm, interlock_chan);
		}
	}

	if (atom->lock_core)
		mutex_unlock(&disp->mutex);

	/* Wait for HW to signal completion. */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);
		int ret = nv50_wndw_wait_armed(wndw, asyw);
		if (ret)
			NV_ERROR(drm, "%s: timeout\n", plane->name);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->event) {
			unsigned long flags;
			/* Get correct count/ts if racing with vblank irq */
			if (new_crtc_state->active)
				drm_crtc_accurate_vblank_count(crtc);
			spin_lock_irqsave(&crtc->dev->event_lock, flags);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

			new_crtc_state->event = NULL;
			if (new_crtc_state->active)
				drm_crtc_vblank_put(crtc);
		}
	}

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_helper_commit_cleanup_done(state);
	drm_atomic_state_put(state);
}
4134
4135static void
4136nv50_disp_atomic_commit_work(struct work_struct *work)
4137{
4138 struct drm_atomic_state *state =
4139 container_of(work, typeof(*state), commit_work);
4140 nv50_disp_atomic_commit_tail(state);
4141}
4142
4143static int
4144nv50_disp_atomic_commit(struct drm_device *dev,
4145 struct drm_atomic_state *state, bool nonblock)
4146{
4147 struct nouveau_drm *drm = nouveau_drm(dev);
4148 struct nv50_disp *disp = nv50_disp(dev);
4149 struct drm_plane_state *new_plane_state;
4150 struct drm_plane *plane;
4151 struct drm_crtc *crtc;
4152 bool active = false;
4153 int ret, i;
4154
4155 ret = pm_runtime_get_sync(dev->dev);
4156 if (ret < 0 && ret != -EACCES)
4157 return ret;
4158
4159 ret = drm_atomic_helper_setup_commit(state, nonblock);
4160 if (ret)
4161 goto done;
4162
4163 INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);
4164
4165 ret = drm_atomic_helper_prepare_planes(dev, state);
4166 if (ret)
4167 goto done;
4168
4169 if (!nonblock) {
4170 ret = drm_atomic_helper_wait_for_fences(dev, state, true);
4171 if (ret)
4172 goto err_cleanup;
4173 }
4174
4175 ret = drm_atomic_helper_swap_state(state, true);
4176 if (ret)
4177 goto err_cleanup;
4178
4179 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
4180 struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
4181 struct nv50_wndw *wndw = nv50_wndw(plane);
4182
4183 if (asyw->set.image) {
4184 asyw->ntfy.handle = wndw->dmac->sync.handle;
4185 asyw->ntfy.offset = wndw->ntfy;
4186 asyw->ntfy.awaken = false;
4187 asyw->set.ntfy = true;
4188 nouveau_bo_wr32(disp->sync, wndw->ntfy / 4, 0x00000000);
4189 wndw->ntfy ^= 0x10;
4190 }
4191 }
4192
4193 drm_atomic_state_get(state);
4194
4195 if (nonblock)
4196 queue_work(system_unbound_wq, &state->commit_work);
4197 else
4198 nv50_disp_atomic_commit_tail(state);
4199
4200 drm_for_each_crtc(crtc, dev) {
4201 if (crtc->state->enable) {
4202 if (!drm->have_disp_power_ref) {
4203 drm->have_disp_power_ref = true;
4204 return 0;
4205 }
4206 active = true;
4207 break;
4208 }
4209 }
4210
4211 if (!active && drm->have_disp_power_ref) {
4212 pm_runtime_put_autosuspend(dev->dev);
4213 drm->have_disp_power_ref = false;
4214 }
4215
4216err_cleanup:
4217 if (ret)
4218 drm_atomic_helper_cleanup_planes(dev, state);
4219done:
4220 pm_runtime_put_autosuspend(dev->dev);
4221 return ret;
4222}
4223
4224static struct nv50_outp_atom *
4225nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
4226{
4227 struct nv50_outp_atom *outp;
4228
4229 list_for_each_entry(outp, &atom->outp, head) {
4230 if (outp->encoder == encoder)
4231 return outp;
4232 }
4233
4234 outp = kzalloc(sizeof(*outp), GFP_KERNEL);
4235 if (!outp)
4236 return ERR_PTR(-ENOMEM);
4237
4238 list_add(&outp->head, &atom->outp);
4239 outp->encoder = encoder;
4240 return outp;
4241}
4242
/* If the connector's old CRTC was active and is undergoing a full
 * modeset, record that this encoder's output control must be cleared
 * during commit.  MST encoders additionally force a hardware flush of
 * the disable before the new state is programmed.
 */
static int
nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
				struct drm_connector_state *old_connector_state)
{
	struct drm_encoder *encoder = old_connector_state->best_encoder;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	struct nv50_outp_atom *outp;

	/* Connector wasn't on a CRTC; nothing to disable. */
	if (!(crtc = old_connector_state->crtc))
		return 0;

	old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
	new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
	if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		outp = nv50_disp_outp_atomic_add(atom, encoder);
		if (IS_ERR(outp))
			return PTR_ERR(outp);

		if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			outp->flush_disable = true;
			atom->flush_disable = true;
		}
		outp->clr.ctrl = true;
		atom->lock_core = true;
	}

	return 0;
}
4272
/* If the connector's new CRTC will be active after a full modeset,
 * record that this encoder's output control must be (re)programmed
 * during commit.
 */
static int
nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
				struct drm_connector_state *connector_state)
{
	struct drm_encoder *encoder = connector_state->best_encoder;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	struct nv50_outp_atom *outp;

	/* Connector isn't being assigned a CRTC; nothing to program. */
	if (!(crtc = connector_state->crtc))
		return 0;

	new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
	if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		outp = nv50_disp_outp_atomic_add(atom, encoder);
		if (IS_ERR(outp))
			return PTR_ERR(outp);

		outp->set.ctrl = true;
		atom->lock_core = true;
	}

	return 0;
}
4297
4298static int
4299nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
4300{
4301 struct nv50_atom *atom = nv50_atom(state);
4302 struct drm_connector_state *old_connector_state, *new_connector_state;
4303 struct drm_connector *connector;
4304 int ret, i;
4305
4306 ret = drm_atomic_helper_check(dev, state);
4307 if (ret)
4308 return ret;
4309
4310 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
4311 ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
4312 if (ret)
4313 return ret;
4314
4315 ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
4316 if (ret)
4317 return ret;
4318 }
4319
4320 return 0;
4321}
4322
4323static void
4324nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
4325{
4326 struct nv50_atom *atom = nv50_atom(state);
4327 struct nv50_outp_atom *outp, *outt;
4328
4329 list_for_each_entry_safe(outp, outt, &atom->outp, head) {
4330 list_del(&outp->head);
4331 kfree(outp);
4332 }
4333
4334 drm_atomic_state_default_clear(state);
4335}
4336
4337static void
4338nv50_disp_atomic_state_free(struct drm_atomic_state *state)
4339{
4340 struct nv50_atom *atom = nv50_atom(state);
4341 drm_atomic_state_default_release(&atom->state);
4342 kfree(atom);
4343}
4344
4345static struct drm_atomic_state *
4346nv50_disp_atomic_state_alloc(struct drm_device *dev)
4347{
4348 struct nv50_atom *atom;
4349 if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
4350 drm_atomic_state_init(dev, &atom->state) < 0) {
4351 kfree(atom);
4352 return NULL;
4353 }
4354 INIT_LIST_HEAD(&atom->outp);
4355 return &atom->state;
4356}
4357
4358static const struct drm_mode_config_funcs
4359nv50_disp_func = {
4360 .fb_create = nouveau_user_framebuffer_create,
4361 .output_poll_changed = drm_fb_helper_output_poll_changed,
4362 .atomic_check = nv50_disp_atomic_check,
4363 .atomic_commit = nv50_disp_atomic_commit,
4364 .atomic_state_alloc = nv50_disp_atomic_state_alloc,
4365 .atomic_state_clear = nv50_disp_atomic_state_clear,
4366 .atomic_state_free = nv50_disp_atomic_state_free,
4367};
4368
4369/******************************************************************************
4370 * Init
4371 *****************************************************************************/
4372
4373void
4374nv50_display_fini(struct drm_device *dev)
4375{
4376 struct nouveau_encoder *nv_encoder;
4377 struct drm_encoder *encoder;
4378 struct drm_plane *plane;
4379
4380 drm_for_each_plane(plane, dev) {
4381 struct nv50_wndw *wndw = nv50_wndw(plane);
4382 if (plane->funcs != &nv50_wndw)
4383 continue;
4384 nv50_wndw_fini(wndw);
4385 }
4386
4387 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4388 if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
4389 nv_encoder = nouveau_encoder(encoder);
4390 nv50_mstm_fini(nv_encoder->dp.mstm);
4391 }
4392 }
4393}
4394
4395int
4396nv50_display_init(struct drm_device *dev)
4397{
4398 struct drm_encoder *encoder;
4399 struct drm_plane *plane;
4400 u32 *push;
4401
4402 push = evo_wait(nv50_mast(dev), 32);
4403 if (!push)
4404 return -EBUSY;
4405
4406 evo_mthd(push, 0x0088, 1);
4407 evo_data(push, nv50_mast(dev)->base.sync.handle);
4408 evo_kick(push, nv50_mast(dev));
4409
4410 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4411 if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
4412 struct nouveau_encoder *nv_encoder =
4413 nouveau_encoder(encoder);
4414 nv50_mstm_init(nv_encoder->dp.mstm);
4415 }
4416 }
4417
4418 drm_for_each_plane(plane, dev) {
4419 struct nv50_wndw *wndw = nv50_wndw(plane);
4420 if (plane->funcs != &nv50_wndw)
4421 continue;
4422 nv50_wndw_init(wndw);
4423 }
4424
4425 return 0;
4426}
4427
4428void
4429nv50_display_destroy(struct drm_device *dev)
4430{
4431 struct nv50_disp *disp = nv50_disp(dev);
4432
4433 nv50_dmac_destroy(&disp->mast.base, disp->disp);
4434
4435 nouveau_bo_unmap(disp->sync);
4436 if (disp->sync)
4437 nouveau_bo_unpin(disp->sync);
4438 nouveau_bo_ref(NULL, &disp->sync);
4439
4440 nouveau_display(dev)->priv = NULL;
4441 kfree(disp);
4442}
4443
4444MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
4445static int nouveau_atomic = 0;
4446module_param_named(atomic, nouveau_atomic, int, 0400);
4447
4448int
4449nv50_display_create(struct drm_device *dev)
4450{
4451 struct nvif_device *device = &nouveau_drm(dev)->client.device;
4452 struct nouveau_drm *drm = nouveau_drm(dev);
4453 struct dcb_table *dcb = &drm->vbios.dcb;
4454 struct drm_connector *connector, *tmp;
4455 struct nv50_disp *disp;
4456 struct dcb_output *dcbe;
4457 int crtcs, ret, i;
4458
4459 disp = kzalloc(sizeof(*disp), GFP_KERNEL);
4460 if (!disp)
4461 return -ENOMEM;
4462
4463 mutex_init(&disp->mutex);
4464
4465 nouveau_display(dev)->priv = disp;
4466 nouveau_display(dev)->dtor = nv50_display_destroy;
4467 nouveau_display(dev)->init = nv50_display_init;
4468 nouveau_display(dev)->fini = nv50_display_fini;
4469 disp->disp = &nouveau_display(dev)->disp;
4470 dev->mode_config.funcs = &nv50_disp_func;
4471 dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
4472 if (nouveau_atomic)
4473 dev->driver->driver_features |= DRIVER_ATOMIC;
4474
4475 /* small shared memory area we use for notifiers and semaphores */
4476 ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
4477 0, 0x0000, NULL, NULL, &disp->sync);
4478 if (!ret) {
4479 ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
4480 if (!ret) {
4481 ret = nouveau_bo_map(disp->sync);
4482 if (ret)
4483 nouveau_bo_unpin(disp->sync);
4484 }
4485 if (ret)
4486 nouveau_bo_ref(NULL, &disp->sync);
4487 }
4488
4489 if (ret)
4490 goto out;
4491
4492 /* allocate master evo channel */
4493 ret = nv50_core_create(device, disp->disp, disp->sync->bo.offset,
4494 &disp->mast);
4495 if (ret)
4496 goto out;
4497
4498 /* create crtc objects to represent the hw heads */
4499 if (disp->disp->oclass >= GF110_DISP)
4500 crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
4501 else
4502 crtcs = 0x3;
4503
4504 for (i = 0; i < fls(crtcs); i++) {
4505 if (!(crtcs & (1 << i)))
4506 continue;
4507 ret = nv50_head_create(dev, i);
4508 if (ret)
4509 goto out;
4510 }
4511
4512 /* create encoder/connector objects based on VBIOS DCB table */
4513 for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
4514 connector = nouveau_connector_create(dev, dcbe->connector);
4515 if (IS_ERR(connector))
4516 continue;
4517
4518 if (dcbe->location == DCB_LOC_ON_CHIP) {
4519 switch (dcbe->type) {
4520 case DCB_OUTPUT_TMDS:
4521 case DCB_OUTPUT_LVDS:
4522 case DCB_OUTPUT_DP:
4523 ret = nv50_sor_create(connector, dcbe);
4524 break;
4525 case DCB_OUTPUT_ANALOG:
4526 ret = nv50_dac_create(connector, dcbe);
4527 break;
4528 default:
4529 ret = -ENODEV;
4530 break;
4531 }
4532 } else {
4533 ret = nv50_pior_create(connector, dcbe);
4534 }
4535
4536 if (ret) {
4537 NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
4538 dcbe->location, dcbe->type,
4539 ffs(dcbe->or) - 1, ret);
4540 ret = 0;
4541 }
4542 }
4543
4544 /* cull any connectors we created that don't have an encoder */
4545 list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
4546 if (connector->encoder_ids[0])
4547 continue;
4548
4549 NV_WARN(drm, "%s has no encoders, removing\n",
4550 connector->name);
4551 connector->funcs->destroy(connector);
4552 }
4553
4554out:
4555 if (ret)
4556 nv50_display_destroy(dev);
4557 return ret;
4558}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 918187cee84b..fbd3b15583bc 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -28,7 +28,6 @@
28#define __NV50_DISPLAY_H__ 28#define __NV50_DISPLAY_H__
29 29
30#include "nouveau_display.h" 30#include "nouveau_display.h"
31#include "nouveau_crtc.h"
32#include "nouveau_reg.h" 31#include "nouveau_reg.h"
33 32
34int nv50_display_create(struct drm_device *); 33int nv50_display_create(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index a369d978e267..a00ecc3de053 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -78,8 +78,6 @@ nv50_fence_create(struct nouveau_drm *drm)
78 priv->base.resume = nv17_fence_resume; 78 priv->base.resume = nv17_fence_resume;
79 priv->base.context_new = nv50_fence_context_new; 79 priv->base.context_new = nv50_fence_context_new;
80 priv->base.context_del = nv10_fence_context_del; 80 priv->base.context_del = nv10_fence_context_del;
81 priv->base.contexts = 127;
82 priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
83 spin_lock_init(&priv->lock); 81 spin_lock_init(&priv->lock);
84 82
85 ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, 83 ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 5f0c0c27d5dc..090664899247 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -141,9 +141,9 @@ nv84_fence_suspend(struct nouveau_drm *drm)
141 struct nv84_fence_priv *priv = drm->fence; 141 struct nv84_fence_priv *priv = drm->fence;
142 int i; 142 int i;
143 143
144 priv->suspend = vmalloc(priv->base.contexts * sizeof(u32)); 144 priv->suspend = vmalloc(drm->chan.nr * sizeof(u32));
145 if (priv->suspend) { 145 if (priv->suspend) {
146 for (i = 0; i < priv->base.contexts; i++) 146 for (i = 0; i < drm->chan.nr; i++)
147 priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4); 147 priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
148 } 148 }
149 149
@@ -157,7 +157,7 @@ nv84_fence_resume(struct nouveau_drm *drm)
157 int i; 157 int i;
158 158
159 if (priv->suspend) { 159 if (priv->suspend) {
160 for (i = 0; i < priv->base.contexts; i++) 160 for (i = 0; i < drm->chan.nr; i++)
161 nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]); 161 nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]);
162 vfree(priv->suspend); 162 vfree(priv->suspend);
163 priv->suspend = NULL; 163 priv->suspend = NULL;
@@ -179,7 +179,6 @@ nv84_fence_destroy(struct nouveau_drm *drm)
179int 179int
180nv84_fence_create(struct nouveau_drm *drm) 180nv84_fence_create(struct nouveau_drm *drm)
181{ 181{
182 struct nvkm_fifo *fifo = nvxx_fifo(&drm->client.device);
183 struct nv84_fence_priv *priv; 182 struct nv84_fence_priv *priv;
184 u32 domain; 183 u32 domain;
185 int ret; 184 int ret;
@@ -194,8 +193,6 @@ nv84_fence_create(struct nouveau_drm *drm)
194 priv->base.context_new = nv84_fence_context_new; 193 priv->base.context_new = nv84_fence_context_new;
195 priv->base.context_del = nv84_fence_context_del; 194 priv->base.context_del = nv84_fence_context_del;
196 195
197 priv->base.contexts = fifo->nr;
198 priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
199 priv->base.uevent = true; 196 priv->base.uevent = true;
200 197
201 mutex_init(&priv->mutex); 198 mutex_init(&priv->mutex);
@@ -207,7 +204,7 @@ nv84_fence_create(struct nouveau_drm *drm)
207 * will lose CPU/GPU coherency! 204 * will lose CPU/GPU coherency!
208 */ 205 */
209 TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED; 206 TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
210 ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0, 207 ret = nouveau_bo_new(&drm->client, 16 * drm->chan.nr, 0,
211 domain, 0, 0, NULL, NULL, &priv->bo); 208 domain, 0, 0, NULL, NULL, &priv->bo);
212 if (ret == 0) { 209 if (ret == 0) {
213 ret = nouveau_bo_pin(priv->bo, domain, false); 210 ret = nouveau_bo_pin(priv->bo, domain, false);
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index f1675a4ab6fa..42e8c85caa33 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -1,8 +1,14 @@
1nvif-y := nvif/object.o 1nvif-y := nvif/object.o
2nvif-y += nvif/client.o 2nvif-y += nvif/client.o
3nvif-y += nvif/device.o 3nvif-y += nvif/device.o
4nvif-y += nvif/disp.o
4nvif-y += nvif/driver.o 5nvif-y += nvif/driver.o
6nvif-y += nvif/fifo.o
5nvif-y += nvif/mem.o 7nvif-y += nvif/mem.o
6nvif-y += nvif/mmu.o 8nvif-y += nvif/mmu.o
7nvif-y += nvif/notify.o 9nvif-y += nvif/notify.o
8nvif-y += nvif/vmm.o 10nvif-y += nvif/vmm.o
11
12# Usermode classes
13nvif-y += nvif/user.o
14nvif-y += nvif/userc361.o
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
index 252d8c33215b..1ec101ba3b42 100644
--- a/drivers/gpu/drm/nouveau/nvif/device.c
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -37,6 +37,9 @@ nvif_device_time(struct nvif_device *device)
37void 37void
38nvif_device_fini(struct nvif_device *device) 38nvif_device_fini(struct nvif_device *device)
39{ 39{
40 nvif_user_fini(device);
41 kfree(device->runlist);
42 device->runlist = NULL;
40 nvif_object_fini(&device->object); 43 nvif_object_fini(&device->object);
41} 44}
42 45
@@ -46,6 +49,8 @@ nvif_device_init(struct nvif_object *parent, u32 handle, s32 oclass,
46{ 49{
47 int ret = nvif_object_init(parent, handle, oclass, data, size, 50 int ret = nvif_object_init(parent, handle, oclass, data, size,
48 &device->object); 51 &device->object);
52 device->runlist = NULL;
53 device->user.func = NULL;
49 if (ret == 0) { 54 if (ret == 0) {
50 device->info.version = 0; 55 device->info.version = 0;
51 ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_INFO, 56 ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_INFO,
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
new file mode 100644
index 000000000000..18c7d064f75c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -0,0 +1,60 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include <nvif/disp.h>
23#include <nvif/device.h>
24
25#include <nvif/class.h>
26
27void
28nvif_disp_dtor(struct nvif_disp *disp)
29{
30 nvif_object_fini(&disp->object);
31}
32
33int
34nvif_disp_ctor(struct nvif_device *device, s32 oclass, struct nvif_disp *disp)
35{
36 static const struct nvif_mclass disps[] = {
37 { GV100_DISP, -1 },
38 { GP102_DISP, -1 },
39 { GP100_DISP, -1 },
40 { GM200_DISP, -1 },
41 { GM107_DISP, -1 },
42 { GK110_DISP, -1 },
43 { GK104_DISP, -1 },
44 { GF110_DISP, -1 },
45 { GT214_DISP, -1 },
46 { GT206_DISP, -1 },
47 { GT200_DISP, -1 },
48 { G82_DISP, -1 },
49 { NV50_DISP, -1 },
50 { NV04_DISP, -1 },
51 {}
52 };
53 int cid = nvif_sclass(&device->object, disps, oclass);
54 disp->object.client = NULL;
55 if (cid < 0)
56 return cid;
57
58 return nvif_object_init(&device->object, 0, disps[cid].oclass,
59 NULL, 0, &disp->object);
60}
diff --git a/drivers/gpu/drm/nouveau/nvif/fifo.c b/drivers/gpu/drm/nouveau/nvif/fifo.c
new file mode 100644
index 000000000000..99d4fd17543c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/fifo.c
@@ -0,0 +1,99 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include <nvif/fifo.h>
23
24static int
25nvif_fifo_runlists(struct nvif_device *device)
26{
27 struct nvif_object *object = &device->object;
28 struct {
29 struct nv_device_info_v1 m;
30 struct {
31 struct nv_device_info_v1_data runlists;
32 struct nv_device_info_v1_data runlist[64];
33 } v;
34 } *a;
35 int ret, i;
36
37 if (device->runlist)
38 return 0;
39
40 if (!(a = kmalloc(sizeof(*a), GFP_KERNEL)))
41 return -ENOMEM;
42 a->m.version = 1;
43 a->m.count = sizeof(a->v) / sizeof(a->v.runlists);
44 a->v.runlists.mthd = NV_DEVICE_FIFO_RUNLISTS;
45 for (i = 0; i < ARRAY_SIZE(a->v.runlist); i++)
46 a->v.runlist[i].mthd = NV_DEVICE_FIFO_RUNLIST_ENGINES(i);
47
48 ret = nvif_object_mthd(object, NV_DEVICE_V0_INFO, a, sizeof(*a));
49 if (ret)
50 goto done;
51
52 device->runlists = fls64(a->v.runlists.data);
53 device->runlist = kzalloc(sizeof(*device->runlist) *
54 device->runlists, GFP_KERNEL);
55 if (!device->runlist) {
56 ret = -ENOMEM;
57 goto done;
58 }
59
60 for (i = 0; i < device->runlists; i++) {
61 if (a->v.runlists.data & BIT_ULL(i))
62 device->runlist[i].engines = a->v.runlist[i].data;
63 }
64
65done:
66 kfree(a);
67 return ret;
68}
69
70u64
71nvif_fifo_runlist(struct nvif_device *device, u64 engine)
72{
73 struct nvif_object *object = &device->object;
74 struct {
75 struct nv_device_info_v1 m;
76 struct {
77 struct nv_device_info_v1_data engine;
78 } v;
79 } a = {
80 .m.version = 1,
81 .m.count = sizeof(a.v) / sizeof(a.v.engine),
82 .v.engine.mthd = engine,
83 };
84 u64 runm = 0;
85 int ret, i;
86
87 if ((ret = nvif_fifo_runlists(device)))
88 return runm;
89
90 ret = nvif_object_mthd(object, NV_DEVICE_V0_INFO, &a, sizeof(a));
91 if (ret == 0) {
92 for (i = 0; i < device->runlists; i++) {
93 if (device->runlist[i].engines & a.v.engine.data)
94 runm |= BIT_ULL(i);
95 }
96 }
97
98 return runm;
99}
diff --git a/drivers/gpu/drm/nouveau/nvif/mem.c b/drivers/gpu/drm/nouveau/nvif/mem.c
index 0f9382c60145..b6ebb3b58673 100644
--- a/drivers/gpu/drm/nouveau/nvif/mem.c
+++ b/drivers/gpu/drm/nouveau/nvif/mem.c
@@ -24,6 +24,19 @@
24 24
25#include <nvif/if000a.h> 25#include <nvif/if000a.h>
26 26
27int
28nvif_mem_init_map(struct nvif_mmu *mmu, u8 type, u64 size, struct nvif_mem *mem)
29{
30 int ret = nvif_mem_init(mmu, mmu->mem, NVIF_MEM_MAPPABLE | type, 0,
31 size, NULL, 0, mem);
32 if (ret == 0) {
33 ret = nvif_object_map(&mem->object, NULL, 0);
34 if (ret)
35 nvif_mem_fini(mem);
36 }
37 return ret;
38}
39
27void 40void
28nvif_mem_fini(struct nvif_mem *mem) 41nvif_mem_fini(struct nvif_mem *mem)
29{ 42{
diff --git a/drivers/gpu/drm/nouveau/nvif/mmu.c b/drivers/gpu/drm/nouveau/nvif/mmu.c
index 15d0dcbf7ab4..358ac4f3cf91 100644
--- a/drivers/gpu/drm/nouveau/nvif/mmu.c
+++ b/drivers/gpu/drm/nouveau/nvif/mmu.c
@@ -36,6 +36,12 @@ nvif_mmu_fini(struct nvif_mmu *mmu)
36int 36int
37nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu) 37nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
38{ 38{
39 static const struct nvif_mclass mems[] = {
40 { NVIF_CLASS_MEM_GF100, -1 },
41 { NVIF_CLASS_MEM_NV50 , -1 },
42 { NVIF_CLASS_MEM_NV04 , -1 },
43 {}
44 };
39 struct nvif_mmu_v0 args; 45 struct nvif_mmu_v0 args;
40 int ret, i; 46 int ret, i;
41 47
@@ -54,6 +60,11 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
54 mmu->type_nr = args.type_nr; 60 mmu->type_nr = args.type_nr;
55 mmu->kind_nr = args.kind_nr; 61 mmu->kind_nr = args.kind_nr;
56 62
63 ret = nvif_mclass(&mmu->object, mems);
64 if (ret < 0)
65 goto done;
66 mmu->mem = mems[ret].oclass;
67
57 mmu->heap = kmalloc(sizeof(*mmu->heap) * mmu->heap_nr, GFP_KERNEL); 68 mmu->heap = kmalloc(sizeof(*mmu->heap) * mmu->heap_nr, GFP_KERNEL);
58 mmu->type = kmalloc(sizeof(*mmu->type) * mmu->type_nr, GFP_KERNEL); 69 mmu->type = kmalloc(sizeof(*mmu->type) * mmu->type_nr, GFP_KERNEL);
59 if (ret = -ENOMEM, !mmu->heap || !mmu->type) 70 if (ret = -ENOMEM, !mmu->heap || !mmu->type)
diff --git a/drivers/gpu/drm/nouveau/nvif/user.c b/drivers/gpu/drm/nouveau/nvif/user.c
new file mode 100644
index 000000000000..10da3cdca647
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/user.c
@@ -0,0 +1,64 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include <nvif/user.h>
23#include <nvif/device.h>
24
25#include <nvif/class.h>
26
27void
28nvif_user_fini(struct nvif_device *device)
29{
30 if (device->user.func) {
31 nvif_object_fini(&device->user.object);
32 device->user.func = NULL;
33 }
34}
35
36int
37nvif_user_init(struct nvif_device *device)
38{
39 struct {
40 s32 oclass;
41 int version;
42 const struct nvif_user_func *func;
43 } users[] = {
44 { VOLTA_USERMODE_A, -1, &nvif_userc361 },
45 {}
46 };
47 int cid, ret;
48
49 if (device->user.func)
50 return 0;
51
52 cid = nvif_mclass(&device->object, users);
53 if (cid < 0)
54 return cid;
55
56 ret = nvif_object_init(&device->object, 0, users[cid].oclass, NULL, 0,
57 &device->user.object);
58 if (ret)
59 return ret;
60
61 nvif_object_map(&device->user.object, NULL, 0);
62 device->user.func = users[cid].func;
63 return 0;
64}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c b/drivers/gpu/drm/nouveau/nvif/userc361.c
index 1530a9217aea..19f9958e7e01 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c
+++ b/drivers/gpu/drm/nouveau/nvif/userc361.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2016 Red Hat Inc. 2 * Copyright 2018 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -18,17 +18,16 @@
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */ 21 */
24#include "changk104.h" 22#include <nvif/user.h>
25 23
26#include <nvif/class.h> 24static void
25nvif_userc361_doorbell(struct nvif_user *user, u32 token)
26{
27 nvif_wr32(&user->object, 0x90, token);
28}
27 29
28const struct nvkm_fifo_chan_oclass 30const struct nvif_user_func
29gp100_fifo_gpfifo_oclass = { 31nvif_userc361 = {
30 .base.oclass = PASCAL_CHANNEL_GPFIFO_A, 32 .doorbell = nvif_userc361_doorbell,
31 .base.minver = 0,
32 .base.maxver = 0,
33 .ctor = gk104_fifo_gpfifo_new,
34}; 33};
diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
index 31cdb2d2e1ff..191832be6c65 100644
--- a/drivers/gpu/drm/nouveau/nvif/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
@@ -37,7 +37,7 @@ nvif_vmm_map(struct nvif_vmm *vmm, u64 addr, u64 size, void *argv, u32 argc,
37 struct nvif_mem *mem, u64 offset) 37 struct nvif_mem *mem, u64 offset)
38{ 38{
39 struct nvif_vmm_map_v0 *args; 39 struct nvif_vmm_map_v0 *args;
40 u8 stack[16]; 40 u8 stack[48];
41 int ret; 41 int ret;
42 42
43 if (sizeof(*args) + argc > sizeof(stack)) { 43 if (sizeof(*args) + argc > sizeof(stack)) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index 657231c3c098..d0322ce85172 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -83,6 +83,20 @@ nvkm_engine_intr(struct nvkm_subdev *subdev)
83} 83}
84 84
85static int 85static int
86nvkm_engine_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data)
87{
88 struct nvkm_engine *engine = nvkm_engine(subdev);
89 if (engine->func->info) {
90 if ((engine = nvkm_engine_ref(engine))) {
91 int ret = engine->func->info(engine, mthd, data);
92 nvkm_engine_unref(&engine);
93 return ret;
94 }
95 }
96 return -ENOSYS;
97}
98
99static int
86nvkm_engine_fini(struct nvkm_subdev *subdev, bool suspend) 100nvkm_engine_fini(struct nvkm_subdev *subdev, bool suspend)
87{ 101{
88 struct nvkm_engine *engine = nvkm_engine(subdev); 102 struct nvkm_engine *engine = nvkm_engine(subdev);
@@ -150,6 +164,7 @@ nvkm_engine_func = {
150 .preinit = nvkm_engine_preinit, 164 .preinit = nvkm_engine_preinit,
151 .init = nvkm_engine_init, 165 .init = nvkm_engine_init,
152 .fini = nvkm_engine_fini, 166 .fini = nvkm_engine_fini,
167 .info = nvkm_engine_info,
153 .intr = nvkm_engine_intr, 168 .intr = nvkm_engine_intr,
154}; 169};
155 170
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index a134d225f958..03f676c18aad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -35,6 +35,7 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
35 [NVKM_SUBDEV_BUS ] = "bus", 35 [NVKM_SUBDEV_BUS ] = "bus",
36 [NVKM_SUBDEV_CLK ] = "clk", 36 [NVKM_SUBDEV_CLK ] = "clk",
37 [NVKM_SUBDEV_DEVINIT ] = "devinit", 37 [NVKM_SUBDEV_DEVINIT ] = "devinit",
38 [NVKM_SUBDEV_FAULT ] = "fault",
38 [NVKM_SUBDEV_FB ] = "fb", 39 [NVKM_SUBDEV_FB ] = "fb",
39 [NVKM_SUBDEV_FUSE ] = "fuse", 40 [NVKM_SUBDEV_FUSE ] = "fuse",
40 [NVKM_SUBDEV_GPIO ] = "gpio", 41 [NVKM_SUBDEV_GPIO ] = "gpio",
@@ -60,6 +61,9 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
60 [NVKM_ENGINE_CE3 ] = "ce3", 61 [NVKM_ENGINE_CE3 ] = "ce3",
61 [NVKM_ENGINE_CE4 ] = "ce4", 62 [NVKM_ENGINE_CE4 ] = "ce4",
62 [NVKM_ENGINE_CE5 ] = "ce5", 63 [NVKM_ENGINE_CE5 ] = "ce5",
64 [NVKM_ENGINE_CE6 ] = "ce6",
65 [NVKM_ENGINE_CE7 ] = "ce7",
66 [NVKM_ENGINE_CE8 ] = "ce8",
63 [NVKM_ENGINE_CIPHER ] = "cipher", 67 [NVKM_ENGINE_CIPHER ] = "cipher",
64 [NVKM_ENGINE_DISP ] = "disp", 68 [NVKM_ENGINE_DISP ] = "disp",
65 [NVKM_ENGINE_DMAOBJ ] = "dma", 69 [NVKM_ENGINE_DMAOBJ ] = "dma",
@@ -92,6 +96,14 @@ nvkm_subdev_intr(struct nvkm_subdev *subdev)
92} 96}
93 97
94int 98int
99nvkm_subdev_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data)
100{
101 if (subdev->func->info)
102 return subdev->func->info(subdev, mthd, data);
103 return -ENOSYS;
104}
105
106int
95nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend) 107nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
96{ 108{
97 struct nvkm_device *device = subdev->device; 109 struct nvkm_device *device = subdev->device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 255d81ccf916..80d784441904 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -5,3 +5,4 @@ nvkm-y += nvkm/engine/ce/gm107.o
5nvkm-y += nvkm/engine/ce/gm200.o 5nvkm-y += nvkm/engine/ce/gm200.o
6nvkm-y += nvkm/engine/ce/gp100.o 6nvkm-y += nvkm/engine/ce/gp100.o
7nvkm-y += nvkm/engine/ce/gp102.o 7nvkm-y += nvkm/engine/ce/gp102.o
8nvkm-y += nvkm/engine/ce/gv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
index fa781b5a7e07..fcda3de45857 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2015 Red Hat Inc. 2 * Copyright 2018 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -18,20 +18,23 @@
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 21 */
24#include "channv50.h" 22#include "priv.h"
25#include "rootnv50.h"
26 23
27#include <nvif/class.h> 24#include <nvif/class.h>
28 25
29const struct nv50_disp_pioc_oclass 26static const struct nvkm_engine_func
30g84_disp_curs_oclass = { 27gv100_ce = {
31 .base.oclass = G82_DISP_CURSOR, 28 .intr = gp100_ce_intr,
32 .base.minver = 0, 29 .sclass = {
33 .base.maxver = 0, 30 { -1, -1, VOLTA_DMA_COPY_A },
34 .ctor = nv50_disp_curs_new, 31 {}
35 .func = &nv50_disp_pioc_func, 32 }
36 .chid = { 7, 7 },
37}; 33};
34
35int
36gv100_ce_new(struct nvkm_device *device, int index,
37 struct nvkm_engine **pengine)
38{
39 return nvkm_engine_new_(&gv100_ce, device, index, true, pengine);
40}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 05cd674326a6..e294013426ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2161,6 +2161,7 @@ nv130_chipset = {
2161 .bios = nvkm_bios_new, 2161 .bios = nvkm_bios_new,
2162 .bus = gf100_bus_new, 2162 .bus = gf100_bus_new,
2163 .devinit = gm200_devinit_new, 2163 .devinit = gm200_devinit_new,
2164 .fault = gp100_fault_new,
2164 .fb = gp100_fb_new, 2165 .fb = gp100_fb_new,
2165 .fuse = gm107_fuse_new, 2166 .fuse = gm107_fuse_new,
2166 .gpio = gk104_gpio_new, 2167 .gpio = gk104_gpio_new,
@@ -2196,13 +2197,14 @@ nv132_chipset = {
2196 .bios = nvkm_bios_new, 2197 .bios = nvkm_bios_new,
2197 .bus = gf100_bus_new, 2198 .bus = gf100_bus_new,
2198 .devinit = gm200_devinit_new, 2199 .devinit = gm200_devinit_new,
2200 .fault = gp100_fault_new,
2199 .fb = gp102_fb_new, 2201 .fb = gp102_fb_new,
2200 .fuse = gm107_fuse_new, 2202 .fuse = gm107_fuse_new,
2201 .gpio = gk104_gpio_new, 2203 .gpio = gk104_gpio_new,
2202 .i2c = gm200_i2c_new, 2204 .i2c = gm200_i2c_new,
2203 .ibus = gm200_ibus_new, 2205 .ibus = gm200_ibus_new,
2204 .imem = nv50_instmem_new, 2206 .imem = nv50_instmem_new,
2205 .ltc = gp100_ltc_new, 2207 .ltc = gp102_ltc_new,
2206 .mc = gp100_mc_new, 2208 .mc = gp100_mc_new,
2207 .mmu = gp100_mmu_new, 2209 .mmu = gp100_mmu_new,
2208 .therm = gp100_therm_new, 2210 .therm = gp100_therm_new,
@@ -2231,13 +2233,14 @@ nv134_chipset = {
2231 .bios = nvkm_bios_new, 2233 .bios = nvkm_bios_new,
2232 .bus = gf100_bus_new, 2234 .bus = gf100_bus_new,
2233 .devinit = gm200_devinit_new, 2235 .devinit = gm200_devinit_new,
2236 .fault = gp100_fault_new,
2234 .fb = gp102_fb_new, 2237 .fb = gp102_fb_new,
2235 .fuse = gm107_fuse_new, 2238 .fuse = gm107_fuse_new,
2236 .gpio = gk104_gpio_new, 2239 .gpio = gk104_gpio_new,
2237 .i2c = gm200_i2c_new, 2240 .i2c = gm200_i2c_new,
2238 .ibus = gm200_ibus_new, 2241 .ibus = gm200_ibus_new,
2239 .imem = nv50_instmem_new, 2242 .imem = nv50_instmem_new,
2240 .ltc = gp100_ltc_new, 2243 .ltc = gp102_ltc_new,
2241 .mc = gp100_mc_new, 2244 .mc = gp100_mc_new,
2242 .mmu = gp100_mmu_new, 2245 .mmu = gp100_mmu_new,
2243 .therm = gp100_therm_new, 2246 .therm = gp100_therm_new,
@@ -2253,7 +2256,7 @@ nv134_chipset = {
2253 .disp = gp102_disp_new, 2256 .disp = gp102_disp_new,
2254 .dma = gf119_dma_new, 2257 .dma = gf119_dma_new,
2255 .fifo = gp100_fifo_new, 2258 .fifo = gp100_fifo_new,
2256 .gr = gp102_gr_new, 2259 .gr = gp104_gr_new,
2257 .nvdec = gp102_nvdec_new, 2260 .nvdec = gp102_nvdec_new,
2258 .sec2 = gp102_sec2_new, 2261 .sec2 = gp102_sec2_new,
2259 .sw = gf100_sw_new, 2262 .sw = gf100_sw_new,
@@ -2266,13 +2269,14 @@ nv136_chipset = {
2266 .bios = nvkm_bios_new, 2269 .bios = nvkm_bios_new,
2267 .bus = gf100_bus_new, 2270 .bus = gf100_bus_new,
2268 .devinit = gm200_devinit_new, 2271 .devinit = gm200_devinit_new,
2272 .fault = gp100_fault_new,
2269 .fb = gp102_fb_new, 2273 .fb = gp102_fb_new,
2270 .fuse = gm107_fuse_new, 2274 .fuse = gm107_fuse_new,
2271 .gpio = gk104_gpio_new, 2275 .gpio = gk104_gpio_new,
2272 .i2c = gm200_i2c_new, 2276 .i2c = gm200_i2c_new,
2273 .ibus = gm200_ibus_new, 2277 .ibus = gm200_ibus_new,
2274 .imem = nv50_instmem_new, 2278 .imem = nv50_instmem_new,
2275 .ltc = gp100_ltc_new, 2279 .ltc = gp102_ltc_new,
2276 .mc = gp100_mc_new, 2280 .mc = gp100_mc_new,
2277 .mmu = gp100_mmu_new, 2281 .mmu = gp100_mmu_new,
2278 .therm = gp100_therm_new, 2282 .therm = gp100_therm_new,
@@ -2288,7 +2292,7 @@ nv136_chipset = {
2288 .disp = gp102_disp_new, 2292 .disp = gp102_disp_new,
2289 .dma = gf119_dma_new, 2293 .dma = gf119_dma_new,
2290 .fifo = gp100_fifo_new, 2294 .fifo = gp100_fifo_new,
2291 .gr = gp102_gr_new, 2295 .gr = gp104_gr_new,
2292 .nvdec = gp102_nvdec_new, 2296 .nvdec = gp102_nvdec_new,
2293 .sec2 = gp102_sec2_new, 2297 .sec2 = gp102_sec2_new,
2294 .sw = gf100_sw_new, 2298 .sw = gf100_sw_new,
@@ -2301,13 +2305,14 @@ nv137_chipset = {
2301 .bios = nvkm_bios_new, 2305 .bios = nvkm_bios_new,
2302 .bus = gf100_bus_new, 2306 .bus = gf100_bus_new,
2303 .devinit = gm200_devinit_new, 2307 .devinit = gm200_devinit_new,
2308 .fault = gp100_fault_new,
2304 .fb = gp102_fb_new, 2309 .fb = gp102_fb_new,
2305 .fuse = gm107_fuse_new, 2310 .fuse = gm107_fuse_new,
2306 .gpio = gk104_gpio_new, 2311 .gpio = gk104_gpio_new,
2307 .i2c = gm200_i2c_new, 2312 .i2c = gm200_i2c_new,
2308 .ibus = gm200_ibus_new, 2313 .ibus = gm200_ibus_new,
2309 .imem = nv50_instmem_new, 2314 .imem = nv50_instmem_new,
2310 .ltc = gp100_ltc_new, 2315 .ltc = gp102_ltc_new,
2311 .mc = gp100_mc_new, 2316 .mc = gp100_mc_new,
2312 .mmu = gp100_mmu_new, 2317 .mmu = gp100_mmu_new,
2313 .therm = gp100_therm_new, 2318 .therm = gp100_therm_new,
@@ -2336,13 +2341,14 @@ nv138_chipset = {
2336 .bios = nvkm_bios_new, 2341 .bios = nvkm_bios_new,
2337 .bus = gf100_bus_new, 2342 .bus = gf100_bus_new,
2338 .devinit = gm200_devinit_new, 2343 .devinit = gm200_devinit_new,
2344 .fault = gp100_fault_new,
2339 .fb = gp102_fb_new, 2345 .fb = gp102_fb_new,
2340 .fuse = gm107_fuse_new, 2346 .fuse = gm107_fuse_new,
2341 .gpio = gk104_gpio_new, 2347 .gpio = gk104_gpio_new,
2342 .i2c = gm200_i2c_new, 2348 .i2c = gm200_i2c_new,
2343 .ibus = gm200_ibus_new, 2349 .ibus = gm200_ibus_new,
2344 .imem = nv50_instmem_new, 2350 .imem = nv50_instmem_new,
2345 .ltc = gp100_ltc_new, 2351 .ltc = gp102_ltc_new,
2346 .mc = gp100_mc_new, 2352 .mc = gp100_mc_new,
2347 .mmu = gp100_mmu_new, 2353 .mmu = gp100_mmu_new,
2348 .therm = gp100_therm_new, 2354 .therm = gp100_therm_new,
@@ -2369,11 +2375,12 @@ nv13b_chipset = {
2369 .name = "GP10B", 2375 .name = "GP10B",
2370 .bar = gm20b_bar_new, 2376 .bar = gm20b_bar_new,
2371 .bus = gf100_bus_new, 2377 .bus = gf100_bus_new,
2378 .fault = gp100_fault_new,
2372 .fb = gp10b_fb_new, 2379 .fb = gp10b_fb_new,
2373 .fuse = gm107_fuse_new, 2380 .fuse = gm107_fuse_new,
2374 .ibus = gp10b_ibus_new, 2381 .ibus = gp10b_ibus_new,
2375 .imem = gk20a_instmem_new, 2382 .imem = gk20a_instmem_new,
2376 .ltc = gp100_ltc_new, 2383 .ltc = gp102_ltc_new,
2377 .mc = gp10b_mc_new, 2384 .mc = gp10b_mc_new,
2378 .mmu = gp10b_mmu_new, 2385 .mmu = gp10b_mmu_new,
2379 .secboot = gp10b_secboot_new, 2386 .secboot = gp10b_secboot_new,
@@ -2387,6 +2394,46 @@ nv13b_chipset = {
2387 .sw = gf100_sw_new, 2394 .sw = gf100_sw_new,
2388}; 2395};
2389 2396
2397static const struct nvkm_device_chip
2398nv140_chipset = {
2399 .name = "GV100",
2400 .bar = gm107_bar_new,
2401 .bios = nvkm_bios_new,
2402 .bus = gf100_bus_new,
2403 .devinit = gv100_devinit_new,
2404 .fault = gv100_fault_new,
2405 .fb = gv100_fb_new,
2406 .fuse = gm107_fuse_new,
2407 .gpio = gk104_gpio_new,
2408 .i2c = gm200_i2c_new,
2409 .ibus = gm200_ibus_new,
2410 .imem = nv50_instmem_new,
2411 .ltc = gp102_ltc_new,
2412 .mc = gp100_mc_new,
2413 .mmu = gv100_mmu_new,
2414 .pci = gp100_pci_new,
2415 .pmu = gp102_pmu_new,
2416 .secboot = gp108_secboot_new,
2417 .therm = gp100_therm_new,
2418 .timer = gk20a_timer_new,
2419 .top = gk104_top_new,
2420 .disp = gv100_disp_new,
2421 .ce[0] = gv100_ce_new,
2422 .ce[1] = gv100_ce_new,
2423 .ce[2] = gv100_ce_new,
2424 .ce[3] = gv100_ce_new,
2425 .ce[4] = gv100_ce_new,
2426 .ce[5] = gv100_ce_new,
2427 .ce[6] = gv100_ce_new,
2428 .ce[7] = gv100_ce_new,
2429 .ce[8] = gv100_ce_new,
2430 .dma = gv100_dma_new,
2431 .fifo = gv100_fifo_new,
2432 .gr = gv100_gr_new,
2433 .nvdec = gp102_nvdec_new,
2434 .sec2 = gp102_sec2_new,
2435};
2436
2390static int 2437static int
2391nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size, 2438nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
2392 struct nvkm_notify *notify) 2439 struct nvkm_notify *notify)
@@ -2420,6 +2467,7 @@ nvkm_device_subdev(struct nvkm_device *device, int index)
2420 _(BUS , device->bus , &device->bus->subdev); 2467 _(BUS , device->bus , &device->bus->subdev);
2421 _(CLK , device->clk , &device->clk->subdev); 2468 _(CLK , device->clk , &device->clk->subdev);
2422 _(DEVINIT , device->devinit , &device->devinit->subdev); 2469 _(DEVINIT , device->devinit , &device->devinit->subdev);
2470 _(FAULT , device->fault , &device->fault->subdev);
2423 _(FB , device->fb , &device->fb->subdev); 2471 _(FB , device->fb , &device->fb->subdev);
2424 _(FUSE , device->fuse , &device->fuse->subdev); 2472 _(FUSE , device->fuse , &device->fuse->subdev);
2425 _(GPIO , device->gpio , &device->gpio->subdev); 2473 _(GPIO , device->gpio , &device->gpio->subdev);
@@ -2463,6 +2511,9 @@ nvkm_device_engine(struct nvkm_device *device, int index)
2463 _(CE3 , device->ce[3] , device->ce[3]); 2511 _(CE3 , device->ce[3] , device->ce[3]);
2464 _(CE4 , device->ce[4] , device->ce[4]); 2512 _(CE4 , device->ce[4] , device->ce[4]);
2465 _(CE5 , device->ce[5] , device->ce[5]); 2513 _(CE5 , device->ce[5] , device->ce[5]);
2514 _(CE6 , device->ce[6] , device->ce[6]);
2515 _(CE7 , device->ce[7] , device->ce[7]);
2516 _(CE8 , device->ce[8] , device->ce[8]);
2466 _(CIPHER , device->cipher , device->cipher); 2517 _(CIPHER , device->cipher , device->cipher);
2467 _(DISP , device->disp , &device->disp->engine); 2518 _(DISP , device->disp , &device->disp->engine);
2468 _(DMAOBJ , device->dma , &device->dma->engine); 2519 _(DMAOBJ , device->dma , &device->dma->engine);
@@ -2739,6 +2790,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
2739 case 0x110: 2790 case 0x110:
2740 case 0x120: device->card_type = GM100; break; 2791 case 0x120: device->card_type = GM100; break;
2741 case 0x130: device->card_type = GP100; break; 2792 case 0x130: device->card_type = GP100; break;
2793 case 0x140: device->card_type = GV100; break;
2742 default: 2794 default:
2743 break; 2795 break;
2744 } 2796 }
@@ -2830,6 +2882,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
2830 case 0x137: device->chip = &nv137_chipset; break; 2882 case 0x137: device->chip = &nv137_chipset; break;
2831 case 0x138: device->chip = &nv138_chipset; break; 2883 case 0x138: device->chip = &nv138_chipset; break;
2832 case 0x13b: device->chip = &nv13b_chipset; break; 2884 case 0x13b: device->chip = &nv13b_chipset; break;
2885 case 0x140: device->chip = &nv140_chipset; break;
2833 default: 2886 default:
2834 nvdev_error(device, "unknown chipset (%08x)\n", boot0); 2887 nvdev_error(device, "unknown chipset (%08x)\n", boot0);
2835 goto done; 2888 goto done;
@@ -2891,6 +2944,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
2891 _(NVKM_SUBDEV_BUS , bus); 2944 _(NVKM_SUBDEV_BUS , bus);
2892 _(NVKM_SUBDEV_CLK , clk); 2945 _(NVKM_SUBDEV_CLK , clk);
2893 _(NVKM_SUBDEV_DEVINIT , devinit); 2946 _(NVKM_SUBDEV_DEVINIT , devinit);
2947 _(NVKM_SUBDEV_FAULT , fault);
2894 _(NVKM_SUBDEV_FB , fb); 2948 _(NVKM_SUBDEV_FB , fb);
2895 _(NVKM_SUBDEV_FUSE , fuse); 2949 _(NVKM_SUBDEV_FUSE , fuse);
2896 _(NVKM_SUBDEV_GPIO , gpio); 2950 _(NVKM_SUBDEV_GPIO , gpio);
@@ -2916,6 +2970,9 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
2916 _(NVKM_ENGINE_CE3 , ce[3]); 2970 _(NVKM_ENGINE_CE3 , ce[3]);
2917 _(NVKM_ENGINE_CE4 , ce[4]); 2971 _(NVKM_ENGINE_CE4 , ce[4]);
2918 _(NVKM_ENGINE_CE5 , ce[5]); 2972 _(NVKM_ENGINE_CE5 , ce[5]);
2973 _(NVKM_ENGINE_CE6 , ce[6]);
2974 _(NVKM_ENGINE_CE7 , ce[7]);
2975 _(NVKM_ENGINE_CE8 , ce[8]);
2919 _(NVKM_ENGINE_CIPHER , cipher); 2976 _(NVKM_ENGINE_CIPHER , cipher);
2920 _(NVKM_ENGINE_DISP , disp); 2977 _(NVKM_ENGINE_DISP , disp);
2921 _(NVKM_ENGINE_DMAOBJ , dma); 2978 _(NVKM_ENGINE_DMAOBJ , dma);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 08d0bf605722..253ab914a8ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -8,6 +8,7 @@
8#include <subdev/bus.h> 8#include <subdev/bus.h>
9#include <subdev/clk.h> 9#include <subdev/clk.h>
10#include <subdev/devinit.h> 10#include <subdev/devinit.h>
11#include <subdev/fault.h>
11#include <subdev/fb.h> 12#include <subdev/fb.h>
12#include <subdev/fuse.h> 13#include <subdev/fuse.h>
13#include <subdev/gpio.h> 14#include <subdev/gpio.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 17adcb4e8854..dde6bbafa709 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -40,6 +40,66 @@ struct nvkm_udevice {
40}; 40};
41 41
42static int 42static int
43nvkm_udevice_info_subdev(struct nvkm_device *device, u64 mthd, u64 *data)
44{
45 struct nvkm_subdev *subdev;
46 enum nvkm_devidx subidx;
47
48 switch (mthd & NV_DEVICE_INFO_UNIT) {
49 case NV_DEVICE_FIFO(0): subidx = NVKM_ENGINE_FIFO; break;
50 default:
51 return -EINVAL;
52 }
53
54 subdev = nvkm_device_subdev(device, subidx);
55 if (subdev)
56 return nvkm_subdev_info(subdev, mthd, data);
57 return -ENODEV;
58}
59
60static void
61nvkm_udevice_info_v1(struct nvkm_device *device,
62 struct nv_device_info_v1_data *args)
63{
64 if (args->mthd & NV_DEVICE_INFO_UNIT) {
65 if (nvkm_udevice_info_subdev(device, args->mthd, &args->data))
66 args->mthd = NV_DEVICE_INFO_INVALID;
67 return;
68 }
69
70 switch (args->mthd) {
71#define ENGINE__(A,B,C) NV_DEVICE_INFO_ENGINE_##A: { int _i; \
72 for (_i = (B), args->data = 0ULL; _i <= (C); _i++) { \
73 if (nvkm_device_engine(device, _i)) \
74 args->data |= BIT_ULL(_i); \
75 } \
76}
77#define ENGINE_A(A) ENGINE__(A, NVKM_ENGINE_##A , NVKM_ENGINE_##A)
78#define ENGINE_B(A) ENGINE__(A, NVKM_ENGINE_##A##0, NVKM_ENGINE_##A##_LAST)
79 case ENGINE_A(SW ); break;
80 case ENGINE_A(GR ); break;
81 case ENGINE_A(MPEG ); break;
82 case ENGINE_A(ME ); break;
83 case ENGINE_A(CIPHER); break;
84 case ENGINE_A(BSP ); break;
85 case ENGINE_A(VP ); break;
86 case ENGINE_B(CE ); break;
87 case ENGINE_A(SEC ); break;
88 case ENGINE_A(MSVLD ); break;
89 case ENGINE_A(MSPDEC); break;
90 case ENGINE_A(MSPPP ); break;
91 case ENGINE_A(MSENC ); break;
92 case ENGINE_A(VIC ); break;
93 case ENGINE_A(SEC2 ); break;
94 case ENGINE_A(NVDEC ); break;
95 case ENGINE_B(NVENC ); break;
96 default:
97 args->mthd = NV_DEVICE_INFO_INVALID;
98 break;
99 }
100}
101
102static int
43nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size) 103nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
44{ 104{
45 struct nvkm_object *object = &udev->object; 105 struct nvkm_object *object = &udev->object;
@@ -48,10 +108,21 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
48 struct nvkm_instmem *imem = device->imem; 108 struct nvkm_instmem *imem = device->imem;
49 union { 109 union {
50 struct nv_device_info_v0 v0; 110 struct nv_device_info_v0 v0;
111 struct nv_device_info_v1 v1;
51 } *args = data; 112 } *args = data;
52 int ret = -ENOSYS; 113 int ret = -ENOSYS, i;
53 114
54 nvif_ioctl(object, "device info size %d\n", size); 115 nvif_ioctl(object, "device info size %d\n", size);
116 if (!(ret = nvif_unpack(ret, &data, &size, args->v1, 1, 1, true))) {
117 nvif_ioctl(object, "device info vers %d count %d\n",
118 args->v1.version, args->v1.count);
119 if (args->v1.count * sizeof(args->v1.data[0]) == size) {
120 for (i = 0; i < args->v1.count; i++)
121 nvkm_udevice_info_v1(device, &args->v1.data[i]);
122 return 0;
123 }
124 return -EINVAL;
125 } else
55 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { 126 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
56 nvif_ioctl(object, "device info vers %d\n", args->v0.version); 127 nvif_ioctl(object, "device info vers %d\n", args->v0.version);
57 } else 128 } else
@@ -103,6 +174,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
103 case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break; 174 case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
104 case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break; 175 case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
105 case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break; 176 case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
177 case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
106 default: 178 default:
107 args->v0.family = 0; 179 args->v0.family = 0;
108 break; 180 break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 48ce6699183e..3d485dbf310a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -14,12 +14,14 @@ nvkm-y += nvkm/engine/disp/gm107.o
14nvkm-y += nvkm/engine/disp/gm200.o 14nvkm-y += nvkm/engine/disp/gm200.o
15nvkm-y += nvkm/engine/disp/gp100.o 15nvkm-y += nvkm/engine/disp/gp100.o
16nvkm-y += nvkm/engine/disp/gp102.o 16nvkm-y += nvkm/engine/disp/gp102.o
17nvkm-y += nvkm/engine/disp/gv100.o
17nvkm-y += nvkm/engine/disp/vga.o 18nvkm-y += nvkm/engine/disp/vga.o
18 19
19nvkm-y += nvkm/engine/disp/head.o 20nvkm-y += nvkm/engine/disp/head.o
20nvkm-y += nvkm/engine/disp/headnv04.o 21nvkm-y += nvkm/engine/disp/headnv04.o
21nvkm-y += nvkm/engine/disp/headnv50.o 22nvkm-y += nvkm/engine/disp/headnv50.o
22nvkm-y += nvkm/engine/disp/headgf119.o 23nvkm-y += nvkm/engine/disp/headgf119.o
24nvkm-y += nvkm/engine/disp/headgv100.o
23 25
24nvkm-y += nvkm/engine/disp/ior.o 26nvkm-y += nvkm/engine/disp/ior.o
25nvkm-y += nvkm/engine/disp/dacnv50.o 27nvkm-y += nvkm/engine/disp/dacnv50.o
@@ -35,6 +37,7 @@ nvkm-y += nvkm/engine/disp/sorgf119.o
35nvkm-y += nvkm/engine/disp/sorgk104.o 37nvkm-y += nvkm/engine/disp/sorgk104.o
36nvkm-y += nvkm/engine/disp/sorgm107.o 38nvkm-y += nvkm/engine/disp/sorgm107.o
37nvkm-y += nvkm/engine/disp/sorgm200.o 39nvkm-y += nvkm/engine/disp/sorgm200.o
40nvkm-y += nvkm/engine/disp/sorgv100.o
38 41
39nvkm-y += nvkm/engine/disp/outp.o 42nvkm-y += nvkm/engine/disp/outp.o
40nvkm-y += nvkm/engine/disp/dp.o 43nvkm-y += nvkm/engine/disp/dp.o
@@ -47,6 +50,7 @@ nvkm-y += nvkm/engine/disp/hdmig84.o
47nvkm-y += nvkm/engine/disp/hdmigt215.o 50nvkm-y += nvkm/engine/disp/hdmigt215.o
48nvkm-y += nvkm/engine/disp/hdmigf119.o 51nvkm-y += nvkm/engine/disp/hdmigf119.o
49nvkm-y += nvkm/engine/disp/hdmigk104.o 52nvkm-y += nvkm/engine/disp/hdmigk104.o
53nvkm-y += nvkm/engine/disp/hdmigv100.o
50 54
51nvkm-y += nvkm/engine/disp/conn.o 55nvkm-y += nvkm/engine/disp/conn.o
52 56
@@ -63,57 +67,49 @@ nvkm-y += nvkm/engine/disp/rootgm107.o
63nvkm-y += nvkm/engine/disp/rootgm200.o 67nvkm-y += nvkm/engine/disp/rootgm200.o
64nvkm-y += nvkm/engine/disp/rootgp100.o 68nvkm-y += nvkm/engine/disp/rootgp100.o
65nvkm-y += nvkm/engine/disp/rootgp102.o 69nvkm-y += nvkm/engine/disp/rootgp102.o
70nvkm-y += nvkm/engine/disp/rootgv100.o
66 71
67nvkm-y += nvkm/engine/disp/channv50.o 72nvkm-y += nvkm/engine/disp/channv50.o
68nvkm-y += nvkm/engine/disp/changf119.o 73nvkm-y += nvkm/engine/disp/changf119.o
74nvkm-y += nvkm/engine/disp/changv100.o
69 75
70nvkm-y += nvkm/engine/disp/dmacnv50.o 76nvkm-y += nvkm/engine/disp/dmacnv50.o
71nvkm-y += nvkm/engine/disp/dmacgf119.o 77nvkm-y += nvkm/engine/disp/dmacgf119.o
72nvkm-y += nvkm/engine/disp/dmacgp102.o 78nvkm-y += nvkm/engine/disp/dmacgp102.o
79nvkm-y += nvkm/engine/disp/dmacgv100.o
73 80
74nvkm-y += nvkm/engine/disp/basenv50.o 81nvkm-y += nvkm/engine/disp/basenv50.o
75nvkm-y += nvkm/engine/disp/baseg84.o 82nvkm-y += nvkm/engine/disp/baseg84.o
76nvkm-y += nvkm/engine/disp/basegt200.o
77nvkm-y += nvkm/engine/disp/basegt215.o
78nvkm-y += nvkm/engine/disp/basegf119.o 83nvkm-y += nvkm/engine/disp/basegf119.o
79nvkm-y += nvkm/engine/disp/basegk104.o
80nvkm-y += nvkm/engine/disp/basegk110.o
81nvkm-y += nvkm/engine/disp/basegp102.o 84nvkm-y += nvkm/engine/disp/basegp102.o
82 85
83nvkm-y += nvkm/engine/disp/corenv50.o 86nvkm-y += nvkm/engine/disp/corenv50.o
84nvkm-y += nvkm/engine/disp/coreg84.o 87nvkm-y += nvkm/engine/disp/coreg84.o
85nvkm-y += nvkm/engine/disp/coreg94.o 88nvkm-y += nvkm/engine/disp/coreg94.o
86nvkm-y += nvkm/engine/disp/coregt200.o
87nvkm-y += nvkm/engine/disp/coregt215.o
88nvkm-y += nvkm/engine/disp/coregf119.o 89nvkm-y += nvkm/engine/disp/coregf119.o
89nvkm-y += nvkm/engine/disp/coregk104.o 90nvkm-y += nvkm/engine/disp/coregk104.o
90nvkm-y += nvkm/engine/disp/coregk110.o
91nvkm-y += nvkm/engine/disp/coregm107.o
92nvkm-y += nvkm/engine/disp/coregm200.o
93nvkm-y += nvkm/engine/disp/coregp100.o
94nvkm-y += nvkm/engine/disp/coregp102.o 91nvkm-y += nvkm/engine/disp/coregp102.o
92nvkm-y += nvkm/engine/disp/coregv100.o
95 93
96nvkm-y += nvkm/engine/disp/ovlynv50.o 94nvkm-y += nvkm/engine/disp/ovlynv50.o
97nvkm-y += nvkm/engine/disp/ovlyg84.o 95nvkm-y += nvkm/engine/disp/ovlyg84.o
98nvkm-y += nvkm/engine/disp/ovlygt200.o 96nvkm-y += nvkm/engine/disp/ovlygt200.o
99nvkm-y += nvkm/engine/disp/ovlygt215.o
100nvkm-y += nvkm/engine/disp/ovlygf119.o 97nvkm-y += nvkm/engine/disp/ovlygf119.o
101nvkm-y += nvkm/engine/disp/ovlygk104.o 98nvkm-y += nvkm/engine/disp/ovlygk104.o
102nvkm-y += nvkm/engine/disp/ovlygp102.o 99nvkm-y += nvkm/engine/disp/ovlygp102.o
103 100
101nvkm-y += nvkm/engine/disp/wimmgv100.o
102
103nvkm-y += nvkm/engine/disp/wndwgv100.o
104
104nvkm-y += nvkm/engine/disp/piocnv50.o 105nvkm-y += nvkm/engine/disp/piocnv50.o
105nvkm-y += nvkm/engine/disp/piocgf119.o 106nvkm-y += nvkm/engine/disp/piocgf119.o
106 107
107nvkm-y += nvkm/engine/disp/cursnv50.o 108nvkm-y += nvkm/engine/disp/cursnv50.o
108nvkm-y += nvkm/engine/disp/cursg84.o
109nvkm-y += nvkm/engine/disp/cursgt215.o
110nvkm-y += nvkm/engine/disp/cursgf119.o 109nvkm-y += nvkm/engine/disp/cursgf119.o
111nvkm-y += nvkm/engine/disp/cursgk104.o
112nvkm-y += nvkm/engine/disp/cursgp102.o 110nvkm-y += nvkm/engine/disp/cursgp102.o
111nvkm-y += nvkm/engine/disp/cursgv100.o
113 112
114nvkm-y += nvkm/engine/disp/oimmnv50.o 113nvkm-y += nvkm/engine/disp/oimmnv50.o
115nvkm-y += nvkm/engine/disp/oimmg84.o
116nvkm-y += nvkm/engine/disp/oimmgt215.o
117nvkm-y += nvkm/engine/disp/oimmgf119.o 114nvkm-y += nvkm/engine/disp/oimmgf119.o
118nvkm-y += nvkm/engine/disp/oimmgk104.o
119nvkm-y += nvkm/engine/disp/oimmgp102.o 115nvkm-y += nvkm/engine/disp/oimmgp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 93a75e5b2791..32fa94a9773f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -220,6 +220,9 @@ nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
220 struct nvkm_conn *conn; 220 struct nvkm_conn *conn;
221 struct nvkm_outp *outp; 221 struct nvkm_outp *outp;
222 222
223 if (disp->func->fini)
224 disp->func->fini(disp);
225
223 list_for_each_entry(outp, &disp->outp, head) { 226 list_for_each_entry(outp, &disp->outp, head) {
224 nvkm_outp_fini(outp); 227 nvkm_outp_fini(outp);
225 } 228 }
@@ -237,6 +240,7 @@ nvkm_disp_init(struct nvkm_engine *engine)
237 struct nvkm_disp *disp = nvkm_disp(engine); 240 struct nvkm_disp *disp = nvkm_disp(engine);
238 struct nvkm_conn *conn; 241 struct nvkm_conn *conn;
239 struct nvkm_outp *outp; 242 struct nvkm_outp *outp;
243 struct nvkm_ior *ior;
240 244
241 list_for_each_entry(conn, &disp->conn, head) { 245 list_for_each_entry(conn, &disp->conn, head) {
242 nvkm_conn_init(conn); 246 nvkm_conn_init(conn);
@@ -246,6 +250,19 @@ nvkm_disp_init(struct nvkm_engine *engine)
246 nvkm_outp_init(outp); 250 nvkm_outp_init(outp);
247 } 251 }
248 252
253 if (disp->func->init) {
254 int ret = disp->func->init(disp);
255 if (ret)
256 return ret;
257 }
258
259 /* Set 'normal' (ie. when it's attached to a head) state for
260 * each output resource to 'fully enabled'.
261 */
262 list_for_each_entry(ior, &disp->ior, head) {
263 ior->func->power(ior, true, true, true, true, true);
264 }
265
249 return 0; 266 return 0;
250} 267}
251 268
@@ -376,6 +393,12 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
376 if (ret) 393 if (ret)
377 return ret; 394 return ret;
378 395
396 if (disp->func->oneinit) {
397 ret = disp->func->oneinit(disp);
398 if (ret)
399 return ret;
400 }
401
379 i = 0; 402 i = 0;
380 list_for_each_entry(head, &disp->head, head) 403 list_for_each_entry(head, &disp->head, head)
381 i = max(i, head->id + 1); 404 i = max(i, head->id + 1);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
index 6d17630a3dee..01253f4a9946 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
@@ -21,10 +21,7 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28 25
29static const struct nv50_disp_mthd_list 26static const struct nv50_disp_mthd_list
30g84_disp_base_mthd_base = { 27g84_disp_base_mthd_base = {
@@ -56,8 +53,8 @@ g84_disp_base_mthd_base = {
56 } 53 }
57}; 54};
58 55
59const struct nv50_disp_chan_mthd 56static const struct nv50_disp_chan_mthd
60g84_disp_base_chan_mthd = { 57g84_disp_base_mthd = {
61 .name = "Base", 58 .name = "Base",
62 .addr = 0x000540, 59 .addr = 0x000540,
63 .prev = 0x000004, 60 .prev = 0x000004,
@@ -68,13 +65,10 @@ g84_disp_base_chan_mthd = {
68 } 65 }
69}; 66};
70 67
71const struct nv50_disp_dmac_oclass 68int
72g84_disp_base_oclass = { 69g84_disp_base_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
73 .base.oclass = G82_DISP_BASE_CHANNEL_DMA, 70 struct nv50_disp *disp, struct nvkm_object **pobject)
74 .base.minver = 0, 71{
75 .base.maxver = 0, 72 return nv50_disp_base_new_(&nv50_disp_dmac_func, &g84_disp_base_mthd,
76 .ctor = nv50_disp_base_new, 73 disp, 1, oclass, argv, argc, pobject);
77 .func = &nv50_disp_dmac_func, 74}
78 .mthd = &g84_disp_base_chan_mthd,
79 .chid = 1,
80};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
index ebcb925e9d90..389e19dfc514 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
@@ -21,10 +21,7 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28 25
29static const struct nv50_disp_mthd_list 26static const struct nv50_disp_mthd_list
30gf119_disp_base_mthd_base = { 27gf119_disp_base_mthd_base = {
@@ -91,7 +88,7 @@ gf119_disp_base_mthd_image = {
91}; 88};
92 89
93const struct nv50_disp_chan_mthd 90const struct nv50_disp_chan_mthd
94gf119_disp_base_chan_mthd = { 91gf119_disp_base_mthd = {
95 .name = "Base", 92 .name = "Base",
96 .addr = 0x001000, 93 .addr = 0x001000,
97 .prev = -0x020000, 94 .prev = -0x020000,
@@ -102,13 +99,10 @@ gf119_disp_base_chan_mthd = {
102 } 99 }
103}; 100};
104 101
105const struct nv50_disp_dmac_oclass 102int
106gf119_disp_base_oclass = { 103gf119_disp_base_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
107 .base.oclass = GF110_DISP_BASE_CHANNEL_DMA, 104 struct nv50_disp *disp, struct nvkm_object **pobject)
108 .base.minver = 0, 105{
109 .base.maxver = 0, 106 return nv50_disp_base_new_(&gf119_disp_dmac_func, &gf119_disp_base_mthd,
110 .ctor = nv50_disp_base_new, 107 disp, 1, oclass, argv, argc, pobject);
111 .func = &gf119_disp_dmac_func, 108}
112 .mthd = &gf119_disp_base_chan_mthd,
113 .chid = 1,
114};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c
index 8a3cdeef8d2c..0cb23d673aa0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c
@@ -21,18 +21,12 @@
21 * 21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26 25
27#include <nvif/class.h> 26int
28 27gp102_disp_base_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
29const struct nv50_disp_dmac_oclass 28 struct nv50_disp *disp, struct nvkm_object **pobject)
30gp102_disp_base_oclass = { 29{
31 .base.oclass = GK110_DISP_BASE_CHANNEL_DMA, 30 return nv50_disp_base_new_(&gp102_disp_dmac_func, &gf119_disp_base_mthd,
32 .base.minver = 0, 31 disp, 1, oclass, argv, argc, pobject);
33 .base.maxver = 0, 32}
34 .ctor = nv50_disp_base_new,
35 .func = &gp102_disp_dmac_func,
36 .mthd = &gf119_disp_base_chan_mthd,
37 .chid = 1,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
index f1d6b820d482..19eb7dde01f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
@@ -21,33 +21,30 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "head.h" 25#include "head.h"
26#include "rootnv50.h"
27 26
28#include <core/client.h> 27#include <core/client.h>
29 28
30#include <nvif/class.h>
31#include <nvif/cl507c.h> 29#include <nvif/cl507c.h>
32#include <nvif/unpack.h> 30#include <nvif/unpack.h>
33 31
34int 32int
35nv50_disp_base_new(const struct nv50_disp_dmac_func *func, 33nv50_disp_base_new_(const struct nv50_disp_chan_func *func,
36 const struct nv50_disp_chan_mthd *mthd, 34 const struct nv50_disp_chan_mthd *mthd,
37 struct nv50_disp_root *root, int chid, 35 struct nv50_disp *disp, int chid,
38 const struct nvkm_oclass *oclass, void *data, u32 size, 36 const struct nvkm_oclass *oclass, void *argv, u32 argc,
39 struct nvkm_object **pobject) 37 struct nvkm_object **pobject)
40{ 38{
41 union { 39 union {
42 struct nv50_disp_base_channel_dma_v0 v0; 40 struct nv50_disp_base_channel_dma_v0 v0;
43 } *args = data; 41 } *args = argv;
44 struct nvkm_object *parent = oclass->parent; 42 struct nvkm_object *parent = oclass->parent;
45 struct nv50_disp *disp = root->disp;
46 int head, ret = -ENOSYS; 43 int head, ret = -ENOSYS;
47 u64 push; 44 u64 push;
48 45
49 nvif_ioctl(parent, "create disp base channel dma size %d\n", size); 46 nvif_ioctl(parent, "create disp base channel dma size %d\n", argc);
50 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { 47 if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
51 nvif_ioctl(parent, "create disp base channel dma vers %d " 48 nvif_ioctl(parent, "create disp base channel dma vers %d "
52 "pushbuf %016llx head %d\n", 49 "pushbuf %016llx head %d\n",
53 args->v0.version, args->v0.pushbuf, args->v0.head); 50 args->v0.version, args->v0.pushbuf, args->v0.head);
@@ -58,7 +55,7 @@ nv50_disp_base_new(const struct nv50_disp_dmac_func *func,
58 } else 55 } else
59 return ret; 56 return ret;
60 57
61 return nv50_disp_dmac_new_(func, mthd, root, chid + head, 58 return nv50_disp_dmac_new_(func, mthd, disp, chid + head,
62 head, push, oclass, pobject); 59 head, push, oclass, pobject);
63} 60}
64 61
@@ -102,7 +99,7 @@ nv50_disp_base_mthd_image = {
102}; 99};
103 100
104static const struct nv50_disp_chan_mthd 101static const struct nv50_disp_chan_mthd
105nv50_disp_base_chan_mthd = { 102nv50_disp_base_mthd = {
106 .name = "Base", 103 .name = "Base",
107 .addr = 0x000540, 104 .addr = 0x000540,
108 .prev = 0x000004, 105 .prev = 0x000004,
@@ -113,13 +110,10 @@ nv50_disp_base_chan_mthd = {
113 } 110 }
114}; 111};
115 112
116const struct nv50_disp_dmac_oclass 113int
117nv50_disp_base_oclass = { 114nv50_disp_base_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
118 .base.oclass = NV50_DISP_BASE_CHANNEL_DMA, 115 struct nv50_disp *disp, struct nvkm_object **pobject)
119 .base.minver = 0, 116{
120 .base.maxver = 0, 117 return nv50_disp_base_new_(&nv50_disp_dmac_func, &nv50_disp_base_mthd,
121 .ctor = nv50_disp_base_new, 118 disp, 1, oclass, argv, argc, pobject);
122 .func = &nv50_disp_dmac_func, 119}
123 .mthd = &nv50_disp_base_chan_mthd,
124 .chid = 1,
125};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
index 17a3d835cb42..29e6dd58ac48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
@@ -47,3 +47,16 @@ gf119_disp_chan_uevent = {
47 .init = gf119_disp_chan_uevent_init, 47 .init = gf119_disp_chan_uevent_init,
48 .fini = gf119_disp_chan_uevent_fini, 48 .fini = gf119_disp_chan_uevent_fini,
49}; 49};
50
51void
52gf119_disp_chan_intr(struct nv50_disp_chan *chan, bool en)
53{
54 struct nvkm_device *device = chan->disp->base.engine.subdev.device;
55 const u64 mask = 0x00000001 << chan->chid.user;
56 if (!en) {
57 nvkm_mask(device, 0x610090, mask, 0x00000000);
58 nvkm_mask(device, 0x6100a0, mask, 0x00000000);
59 } else {
60 nvkm_mask(device, 0x6100a0, mask, mask);
61 }
62}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changv100.c
index a13315147391..75247c9c7e10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changv100.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2015 Red Hat Inc. 2 * Copyright 2018 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -18,17 +18,17 @@
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */ 21 */
24#include "changk104.h" 22#include "channv50.h"
25
26#include <nvif/class.h>
27 23
28const struct nvkm_fifo_chan_oclass 24const struct nvkm_event_func
29gm200_fifo_gpfifo_oclass = { 25gv100_disp_chan_uevent = {
30 .base.oclass = MAXWELL_CHANNEL_GPFIFO_A, 26 .ctor = nv50_disp_chan_uevent_ctor,
31 .base.minver = 0,
32 .base.maxver = 0,
33 .ctor = gk104_fifo_gpfifo_new,
34}; 27};
28
29u64
30gv100_disp_chan_user(struct nv50_disp_chan *chan, u64 *psize)
31{
32 *psize = 0x1000;
33 return 0x690000 + ((chan->chid.user - 1) * 0x1000);
34}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 723dcbde2ac2..57719f675eec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -26,6 +26,7 @@
26 26
27#include <core/client.h> 27#include <core/client.h>
28#include <core/notify.h> 28#include <core/notify.h>
29#include <core/oproxy.h>
29#include <core/ramht.h> 30#include <core/ramht.h>
30#include <engine/dma.h> 31#include <engine/dma.h>
31 32
@@ -65,7 +66,7 @@ nv50_disp_mthd_list(struct nv50_disp *disp, int debug, u32 base, int c,
65void 66void
66nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug) 67nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
67{ 68{
68 struct nv50_disp *disp = chan->root->disp; 69 struct nv50_disp *disp = chan->disp;
69 struct nvkm_subdev *subdev = &disp->base.engine.subdev; 70 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
70 const struct nv50_disp_chan_mthd *mthd = chan->mthd; 71 const struct nv50_disp_chan_mthd *mthd = chan->mthd;
71 const struct nv50_disp_mthd_list *list; 72 const struct nv50_disp_mthd_list *list;
@@ -154,13 +155,29 @@ nv50_disp_chan_uevent = {
154 .fini = nv50_disp_chan_uevent_fini, 155 .fini = nv50_disp_chan_uevent_fini,
155}; 156};
156 157
158u64
159nv50_disp_chan_user(struct nv50_disp_chan *chan, u64 *psize)
160{
161 *psize = 0x1000;
162 return 0x640000 + (chan->chid.user * 0x1000);
163}
164
165void
166nv50_disp_chan_intr(struct nv50_disp_chan *chan, bool en)
167{
168 struct nvkm_device *device = chan->disp->base.engine.subdev.device;
169 const u64 mask = 0x00010001 << chan->chid.user;
170 const u64 data = en ? 0x00010000 : 0x00000000;
171 nvkm_mask(device, 0x610028, mask, data);
172}
173
157static int 174static int
158nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data) 175nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
159{ 176{
160 struct nv50_disp_chan *chan = nv50_disp_chan(object); 177 struct nv50_disp_chan *chan = nv50_disp_chan(object);
161 struct nv50_disp *disp = chan->root->disp; 178 struct nvkm_device *device = chan->disp->base.engine.subdev.device;
162 struct nvkm_device *device = disp->base.engine.subdev.device; 179 u64 size, base = chan->func->user(chan, &size);
163 *data = nvkm_rd32(device, 0x640000 + (chan->chid.user * 0x1000) + addr); 180 *data = nvkm_rd32(device, base + addr);
164 return 0; 181 return 0;
165} 182}
166 183
@@ -168,9 +185,9 @@ static int
168nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data) 185nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
169{ 186{
170 struct nv50_disp_chan *chan = nv50_disp_chan(object); 187 struct nv50_disp_chan *chan = nv50_disp_chan(object);
171 struct nv50_disp *disp = chan->root->disp; 188 struct nvkm_device *device = chan->disp->base.engine.subdev.device;
172 struct nvkm_device *device = disp->base.engine.subdev.device; 189 u64 size, base = chan->func->user(chan, &size);
173 nvkm_wr32(device, 0x640000 + (chan->chid.user * 0x1000) + addr, data); 190 nvkm_wr32(device, base + addr, data);
174 return 0; 191 return 0;
175} 192}
176 193
@@ -179,7 +196,7 @@ nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
179 struct nvkm_event **pevent) 196 struct nvkm_event **pevent)
180{ 197{
181 struct nv50_disp_chan *chan = nv50_disp_chan(object); 198 struct nv50_disp_chan *chan = nv50_disp_chan(object);
182 struct nv50_disp *disp = chan->root->disp; 199 struct nv50_disp *disp = chan->disp;
183 switch (type) { 200 switch (type) {
184 case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT: 201 case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
185 *pevent = &disp->uevent; 202 *pevent = &disp->uevent;
@@ -195,34 +212,83 @@ nv50_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
195 enum nvkm_object_map *type, u64 *addr, u64 *size) 212 enum nvkm_object_map *type, u64 *addr, u64 *size)
196{ 213{
197 struct nv50_disp_chan *chan = nv50_disp_chan(object); 214 struct nv50_disp_chan *chan = nv50_disp_chan(object);
198 struct nv50_disp *disp = chan->root->disp; 215 struct nvkm_device *device = chan->disp->base.engine.subdev.device;
199 struct nvkm_device *device = disp->base.engine.subdev.device; 216 const u64 base = device->func->resource_addr(device, 0);
200 *type = NVKM_OBJECT_MAP_IO; 217 *type = NVKM_OBJECT_MAP_IO;
201 *addr = device->func->resource_addr(device, 0) + 218 *addr = base + chan->func->user(chan, size);
202 0x640000 + (chan->chid.user * 0x1000);
203 *size = 0x001000;
204 return 0; 219 return 0;
205} 220}
206 221
222struct nv50_disp_chan_object {
223 struct nvkm_oproxy oproxy;
224 struct nv50_disp *disp;
225 int hash;
226};
227
228static void
229nv50_disp_chan_child_del_(struct nvkm_oproxy *base)
230{
231 struct nv50_disp_chan_object *object =
232 container_of(base, typeof(*object), oproxy);
233 nvkm_ramht_remove(object->disp->ramht, object->hash);
234}
235
236static const struct nvkm_oproxy_func
237nv50_disp_chan_child_func_ = {
238 .dtor[0] = nv50_disp_chan_child_del_,
239};
240
207static int 241static int
208nv50_disp_chan_child_new(const struct nvkm_oclass *oclass, 242nv50_disp_chan_child_new(const struct nvkm_oclass *oclass,
209 void *data, u32 size, struct nvkm_object **pobject) 243 void *argv, u32 argc, struct nvkm_object **pobject)
210{ 244{
211 struct nv50_disp_chan *chan = nv50_disp_chan(oclass->parent); 245 struct nv50_disp_chan *chan = nv50_disp_chan(oclass->parent);
212 return chan->func->child_new(chan, oclass, data, size, pobject); 246 struct nv50_disp *disp = chan->disp;
247 struct nvkm_device *device = disp->base.engine.subdev.device;
248 const struct nvkm_device_oclass *sclass = oclass->priv;
249 struct nv50_disp_chan_object *object;
250 int ret;
251
252 if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
253 return -ENOMEM;
254 nvkm_oproxy_ctor(&nv50_disp_chan_child_func_, oclass, &object->oproxy);
255 object->disp = disp;
256 *pobject = &object->oproxy.base;
257
258 ret = sclass->ctor(device, oclass, argv, argc, &object->oproxy.object);
259 if (ret)
260 return ret;
261
262 object->hash = chan->func->bind(chan, object->oproxy.object,
263 oclass->handle);
264 if (object->hash < 0)
265 return object->hash;
266
267 return 0;
213} 268}
214 269
215static int 270static int
216nv50_disp_chan_child_get(struct nvkm_object *object, int index, 271nv50_disp_chan_child_get(struct nvkm_object *object, int index,
217 struct nvkm_oclass *oclass) 272 struct nvkm_oclass *sclass)
218{ 273{
219 struct nv50_disp_chan *chan = nv50_disp_chan(object); 274 struct nv50_disp_chan *chan = nv50_disp_chan(object);
220 if (chan->func->child_get) { 275 struct nvkm_device *device = chan->disp->base.engine.subdev.device;
221 int ret = chan->func->child_get(chan, index, oclass); 276 const struct nvkm_device_oclass *oclass = NULL;
222 if (ret == 0) 277
223 oclass->ctor = nv50_disp_chan_child_new; 278 if (chan->func->bind)
224 return ret; 279 sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ);
280 else
281 sclass->engine = NULL;
282
283 if (sclass->engine && sclass->engine->func->base.sclass) {
284 sclass->engine->func->base.sclass(sclass, index, &oclass);
285 if (oclass) {
286 sclass->ctor = nv50_disp_chan_child_new,
287 sclass->priv = oclass;
288 return 0;
289 }
225 } 290 }
291
226 return -EINVAL; 292 return -EINVAL;
227} 293}
228 294
@@ -231,6 +297,7 @@ nv50_disp_chan_fini(struct nvkm_object *object, bool suspend)
231{ 297{
232 struct nv50_disp_chan *chan = nv50_disp_chan(object); 298 struct nv50_disp_chan *chan = nv50_disp_chan(object);
233 chan->func->fini(chan); 299 chan->func->fini(chan);
300 chan->func->intr(chan, false);
234 return 0; 301 return 0;
235} 302}
236 303
@@ -238,6 +305,7 @@ static int
238nv50_disp_chan_init(struct nvkm_object *object) 305nv50_disp_chan_init(struct nvkm_object *object)
239{ 306{
240 struct nv50_disp_chan *chan = nv50_disp_chan(object); 307 struct nv50_disp_chan *chan = nv50_disp_chan(object);
308 chan->func->intr(chan, true);
241 return chan->func->init(chan); 309 return chan->func->init(chan);
242} 310}
243 311
@@ -245,10 +313,11 @@ static void *
245nv50_disp_chan_dtor(struct nvkm_object *object) 313nv50_disp_chan_dtor(struct nvkm_object *object)
246{ 314{
247 struct nv50_disp_chan *chan = nv50_disp_chan(object); 315 struct nv50_disp_chan *chan = nv50_disp_chan(object);
248 struct nv50_disp *disp = chan->root->disp; 316 struct nv50_disp *disp = chan->disp;
249 if (chan->chid.user >= 0) 317 if (chan->chid.user >= 0)
250 disp->chan[chan->chid.user] = NULL; 318 disp->chan[chan->chid.user] = NULL;
251 return chan->func->dtor ? chan->func->dtor(chan) : chan; 319 nvkm_memory_unref(&chan->memory);
320 return chan;
252} 321}
253 322
254static const struct nvkm_object_func 323static const struct nvkm_object_func
@@ -264,18 +333,22 @@ nv50_disp_chan = {
264}; 333};
265 334
266int 335int
267nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func, 336nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
268 const struct nv50_disp_chan_mthd *mthd, 337 const struct nv50_disp_chan_mthd *mthd,
269 struct nv50_disp_root *root, int ctrl, int user, int head, 338 struct nv50_disp *disp, int ctrl, int user, int head,
270 const struct nvkm_oclass *oclass, 339 const struct nvkm_oclass *oclass,
271 struct nv50_disp_chan *chan) 340 struct nvkm_object **pobject)
272{ 341{
273 struct nv50_disp *disp = root->disp; 342 struct nv50_disp_chan *chan;
343
344 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
345 return -ENOMEM;
346 *pobject = &chan->object;
274 347
275 nvkm_object_ctor(&nv50_disp_chan, oclass, &chan->object); 348 nvkm_object_ctor(&nv50_disp_chan, oclass, &chan->object);
276 chan->func = func; 349 chan->func = func;
277 chan->mthd = mthd; 350 chan->mthd = mthd;
278 chan->root = root; 351 chan->disp = disp;
279 chan->chid.ctrl = ctrl; 352 chan->chid.ctrl = ctrl;
280 chan->chid.user = user; 353 chan->chid.user = user;
281 chan->head = head; 354 chan->head = head;
@@ -287,20 +360,3 @@ nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
287 disp->chan[chan->chid.user] = chan; 360 disp->chan[chan->chid.user] = chan;
288 return 0; 361 return 0;
289} 362}
290
291int
292nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
293 const struct nv50_disp_chan_mthd *mthd,
294 struct nv50_disp_root *root, int ctrl, int user, int head,
295 const struct nvkm_oclass *oclass,
296 struct nvkm_object **pobject)
297{
298 struct nv50_disp_chan *chan;
299
300 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
301 return -ENOMEM;
302 *pobject = &chan->object;
303
304 return nv50_disp_chan_ctor(func, mthd, root, ctrl, user,
305 head, oclass, chan);
306}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index 40681db91a02..adc9d76d09cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -4,11 +4,12 @@
4#define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object) 4#define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object)
5#include <core/object.h> 5#include <core/object.h>
6#include "nv50.h" 6#include "nv50.h"
7struct nv50_disp_root;
7 8
8struct nv50_disp_chan { 9struct nv50_disp_chan {
9 const struct nv50_disp_chan_func *func; 10 const struct nv50_disp_chan_func *func;
10 const struct nv50_disp_chan_mthd *mthd; 11 const struct nv50_disp_chan_mthd *mthd;
11 struct nv50_disp_root *root; 12 struct nv50_disp *disp;
12 13
13 struct { 14 struct {
14 int ctrl; 15 int ctrl;
@@ -17,36 +18,133 @@ struct nv50_disp_chan {
17 int head; 18 int head;
18 19
19 struct nvkm_object object; 20 struct nvkm_object object;
21
22 struct nvkm_memory *memory;
23 u64 push;
20}; 24};
21 25
22struct nv50_disp_chan_func { 26struct nv50_disp_chan_func {
23 void *(*dtor)(struct nv50_disp_chan *);
24 int (*init)(struct nv50_disp_chan *); 27 int (*init)(struct nv50_disp_chan *);
25 void (*fini)(struct nv50_disp_chan *); 28 void (*fini)(struct nv50_disp_chan *);
26 int (*child_get)(struct nv50_disp_chan *, int index, 29 void (*intr)(struct nv50_disp_chan *, bool en);
27 struct nvkm_oclass *); 30 u64 (*user)(struct nv50_disp_chan *, u64 *size);
28 int (*child_new)(struct nv50_disp_chan *, const struct nvkm_oclass *, 31 int (*bind)(struct nv50_disp_chan *, struct nvkm_object *, u32 handle);
29 void *data, u32 size, struct nvkm_object **);
30}; 32};
31 33
32int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
33 const struct nv50_disp_chan_mthd *,
34 struct nv50_disp_root *, int ctrl, int user, int head,
35 const struct nvkm_oclass *, struct nv50_disp_chan *);
36int nv50_disp_chan_new_(const struct nv50_disp_chan_func *, 34int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
37 const struct nv50_disp_chan_mthd *, 35 const struct nv50_disp_chan_mthd *,
38 struct nv50_disp_root *, int ctrl, int user, int head, 36 struct nv50_disp *, int ctrl, int user, int head,
37 const struct nvkm_oclass *, struct nvkm_object **);
38int nv50_disp_dmac_new_(const struct nv50_disp_chan_func *,
39 const struct nv50_disp_chan_mthd *,
40 struct nv50_disp *, int chid, int head, u64 push,
39 const struct nvkm_oclass *, struct nvkm_object **); 41 const struct nvkm_oclass *, struct nvkm_object **);
40 42
43void nv50_disp_chan_intr(struct nv50_disp_chan *, bool);
44u64 nv50_disp_chan_user(struct nv50_disp_chan *, u64 *);
41extern const struct nv50_disp_chan_func nv50_disp_pioc_func; 45extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
42extern const struct nv50_disp_chan_func gf119_disp_pioc_func; 46extern const struct nv50_disp_chan_func nv50_disp_dmac_func;
43 47int nv50_disp_dmac_bind(struct nv50_disp_chan *, struct nvkm_object *, u32);
44extern const struct nvkm_event_func nv50_disp_chan_uevent; 48extern const struct nv50_disp_chan_func nv50_disp_core_func;
45int nv50_disp_chan_uevent_ctor(struct nvkm_object *, void *, u32,
46 struct nvkm_notify *);
47void nv50_disp_chan_uevent_send(struct nv50_disp *, int);
48 49
49extern const struct nvkm_event_func gf119_disp_chan_uevent; 50void gf119_disp_chan_intr(struct nv50_disp_chan *, bool);
51extern const struct nv50_disp_chan_func gf119_disp_pioc_func;
52extern const struct nv50_disp_chan_func gf119_disp_dmac_func;
53void gf119_disp_dmac_fini(struct nv50_disp_chan *);
54int gf119_disp_dmac_bind(struct nv50_disp_chan *, struct nvkm_object *, u32);
55extern const struct nv50_disp_chan_func gf119_disp_core_func;
56void gf119_disp_core_fini(struct nv50_disp_chan *);
57
58extern const struct nv50_disp_chan_func gp102_disp_dmac_func;
59
60u64 gv100_disp_chan_user(struct nv50_disp_chan *, u64 *);
61int gv100_disp_dmac_init(struct nv50_disp_chan *);
62void gv100_disp_dmac_fini(struct nv50_disp_chan *);
63int gv100_disp_dmac_bind(struct nv50_disp_chan *, struct nvkm_object *, u32);
64
65int nv50_disp_curs_new_(const struct nv50_disp_chan_func *,
66 struct nv50_disp *, int ctrl, int user,
67 const struct nvkm_oclass *, void *argv, u32 argc,
68 struct nvkm_object **);
69int nv50_disp_oimm_new_(const struct nv50_disp_chan_func *,
70 struct nv50_disp *, int ctrl, int user,
71 const struct nvkm_oclass *, void *argv, u32 argc,
72 struct nvkm_object **);
73int nv50_disp_base_new_(const struct nv50_disp_chan_func *,
74 const struct nv50_disp_chan_mthd *,
75 struct nv50_disp *, int chid,
76 const struct nvkm_oclass *, void *argv, u32 argc,
77 struct nvkm_object **);
78int nv50_disp_core_new_(const struct nv50_disp_chan_func *,
79 const struct nv50_disp_chan_mthd *,
80 struct nv50_disp *, int chid,
81 const struct nvkm_oclass *oclass, void *argv, u32 argc,
82 struct nvkm_object **);
83int nv50_disp_ovly_new_(const struct nv50_disp_chan_func *,
84 const struct nv50_disp_chan_mthd *,
85 struct nv50_disp *, int chid,
86 const struct nvkm_oclass *, void *argv, u32 argc,
87 struct nvkm_object **);
88
89int nv50_disp_curs_new(const struct nvkm_oclass *, void *, u32,
90 struct nv50_disp *, struct nvkm_object **);
91int nv50_disp_oimm_new(const struct nvkm_oclass *, void *, u32,
92 struct nv50_disp *, struct nvkm_object **);
93int nv50_disp_base_new(const struct nvkm_oclass *, void *, u32,
94 struct nv50_disp *, struct nvkm_object **);
95int nv50_disp_core_new(const struct nvkm_oclass *, void *, u32,
96 struct nv50_disp *, struct nvkm_object **);
97int nv50_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
98 struct nv50_disp *, struct nvkm_object **);
99
100int g84_disp_base_new(const struct nvkm_oclass *, void *, u32,
101 struct nv50_disp *, struct nvkm_object **);
102int g84_disp_core_new(const struct nvkm_oclass *, void *, u32,
103 struct nv50_disp *, struct nvkm_object **);
104int g84_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
105 struct nv50_disp *, struct nvkm_object **);
106
107int g94_disp_core_new(const struct nvkm_oclass *, void *, u32,
108 struct nv50_disp *, struct nvkm_object **);
109
110int gt200_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
111 struct nv50_disp *, struct nvkm_object **);
112
113int gf119_disp_curs_new(const struct nvkm_oclass *, void *, u32,
114 struct nv50_disp *, struct nvkm_object **);
115int gf119_disp_oimm_new(const struct nvkm_oclass *, void *, u32,
116 struct nv50_disp *, struct nvkm_object **);
117int gf119_disp_base_new(const struct nvkm_oclass *, void *, u32,
118 struct nv50_disp *, struct nvkm_object **);
119int gf119_disp_core_new(const struct nvkm_oclass *, void *, u32,
120 struct nv50_disp *, struct nvkm_object **);
121int gf119_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
122 struct nv50_disp *, struct nvkm_object **);
123
124int gk104_disp_core_new(const struct nvkm_oclass *, void *, u32,
125 struct nv50_disp *, struct nvkm_object **);
126int gk104_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
127 struct nv50_disp *, struct nvkm_object **);
128
129int gp102_disp_curs_new(const struct nvkm_oclass *, void *, u32,
130 struct nv50_disp *, struct nvkm_object **);
131int gp102_disp_oimm_new(const struct nvkm_oclass *, void *, u32,
132 struct nv50_disp *, struct nvkm_object **);
133int gp102_disp_base_new(const struct nvkm_oclass *, void *, u32,
134 struct nv50_disp *, struct nvkm_object **);
135int gp102_disp_core_new(const struct nvkm_oclass *, void *, u32,
136 struct nv50_disp *, struct nvkm_object **);
137int gp102_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
138 struct nv50_disp *, struct nvkm_object **);
139
140int gv100_disp_curs_new(const struct nvkm_oclass *, void *, u32,
141 struct nv50_disp *, struct nvkm_object **);
142int gv100_disp_wimm_new(const struct nvkm_oclass *, void *, u32,
143 struct nv50_disp *, struct nvkm_object **);
144int gv100_disp_core_new(const struct nvkm_oclass *, void *, u32,
145 struct nv50_disp *, struct nvkm_object **);
146int gv100_disp_wndw_new(const struct nvkm_oclass *, void *, u32,
147 struct nv50_disp *, struct nvkm_object **);
50 148
51struct nv50_disp_mthd_list { 149struct nv50_disp_mthd_list {
52 u32 mthd; 150 u32 mthd;
@@ -76,64 +174,18 @@ extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
76extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior; 174extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
77extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image; 175extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
78 176
79extern const struct nv50_disp_chan_mthd g84_disp_core_chan_mthd; 177extern const struct nv50_disp_chan_mthd g84_disp_core_mthd;
80extern const struct nv50_disp_mthd_list g84_disp_core_mthd_dac; 178extern const struct nv50_disp_mthd_list g84_disp_core_mthd_dac;
81extern const struct nv50_disp_mthd_list g84_disp_core_mthd_head; 179extern const struct nv50_disp_mthd_list g84_disp_core_mthd_head;
82extern const struct nv50_disp_chan_mthd g84_disp_base_chan_mthd;
83extern const struct nv50_disp_chan_mthd g84_disp_ovly_chan_mthd;
84 180
85extern const struct nv50_disp_chan_mthd g94_disp_core_chan_mthd; 181extern const struct nv50_disp_chan_mthd g94_disp_core_mthd;
86 182
87extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_base; 183extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_base;
88extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_dac; 184extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_dac;
89extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_sor; 185extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_sor;
90extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior; 186extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior;
91extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd; 187extern const struct nv50_disp_chan_mthd gf119_disp_base_mthd;
92
93extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
94extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd;
95
96struct nv50_disp_pioc_oclass {
97 int (*ctor)(const struct nv50_disp_chan_func *,
98 const struct nv50_disp_chan_mthd *,
99 struct nv50_disp_root *, int ctrl, int user,
100 const struct nvkm_oclass *, void *data, u32 size,
101 struct nvkm_object **);
102 struct nvkm_sclass base;
103 const struct nv50_disp_chan_func *func;
104 const struct nv50_disp_chan_mthd *mthd;
105 struct {
106 int ctrl;
107 int user;
108 } chid;
109};
110
111extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass;
112extern const struct nv50_disp_pioc_oclass nv50_disp_curs_oclass;
113
114extern const struct nv50_disp_pioc_oclass g84_disp_oimm_oclass;
115extern const struct nv50_disp_pioc_oclass g84_disp_curs_oclass;
116
117extern const struct nv50_disp_pioc_oclass gt215_disp_oimm_oclass;
118extern const struct nv50_disp_pioc_oclass gt215_disp_curs_oclass;
119
120extern const struct nv50_disp_pioc_oclass gf119_disp_oimm_oclass;
121extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass;
122
123extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass;
124extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
125
126extern const struct nv50_disp_pioc_oclass gp102_disp_oimm_oclass;
127extern const struct nv50_disp_pioc_oclass gp102_disp_curs_oclass;
128 188
129int nv50_disp_curs_new(const struct nv50_disp_chan_func *, 189extern const struct nv50_disp_chan_mthd gk104_disp_core_mthd;
130 const struct nv50_disp_chan_mthd *, 190extern const struct nv50_disp_chan_mthd gk104_disp_ovly_mthd;
131 struct nv50_disp_root *, int ctrl, int user,
132 const struct nvkm_oclass *, void *data, u32 size,
133 struct nvkm_object **);
134int nv50_disp_oimm_new(const struct nv50_disp_chan_func *,
135 const struct nv50_disp_chan_mthd *,
136 struct nv50_disp_root *, int ctrl, int user,
137 const struct nvkm_oclass *, void *data, u32 size,
138 struct nvkm_object **);
139#endif 191#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
index 1baa5c34b327..cfc54aad3e7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
@@ -21,10 +21,7 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28 25
29const struct nv50_disp_mthd_list 26const struct nv50_disp_mthd_list
30g84_disp_core_mthd_dac = { 27g84_disp_core_mthd_dac = {
@@ -91,7 +88,7 @@ g84_disp_core_mthd_head = {
91}; 88};
92 89
93const struct nv50_disp_chan_mthd 90const struct nv50_disp_chan_mthd
94g84_disp_core_chan_mthd = { 91g84_disp_core_mthd = {
95 .name = "Core", 92 .name = "Core",
96 .addr = 0x000000, 93 .addr = 0x000000,
97 .prev = 0x000004, 94 .prev = 0x000004,
@@ -105,13 +102,10 @@ g84_disp_core_chan_mthd = {
105 } 102 }
106}; 103};
107 104
108const struct nv50_disp_dmac_oclass 105int
109g84_disp_core_oclass = { 106g84_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
110 .base.oclass = G82_DISP_CORE_CHANNEL_DMA, 107 struct nv50_disp *disp, struct nvkm_object **pobject)
111 .base.minver = 0, 108{
112 .base.maxver = 0, 109 return nv50_disp_core_new_(&nv50_disp_core_func, &g84_disp_core_mthd,
113 .ctor = nv50_disp_core_new, 110 disp, 0, oclass, argv, argc, pobject);
114 .func = &nv50_disp_core_func, 111}
115 .mthd = &g84_disp_core_chan_mthd,
116 .chid = 0,
117};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
index c65c9f3ff69f..e911925f1182 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
@@ -21,10 +21,7 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28 25
29static const struct nv50_disp_mthd_list 26static const struct nv50_disp_mthd_list
30g94_disp_core_mthd_sor = { 27g94_disp_core_mthd_sor = {
@@ -37,7 +34,7 @@ g94_disp_core_mthd_sor = {
37}; 34};
38 35
39const struct nv50_disp_chan_mthd 36const struct nv50_disp_chan_mthd
40g94_disp_core_chan_mthd = { 37g94_disp_core_mthd = {
41 .name = "Core", 38 .name = "Core",
42 .addr = 0x000000, 39 .addr = 0x000000,
43 .prev = 0x000004, 40 .prev = 0x000004,
@@ -51,13 +48,10 @@ g94_disp_core_chan_mthd = {
51 } 48 }
52}; 49};
53 50
54const struct nv50_disp_dmac_oclass 51int
55g94_disp_core_oclass = { 52g94_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
56 .base.oclass = GT206_DISP_CORE_CHANNEL_DMA, 53 struct nv50_disp *disp, struct nvkm_object **pobject)
57 .base.minver = 0, 54{
58 .base.maxver = 0, 55 return nv50_disp_core_new_(&nv50_disp_core_func, &g94_disp_core_mthd,
59 .ctor = nv50_disp_core_new, 56 disp, 0, oclass, argv, argc, pobject);
60 .func = &nv50_disp_core_func, 57}
61 .mthd = &g94_disp_core_chan_mthd,
62 .chid = 0,
63};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
index 21fbf89b6319..d162b9cf4eac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
@@ -21,15 +21,10 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26 25
27#include <core/client.h>
28#include <subdev/timer.h> 26#include <subdev/timer.h>
29 27
30#include <nvif/class.h>
31#include <nvif/unpack.h>
32
33const struct nv50_disp_mthd_list 28const struct nv50_disp_mthd_list
34gf119_disp_core_mthd_base = { 29gf119_disp_core_mthd_base = {
35 .mthd = 0x0000, 30 .mthd = 0x0000,
@@ -157,7 +152,7 @@ gf119_disp_core_mthd_head = {
157}; 152};
158 153
159static const struct nv50_disp_chan_mthd 154static const struct nv50_disp_chan_mthd
160gf119_disp_core_chan_mthd = { 155gf119_disp_core_mthd = {
161 .name = "Core", 156 .name = "Core",
162 .addr = 0x000000, 157 .addr = 0x000000,
163 .prev = -0x020000, 158 .prev = -0x020000,
@@ -172,10 +167,9 @@ gf119_disp_core_chan_mthd = {
172}; 167};
173 168
174void 169void
175gf119_disp_core_fini(struct nv50_disp_dmac *chan) 170gf119_disp_core_fini(struct nv50_disp_chan *chan)
176{ 171{
177 struct nv50_disp *disp = chan->base.root->disp; 172 struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
178 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
179 struct nvkm_device *device = subdev->device; 173 struct nvkm_device *device = subdev->device;
180 174
181 /* deactivate channel */ 175 /* deactivate channel */
@@ -188,22 +182,14 @@ gf119_disp_core_fini(struct nv50_disp_dmac *chan)
188 nvkm_error(subdev, "core fini: %08x\n", 182 nvkm_error(subdev, "core fini: %08x\n",
189 nvkm_rd32(device, 0x610490)); 183 nvkm_rd32(device, 0x610490));
190 } 184 }
191
192 /* disable error reporting and completion notification */
193 nvkm_mask(device, 0x610090, 0x00000001, 0x00000000);
194 nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000000);
195} 185}
196 186
197static int 187static int
198gf119_disp_core_init(struct nv50_disp_dmac *chan) 188gf119_disp_core_init(struct nv50_disp_chan *chan)
199{ 189{
200 struct nv50_disp *disp = chan->base.root->disp; 190 struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
201 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
202 struct nvkm_device *device = subdev->device; 191 struct nvkm_device *device = subdev->device;
203 192
204 /* enable error reporting */
205 nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001);
206
207 /* initialise channel for dma command submission */ 193 /* initialise channel for dma command submission */
208 nvkm_wr32(device, 0x610494, chan->push); 194 nvkm_wr32(device, 0x610494, chan->push);
209 nvkm_wr32(device, 0x610498, 0x00010000); 195 nvkm_wr32(device, 0x610498, 0x00010000);
@@ -225,20 +211,19 @@ gf119_disp_core_init(struct nv50_disp_dmac *chan)
225 return 0; 211 return 0;
226} 212}
227 213
228const struct nv50_disp_dmac_func 214const struct nv50_disp_chan_func
229gf119_disp_core_func = { 215gf119_disp_core_func = {
230 .init = gf119_disp_core_init, 216 .init = gf119_disp_core_init,
231 .fini = gf119_disp_core_fini, 217 .fini = gf119_disp_core_fini,
218 .intr = gf119_disp_chan_intr,
219 .user = nv50_disp_chan_user,
232 .bind = gf119_disp_dmac_bind, 220 .bind = gf119_disp_dmac_bind,
233}; 221};
234 222
235const struct nv50_disp_dmac_oclass 223int
236gf119_disp_core_oclass = { 224gf119_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
237 .base.oclass = GF110_DISP_CORE_CHANNEL_DMA, 225 struct nv50_disp *disp, struct nvkm_object **pobject)
238 .base.minver = 0, 226{
239 .base.maxver = 0, 227 return nv50_disp_core_new_(&gf119_disp_core_func, &gf119_disp_core_mthd,
240 .ctor = nv50_disp_core_new, 228 disp, 0, oclass, argv, argc, pobject);
241 .func = &gf119_disp_core_func, 229}
242 .mthd = &gf119_disp_core_chan_mthd,
243 .chid = 0,
244};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
index 088ab222e823..5c800174e079 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
@@ -21,10 +21,7 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28 25
29static const struct nv50_disp_mthd_list 26static const struct nv50_disp_mthd_list
30gk104_disp_core_mthd_head = { 27gk104_disp_core_mthd_head = {
@@ -106,7 +103,7 @@ gk104_disp_core_mthd_head = {
106}; 103};
107 104
108const struct nv50_disp_chan_mthd 105const struct nv50_disp_chan_mthd
109gk104_disp_core_chan_mthd = { 106gk104_disp_core_mthd = {
110 .name = "Core", 107 .name = "Core",
111 .addr = 0x000000, 108 .addr = 0x000000,
112 .prev = -0x020000, 109 .prev = -0x020000,
@@ -120,13 +117,10 @@ gk104_disp_core_chan_mthd = {
120 } 117 }
121}; 118};
122 119
123const struct nv50_disp_dmac_oclass 120int
124gk104_disp_core_oclass = { 121gk104_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
125 .base.oclass = GK104_DISP_CORE_CHANNEL_DMA, 122 struct nv50_disp *disp, struct nvkm_object **pobject)
126 .base.minver = 0, 123{
127 .base.maxver = 0, 124 return nv50_disp_core_new_(&gf119_disp_core_func, &gk104_disp_core_mthd,
128 .ctor = nv50_disp_core_new, 125 disp, 0, oclass, argv, argc, pobject);
129 .func = &gf119_disp_core_func, 126}
130 .mthd = &gk104_disp_core_chan_mthd,
131 .chid = 0,
132};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c
deleted file mode 100644
index df0f45c20108..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_dmac_oclass
30gk110_disp_core_oclass = {
31 .base.oclass = GK110_DISP_CORE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_core_new,
35 .func = &gf119_disp_core_func,
36 .mthd = &gk104_disp_core_chan_mthd,
37 .chid = 0,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c
deleted file mode 100644
index 9e27f8fd98b6..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_dmac_oclass
30gm107_disp_core_oclass = {
31 .base.oclass = GM107_DISP_CORE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_core_new,
35 .func = &gf119_disp_core_func,
36 .mthd = &gk104_disp_core_chan_mthd,
37 .chid = 0,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm200.c
deleted file mode 100644
index bb23a8658ac0..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm200.c
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_dmac_oclass
30gm200_disp_core_oclass = {
31 .base.oclass = GM200_DISP_CORE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_core_new,
35 .func = &gf119_disp_core_func,
36 .mthd = &gk104_disp_core_chan_mthd,
37 .chid = 0,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c
deleted file mode 100644
index d5dff6619d4d..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_dmac_oclass
30gp100_disp_core_oclass = {
31 .base.oclass = GP100_DISP_CORE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_core_new,
35 .func = &gf119_disp_core_func,
36 .mthd = &gk104_disp_core_chan_mthd,
37 .chid = 0,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
index b0df4b752b8c..5b7f993c73c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
@@ -21,23 +21,16 @@
21 * 21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26 25
27#include <subdev/timer.h> 26#include <subdev/timer.h>
28 27
29#include <nvif/class.h>
30
31static int 28static int
32gp102_disp_core_init(struct nv50_disp_dmac *chan) 29gp102_disp_core_init(struct nv50_disp_chan *chan)
33{ 30{
34 struct nv50_disp *disp = chan->base.root->disp; 31 struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
35 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
36 struct nvkm_device *device = subdev->device; 32 struct nvkm_device *device = subdev->device;
37 33
38 /* enable error reporting */
39 nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001);
40
41 /* initialise channel for dma command submission */ 34 /* initialise channel for dma command submission */
42 nvkm_wr32(device, 0x611494, chan->push); 35 nvkm_wr32(device, 0x611494, chan->push);
43 nvkm_wr32(device, 0x611498, 0x00010000); 36 nvkm_wr32(device, 0x611498, 0x00010000);
@@ -59,20 +52,19 @@ gp102_disp_core_init(struct nv50_disp_dmac *chan)
59 return 0; 52 return 0;
60} 53}
61 54
62static const struct nv50_disp_dmac_func 55static const struct nv50_disp_chan_func
63gp102_disp_core_func = { 56gp102_disp_core_func = {
64 .init = gp102_disp_core_init, 57 .init = gp102_disp_core_init,
65 .fini = gf119_disp_core_fini, 58 .fini = gf119_disp_core_fini,
59 .intr = gf119_disp_chan_intr,
60 .user = nv50_disp_chan_user,
66 .bind = gf119_disp_dmac_bind, 61 .bind = gf119_disp_dmac_bind,
67}; 62};
68 63
69const struct nv50_disp_dmac_oclass 64int
70gp102_disp_core_oclass = { 65gp102_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
71 .base.oclass = GP102_DISP_CORE_CHANNEL_DMA, 66 struct nv50_disp *disp, struct nvkm_object **pobject)
72 .base.minver = 0, 67{
73 .base.maxver = 0, 68 return nv50_disp_core_new_(&gp102_disp_core_func, &gk104_disp_core_mthd,
74 .ctor = nv50_disp_core_new, 69 disp, 0, oclass, argv, argc, pobject);
75 .func = &gp102_disp_core_func, 70}
76 .mthd = &gk104_disp_core_chan_mthd,
77 .chid = 0,
78};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c
deleted file mode 100644
index b234547708fc..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_dmac_oclass
30gt200_disp_core_oclass = {
31 .base.oclass = GT200_DISP_CORE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_core_new,
35 .func = &nv50_disp_core_func,
36 .mthd = &g84_disp_core_chan_mthd,
37 .chid = 0,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c
deleted file mode 100644
index 8f5ba2018975..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_dmac_oclass
30gt215_disp_core_oclass = {
31 .base.oclass = GT214_DISP_CORE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_core_new,
35 .func = &nv50_disp_core_func,
36 .mthd = &g94_disp_core_chan_mthd,
37 .chid = 0,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c
new file mode 100644
index 000000000000..4592d0e69fec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c
@@ -0,0 +1,204 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "channv50.h"
23
24#include <subdev/timer.h>
25
26const struct nv50_disp_mthd_list
27gv100_disp_core_mthd_base = {
28 .mthd = 0x0000,
29 .addr = 0x000000,
30 .data = {
31 { 0x0200, 0x680200 },
32 { 0x0208, 0x680208 },
33 { 0x020c, 0x68020c },
34 { 0x0210, 0x680210 },
35 { 0x0214, 0x680214 },
36 { 0x0218, 0x680218 },
37 { 0x021c, 0x68021c },
38 {}
39 }
40};
41
42const struct nv50_disp_mthd_list
43gv100_disp_core_mthd_sor = {
44 .mthd = 0x0020,
45 .addr = 0x000020,
46 .data = {
47 { 0x0300, 0x680300 },
48 { 0x0304, 0x680304 },
49 { 0x0308, 0x680308 },
50 { 0x030c, 0x68030c },
51 {}
52 }
53};
54
55static const struct nv50_disp_mthd_list
56gv100_disp_core_mthd_wndw = {
57 .mthd = 0x0080,
58 .addr = 0x000080,
59 .data = {
60 { 0x1000, 0x681000 },
61 { 0x1004, 0x681004 },
62 { 0x1008, 0x681008 },
63 { 0x100c, 0x68100c },
64 { 0x1010, 0x681010 },
65 {}
66 }
67};
68
69static const struct nv50_disp_mthd_list
70gv100_disp_core_mthd_head = {
71 .mthd = 0x0400,
72 .addr = 0x000400,
73 .data = {
74 { 0x2000, 0x682000 },
75 { 0x2004, 0x682004 },
76 { 0x2008, 0x682008 },
77 { 0x200c, 0x68200c },
78 { 0x2014, 0x682014 },
79 { 0x2018, 0x682018 },
80 { 0x201c, 0x68201c },
81 { 0x2020, 0x682020 },
82 { 0x2028, 0x682028 },
83 { 0x202c, 0x68202c },
84 { 0x2030, 0x682030 },
85 { 0x2038, 0x682038 },
86 { 0x203c, 0x68203c },
87 { 0x2048, 0x682048 },
88 { 0x204c, 0x68204c },
89 { 0x2050, 0x682050 },
90 { 0x2054, 0x682054 },
91 { 0x2058, 0x682058 },
92 { 0x205c, 0x68205c },
93 { 0x2060, 0x682060 },
94 { 0x2064, 0x682064 },
95 { 0x2068, 0x682068 },
96 { 0x206c, 0x68206c },
97 { 0x2070, 0x682070 },
98 { 0x2074, 0x682074 },
99 { 0x2078, 0x682078 },
100 { 0x207c, 0x68207c },
101 { 0x2080, 0x682080 },
102 { 0x2088, 0x682088 },
103 { 0x2090, 0x682090 },
104 { 0x209c, 0x68209c },
105 { 0x20a0, 0x6820a0 },
106 { 0x20a4, 0x6820a4 },
107 { 0x20a8, 0x6820a8 },
108 { 0x20ac, 0x6820ac },
109 { 0x218c, 0x68218c },
110 { 0x2194, 0x682194 },
111 { 0x2198, 0x682198 },
112 { 0x219c, 0x68219c },
113 { 0x21a0, 0x6821a0 },
114 { 0x21a4, 0x6821a4 },
115 { 0x2214, 0x682214 },
116 { 0x2218, 0x682218 },
117 {}
118 }
119};
120
121static const struct nv50_disp_chan_mthd
122gv100_disp_core_mthd = {
123 .name = "Core",
124 .addr = 0x000000,
125 .prev = 0x008000,
126 .data = {
127 { "Global", 1, &gv100_disp_core_mthd_base },
128 { "SOR", 4, &gv100_disp_core_mthd_sor },
129 { "WINDOW", 8, &gv100_disp_core_mthd_wndw },
130 { "HEAD", 4, &gv100_disp_core_mthd_head },
131 {}
132 }
133};
134
135static int
136gv100_disp_core_idle(struct nv50_disp_chan *chan)
137{
138 struct nvkm_device *device = chan->disp->base.engine.subdev.device;
139 nvkm_msec(device, 2000,
140 u32 stat = nvkm_rd32(device, 0x610630);
141 if ((stat & 0x001f0000) == 0x000b0000)
142 return 0;
143 );
144 return -EBUSY;
145}
146
147static u64
148gv100_disp_core_user(struct nv50_disp_chan *chan, u64 *psize)
149{
150 *psize = 0x10000;
151 return 0x680000;
152}
153
154static void
155gv100_disp_core_intr(struct nv50_disp_chan *chan, bool en)
156{
157 struct nvkm_device *device = chan->disp->base.engine.subdev.device;
158 const u32 mask = 0x00000001;
159 const u32 data = en ? mask : 0;
160 nvkm_mask(device, 0x611dac, mask, data);
161}
162
163static void
164gv100_disp_core_fini(struct nv50_disp_chan *chan)
165{
166 struct nvkm_device *device = chan->disp->base.engine.subdev.device;
167 nvkm_mask(device, 0x6104e0, 0x00000010, 0x00000000);
168 gv100_disp_core_idle(chan);
169 nvkm_mask(device, 0x6104e0, 0x00000002, 0x00000000);
170}
171
172static int
173gv100_disp_core_init(struct nv50_disp_chan *chan)
174{
175 struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
176 struct nvkm_device *device = subdev->device;
177
178 nvkm_wr32(device, 0x610b24, lower_32_bits(chan->push));
179 nvkm_wr32(device, 0x610b20, upper_32_bits(chan->push));
180 nvkm_wr32(device, 0x610b28, 0x00000001);
181 nvkm_wr32(device, 0x610b2c, 0x00000040);
182
183 nvkm_mask(device, 0x6104e0, 0x00000010, 0x00000010);
184 nvkm_wr32(device, 0x680000, 0x00000000);
185 nvkm_wr32(device, 0x6104e0, 0x00000013);
186 return gv100_disp_core_idle(chan);
187}
188
189static const struct nv50_disp_chan_func
190gv100_disp_core = {
191 .init = gv100_disp_core_init,
192 .fini = gv100_disp_core_fini,
193 .intr = gv100_disp_core_intr,
194 .user = gv100_disp_core_user,
195 .bind = gv100_disp_dmac_bind,
196};
197
198int
199gv100_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
200 struct nv50_disp *disp, struct nvkm_object **pobject)
201{
202 return nv50_disp_core_new_(&gv100_disp_core, &gv100_disp_core_mthd,
203 disp, 0, oclass, argv, argc, pobject);
204}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
index b547c8b833ca..55db9a22b4be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
@@ -21,32 +21,30 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26 25
27#include <core/client.h> 26#include <core/client.h>
28#include <subdev/timer.h> 27#include <subdev/timer.h>
29 28
30#include <nvif/class.h>
31#include <nvif/cl507d.h> 29#include <nvif/cl507d.h>
32#include <nvif/unpack.h> 30#include <nvif/unpack.h>
33 31
34int 32int
35nv50_disp_core_new(const struct nv50_disp_dmac_func *func, 33nv50_disp_core_new_(const struct nv50_disp_chan_func *func,
36 const struct nv50_disp_chan_mthd *mthd, 34 const struct nv50_disp_chan_mthd *mthd,
37 struct nv50_disp_root *root, int chid, 35 struct nv50_disp *disp, int chid,
38 const struct nvkm_oclass *oclass, void *data, u32 size, 36 const struct nvkm_oclass *oclass, void *argv, u32 argc,
39 struct nvkm_object **pobject) 37 struct nvkm_object **pobject)
40{ 38{
41 union { 39 union {
42 struct nv50_disp_core_channel_dma_v0 v0; 40 struct nv50_disp_core_channel_dma_v0 v0;
43 } *args = data; 41 } *args = argv;
44 struct nvkm_object *parent = oclass->parent; 42 struct nvkm_object *parent = oclass->parent;
45 u64 push; 43 u64 push;
46 int ret = -ENOSYS; 44 int ret = -ENOSYS;
47 45
48 nvif_ioctl(parent, "create disp core channel dma size %d\n", size); 46 nvif_ioctl(parent, "create disp core channel dma size %d\n", argc);
49 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { 47 if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
50 nvif_ioctl(parent, "create disp core channel dma vers %d " 48 nvif_ioctl(parent, "create disp core channel dma vers %d "
51 "pushbuf %016llx\n", 49 "pushbuf %016llx\n",
52 args->v0.version, args->v0.pushbuf); 50 args->v0.version, args->v0.pushbuf);
@@ -54,7 +52,7 @@ nv50_disp_core_new(const struct nv50_disp_dmac_func *func,
54 } else 52 } else
55 return ret; 53 return ret;
56 54
57 return nv50_disp_dmac_new_(func, mthd, root, chid, 0, 55 return nv50_disp_dmac_new_(func, mthd, disp, chid, 0,
58 push, oclass, pobject); 56 push, oclass, pobject);
59} 57}
60 58
@@ -151,7 +149,7 @@ nv50_disp_core_mthd_head = {
151}; 149};
152 150
153static const struct nv50_disp_chan_mthd 151static const struct nv50_disp_chan_mthd
154nv50_disp_core_chan_mthd = { 152nv50_disp_core_mthd = {
155 .name = "Core", 153 .name = "Core",
156 .addr = 0x000000, 154 .addr = 0x000000,
157 .prev = 0x000004, 155 .prev = 0x000004,
@@ -166,10 +164,9 @@ nv50_disp_core_chan_mthd = {
166}; 164};
167 165
168static void 166static void
169nv50_disp_core_fini(struct nv50_disp_dmac *chan) 167nv50_disp_core_fini(struct nv50_disp_chan *chan)
170{ 168{
171 struct nv50_disp *disp = chan->base.root->disp; 169 struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
172 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
173 struct nvkm_device *device = subdev->device; 170 struct nvkm_device *device = subdev->device;
174 171
175 /* deactivate channel */ 172 /* deactivate channel */
@@ -182,21 +179,14 @@ nv50_disp_core_fini(struct nv50_disp_dmac *chan)
182 nvkm_error(subdev, "core fini: %08x\n", 179 nvkm_error(subdev, "core fini: %08x\n",
183 nvkm_rd32(device, 0x610200)); 180 nvkm_rd32(device, 0x610200));
184 } 181 }
185
186 /* disable error reporting and completion notifications */
187 nvkm_mask(device, 0x610028, 0x00010001, 0x00000000);
188} 182}
189 183
190static int 184static int
191nv50_disp_core_init(struct nv50_disp_dmac *chan) 185nv50_disp_core_init(struct nv50_disp_chan *chan)
192{ 186{
193 struct nv50_disp *disp = chan->base.root->disp; 187 struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
194 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
195 struct nvkm_device *device = subdev->device; 188 struct nvkm_device *device = subdev->device;
196 189
197 /* enable error reporting */
198 nvkm_mask(device, 0x610028, 0x00010000, 0x00010000);
199
200 /* attempt to unstick channel from some unknown state */ 190 /* attempt to unstick channel from some unknown state */
201 if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000) 191 if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000)
202 nvkm_mask(device, 0x610200, 0x00800000, 0x00800000); 192 nvkm_mask(device, 0x610200, 0x00800000, 0x00800000);
@@ -224,20 +214,19 @@ nv50_disp_core_init(struct nv50_disp_dmac *chan)
224 return 0; 214 return 0;
225} 215}
226 216
227const struct nv50_disp_dmac_func 217const struct nv50_disp_chan_func
228nv50_disp_core_func = { 218nv50_disp_core_func = {
229 .init = nv50_disp_core_init, 219 .init = nv50_disp_core_init,
230 .fini = nv50_disp_core_fini, 220 .fini = nv50_disp_core_fini,
221 .intr = nv50_disp_chan_intr,
222 .user = nv50_disp_chan_user,
231 .bind = nv50_disp_dmac_bind, 223 .bind = nv50_disp_dmac_bind,
232}; 224};
233 225
234const struct nv50_disp_dmac_oclass 226int
235nv50_disp_core_oclass = { 227nv50_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
236 .base.oclass = NV50_DISP_CORE_CHANNEL_DMA, 228 struct nv50_disp *disp, struct nvkm_object **pobject)
237 .base.minver = 0, 229{
238 .base.maxver = 0, 230 return nv50_disp_core_new_(&nv50_disp_core_func, &nv50_disp_core_mthd,
239 .ctor = nv50_disp_core_new, 231 disp, 0, oclass, argv, argc, pobject);
240 .func = &nv50_disp_core_func, 232}
241 .mthd = &nv50_disp_core_chan_mthd,
242 .chid = 0,
243};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
index 2be6fb052c65..cdda3658dcb3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
@@ -22,16 +22,11 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "channv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26 25
27#include <nvif/class.h> 26int
28 27gf119_disp_curs_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
29const struct nv50_disp_pioc_oclass 28 struct nv50_disp *disp, struct nvkm_object **pobject)
30gf119_disp_curs_oclass = { 29{
31 .base.oclass = GF110_DISP_CURSOR, 30 return nv50_disp_curs_new_(&gf119_disp_pioc_func, disp, 13, 13,
32 .base.minver = 0, 31 oclass, argv, argc, pobject);
33 .base.maxver = 0, 32}
34 .ctor = nv50_disp_curs_new,
35 .func = &gf119_disp_pioc_func,
36 .chid = { 13, 13 },
37};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
index e958210d8105..1a4601f975e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
@@ -22,16 +22,11 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24#include "channv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26 25
27#include <nvif/class.h> 26int
28 27gp102_disp_curs_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
29const struct nv50_disp_pioc_oclass 28 struct nv50_disp *disp, struct nvkm_object **pobject)
30gp102_disp_curs_oclass = { 29{
31 .base.oclass = GK104_DISP_CURSOR, 30 return nv50_disp_curs_new_(&gf119_disp_pioc_func, disp, 13, 17,
32 .base.minver = 0, 31 oclass, argv, argc, pobject);
33 .base.maxver = 0, 32}
34 .ctor = nv50_disp_curs_new,
35 .func = &gf119_disp_pioc_func,
36 .chid = { 13, 17 },
37};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgv100.c
new file mode 100644
index 000000000000..a3e4f6900245
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgv100.c
@@ -0,0 +1,81 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "channv50.h"
23
24#include <subdev/timer.h>
25
26static int
27gv100_disp_curs_idle(struct nv50_disp_chan *chan)
28{
29 struct nvkm_device *device = chan->disp->base.engine.subdev.device;
30 const u32 soff = (chan->chid.ctrl - 1) * 0x04;
31 nvkm_msec(device, 2000,
32 u32 stat = nvkm_rd32(device, 0x610664 + soff);
33 if ((stat & 0x00070000) == 0x00040000)
34 return 0;
35 );
36 return -EBUSY;
37}
38
39static void
40gv100_disp_curs_intr(struct nv50_disp_chan *chan, bool en)
41{
42 struct nvkm_device *device = chan->disp->base.engine.subdev.device;
43 const u32 mask = 0x00010000 << chan->head;
44 const u32 data = en ? mask : 0;
45 nvkm_mask(device, 0x611dac, mask, data);
46}
47
48static void
49gv100_disp_curs_fini(struct nv50_disp_chan *chan)
50{
51 struct nvkm_device *device = chan->disp->base.engine.subdev.device;
52 const u32 hoff = chan->chid.ctrl * 4;
53 nvkm_mask(device, 0x6104e0 + hoff, 0x00000010, 0x00000010);
54 gv100_disp_curs_idle(chan);
55 nvkm_mask(device, 0x6104e0 + hoff, 0x00000001, 0x00000000);
56}
57
58static int
59gv100_disp_curs_init(struct nv50_disp_chan *chan)
60{
61 struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
62 struct nvkm_device *device = subdev->device;
63 nvkm_wr32(device, 0x6104e0 + chan->chid.ctrl * 4, 0x00000001);
64 return gv100_disp_curs_idle(chan);
65}
66
67static const struct nv50_disp_chan_func
68gv100_disp_curs = {
69 .init = gv100_disp_curs_init,
70 .fini = gv100_disp_curs_fini,
71 .intr = gv100_disp_curs_intr,
72 .user = gv100_disp_chan_user,
73};
74
75int
76gv100_disp_curs_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
77 struct nv50_disp *disp, struct nvkm_object **pobject)
78{
79 return nv50_disp_curs_new_(&gv100_disp_curs, disp, 73, 73,
80 oclass, argv, argc, pobject);
81}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
index ab51121b7982..d29758504a5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
@@ -23,30 +23,26 @@
23 */ 23 */
24#include "channv50.h" 24#include "channv50.h"
25#include "head.h" 25#include "head.h"
26#include "rootnv50.h"
27 26
28#include <core/client.h> 27#include <core/client.h>
29 28
30#include <nvif/class.h>
31#include <nvif/cl507a.h> 29#include <nvif/cl507a.h>
32#include <nvif/unpack.h> 30#include <nvif/unpack.h>
33 31
34int 32int
35nv50_disp_curs_new(const struct nv50_disp_chan_func *func, 33nv50_disp_curs_new_(const struct nv50_disp_chan_func *func,
36 const struct nv50_disp_chan_mthd *mthd, 34 struct nv50_disp *disp, int ctrl, int user,
37 struct nv50_disp_root *root, int ctrl, int user, 35 const struct nvkm_oclass *oclass, void *argv, u32 argc,
38 const struct nvkm_oclass *oclass, void *data, u32 size, 36 struct nvkm_object **pobject)
39 struct nvkm_object **pobject)
40{ 37{
41 union { 38 union {
42 struct nv50_disp_cursor_v0 v0; 39 struct nv50_disp_cursor_v0 v0;
43 } *args = data; 40 } *args = argv;
44 struct nvkm_object *parent = oclass->parent; 41 struct nvkm_object *parent = oclass->parent;
45 struct nv50_disp *disp = root->disp;
46 int head, ret = -ENOSYS; 42 int head, ret = -ENOSYS;
47 43
48 nvif_ioctl(parent, "create disp cursor size %d\n", size); 44 nvif_ioctl(parent, "create disp cursor size %d\n", argc);
49 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { 45 if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
50 nvif_ioctl(parent, "create disp cursor vers %d head %d\n", 46 nvif_ioctl(parent, "create disp cursor vers %d head %d\n",
51 args->v0.version, args->v0.head); 47 args->v0.version, args->v0.head);
52 if (!nvkm_head_find(&disp->base, args->v0.head)) 48 if (!nvkm_head_find(&disp->base, args->v0.head))
@@ -55,16 +51,14 @@ nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
55 } else 51 } else
56 return ret; 52 return ret;
57 53
58 return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head, 54 return nv50_disp_chan_new_(func, NULL, disp, ctrl + head, user + head,
59 head, oclass, pobject); 55 head, oclass, pobject);
60} 56}
61 57
62const struct nv50_disp_pioc_oclass 58int
63nv50_disp_curs_oclass = { 59nv50_disp_curs_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
64 .base.oclass = NV50_DISP_CURSOR, 60 struct nv50_disp *disp, struct nvkm_object **pobject)
65 .base.minver = 0, 61{
66 .base.maxver = 0, 62 return nv50_disp_curs_new_(&nv50_disp_pioc_func, disp, 7, 7,
67 .ctor = nv50_disp_curs_new, 63 oclass, argv, argc, pobject);
68 .func = &nv50_disp_pioc_func, 64}
69 .chid = { 7, 7 },
70};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacgf119.c
index dbd032ef352a..71a94777ea2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacgf119.c
@@ -58,8 +58,13 @@ gf119_dac = {
58int 58int
59gf119_dac_new(struct nvkm_disp *disp, int id) 59gf119_dac_new(struct nvkm_disp *disp, int id)
60{ 60{
61 struct nvkm_device *device = disp->engine.subdev.device;
62 if (!(nvkm_rd32(device, 0x612004) & (0x00000010 << id)))
63 return 0;
64 return nvkm_ior_new_(&gf119_dac, disp, DAC, id); 61 return nvkm_ior_new_(&gf119_dac, disp, DAC, id);
65} 62}
63
64int
65gf119_dac_cnt(struct nvkm_disp *disp, unsigned long *pmask)
66{
67 struct nvkm_device *device = disp->engine.subdev.device;
68 *pmask = (nvkm_rd32(device, 0x612004) & 0x000000f0) >> 4;
69 return 4;
70}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
index 85e692b12260..558012db35f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
@@ -109,8 +109,13 @@ nv50_dac = {
109int 109int
110nv50_dac_new(struct nvkm_disp *disp, int id) 110nv50_dac_new(struct nvkm_disp *disp, int id)
111{ 111{
112 struct nvkm_device *device = disp->engine.subdev.device;
113 if (!(nvkm_rd32(device, 0x610184) & (0x00100000 << id)))
114 return 0;
115 return nvkm_ior_new_(&nv50_dac, disp, DAC, id); 112 return nvkm_ior_new_(&nv50_dac, disp, DAC, id);
116} 113}
114
115int
116nv50_dac_cnt(struct nvkm_disp *disp, unsigned long *pmask)
117{
118 struct nvkm_device *device = disp->engine.subdev.device;
119 *pmask = (nvkm_rd32(device, 0x610184) & 0x00700000) >> 20;
120 return 3;
121}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
index ce7cd74fbd5d..edf7dd0d931d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -21,29 +21,27 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26 25
27#include <core/ramht.h> 26#include <core/ramht.h>
28#include <subdev/timer.h> 27#include <subdev/timer.h>
29 28
30int 29int
31gf119_disp_dmac_bind(struct nv50_disp_dmac *chan, 30gf119_disp_dmac_bind(struct nv50_disp_chan *chan,
32 struct nvkm_object *object, u32 handle) 31 struct nvkm_object *object, u32 handle)
33{ 32{
34 return nvkm_ramht_insert(chan->base.root->ramht, object, 33 return nvkm_ramht_insert(chan->disp->ramht, object,
35 chan->base.chid.user, -9, handle, 34 chan->chid.user, -9, handle,
36 chan->base.chid.user << 27 | 0x00000001); 35 chan->chid.user << 27 | 0x00000001);
37} 36}
38 37
39void 38void
40gf119_disp_dmac_fini(struct nv50_disp_dmac *chan) 39gf119_disp_dmac_fini(struct nv50_disp_chan *chan)
41{ 40{
42 struct nv50_disp *disp = chan->base.root->disp; 41 struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
43 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
44 struct nvkm_device *device = subdev->device; 42 struct nvkm_device *device = subdev->device;
45 int ctrl = chan->base.chid.ctrl; 43 int ctrl = chan->chid.ctrl;
46 int user = chan->base.chid.user; 44 int user = chan->chid.user;
47 45
48 /* deactivate channel */ 46 /* deactivate channel */
49 nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000); 47 nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000);
@@ -55,23 +53,15 @@ gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
55 nvkm_error(subdev, "ch %d fini: %08x\n", user, 53 nvkm_error(subdev, "ch %d fini: %08x\n", user,
56 nvkm_rd32(device, 0x610490 + (ctrl * 0x10))); 54 nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
57 } 55 }
58
59 /* disable error reporting and completion notification */
60 nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
61 nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
62} 56}
63 57
64static int 58static int
65gf119_disp_dmac_init(struct nv50_disp_dmac *chan) 59gf119_disp_dmac_init(struct nv50_disp_chan *chan)
66{ 60{
67 struct nv50_disp *disp = chan->base.root->disp; 61 struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
68 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
69 struct nvkm_device *device = subdev->device; 62 struct nvkm_device *device = subdev->device;
70 int ctrl = chan->base.chid.ctrl; 63 int ctrl = chan->chid.ctrl;
71 int user = chan->base.chid.user; 64 int user = chan->chid.user;
72
73 /* enable error reporting */
74 nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
75 65
76 /* initialise channel for dma command submission */ 66 /* initialise channel for dma command submission */
77 nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push); 67 nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push);
@@ -94,9 +84,11 @@ gf119_disp_dmac_init(struct nv50_disp_dmac *chan)
94 return 0; 84 return 0;
95} 85}
96 86
97const struct nv50_disp_dmac_func 87const struct nv50_disp_chan_func
98gf119_disp_dmac_func = { 88gf119_disp_dmac_func = {
99 .init = gf119_disp_dmac_init, 89 .init = gf119_disp_dmac_init,
100 .fini = gf119_disp_dmac_fini, 90 .fini = gf119_disp_dmac_fini,
91 .intr = gf119_disp_chan_intr,
92 .user = nv50_disp_chan_user,
101 .bind = gf119_disp_dmac_bind, 93 .bind = gf119_disp_dmac_bind,
102}; 94};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
index cdead9500343..f21a433199aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
@@ -21,22 +21,17 @@
21 * 21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26 25
27#include <subdev/timer.h> 26#include <subdev/timer.h>
28 27
29static int 28static int
30gp102_disp_dmac_init(struct nv50_disp_dmac *chan) 29gp102_disp_dmac_init(struct nv50_disp_chan *chan)
31{ 30{
32 struct nv50_disp *disp = chan->base.root->disp; 31 struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
33 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
34 struct nvkm_device *device = subdev->device; 32 struct nvkm_device *device = subdev->device;
35 int ctrl = chan->base.chid.ctrl; 33 int ctrl = chan->chid.ctrl;
36 int user = chan->base.chid.user; 34 int user = chan->chid.user;
37
38 /* enable error reporting */
39 nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
40 35
41 /* initialise channel for dma command submission */ 36 /* initialise channel for dma command submission */
42 nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push); 37 nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push);
@@ -59,9 +54,11 @@ gp102_disp_dmac_init(struct nv50_disp_dmac *chan)
59 return 0; 54 return 0;
60} 55}
61 56
62const struct nv50_disp_dmac_func 57const struct nv50_disp_chan_func
63gp102_disp_dmac_func = { 58gp102_disp_dmac_func = {
64 .init = gp102_disp_dmac_init, 59 .init = gp102_disp_dmac_init,
65 .fini = gf119_disp_dmac_fini, 60 .fini = gf119_disp_dmac_fini,
61 .intr = gf119_disp_chan_intr,
62 .user = nv50_disp_chan_user,
66 .bind = gf119_disp_dmac_bind, 63 .bind = gf119_disp_dmac_bind,
67}; 64};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgv100.c
new file mode 100644
index 000000000000..eac0e42da354
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgv100.c
@@ -0,0 +1,77 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "channv50.h"
23
24#include <core/ramht.h>
25#include <subdev/timer.h>
26
27static int
28gv100_disp_dmac_idle(struct nv50_disp_chan *chan)
29{
30 struct nvkm_device *device = chan->disp->base.engine.subdev.device;
31 const u32 soff = (chan->chid.ctrl - 1) * 0x04;
32 nvkm_msec(device, 2000,
33 u32 stat = nvkm_rd32(device, 0x610664 + soff);
34 if ((stat & 0x000f0000) == 0x00040000)
35 return 0;
36 );
37 return -EBUSY;
38}
39
40int
41gv100_disp_dmac_bind(struct nv50_disp_chan *chan,
42 struct nvkm_object *object, u32 handle)
43{
44 return nvkm_ramht_insert(chan->disp->ramht, object,
45 chan->chid.user, -9, handle,
46 chan->chid.user << 25 | 0x00000040);
47}
48
49void
50gv100_disp_dmac_fini(struct nv50_disp_chan *chan)
51{
52 struct nvkm_device *device = chan->disp->base.engine.subdev.device;
53 const u32 coff = chan->chid.ctrl * 0x04;
54 nvkm_mask(device, 0x6104e0 + coff, 0x00000010, 0x00000000);
55 gv100_disp_dmac_idle(chan);
56 nvkm_mask(device, 0x6104e0 + coff, 0x00000002, 0x00000000);
57}
58
59int
60gv100_disp_dmac_init(struct nv50_disp_chan *chan)
61{
62 struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
63 struct nvkm_device *device = subdev->device;
64 const u32 uoff = (chan->chid.ctrl - 1) * 0x1000;
65 const u32 poff = chan->chid.ctrl * 0x10;
66 const u32 coff = chan->chid.ctrl * 0x04;
67
68 nvkm_wr32(device, 0x610b24 + poff, lower_32_bits(chan->push));
69 nvkm_wr32(device, 0x610b20 + poff, upper_32_bits(chan->push));
70 nvkm_wr32(device, 0x610b28 + poff, 0x00000001);
71 nvkm_wr32(device, 0x610b2c + poff, 0x00000040);
72
73 nvkm_mask(device, 0x6104e0 + coff, 0x00000010, 0x00000010);
74 nvkm_wr32(device, 0x690000 + uoff, 0x00000000);
75 nvkm_wr32(device, 0x6104e0 + coff, 0x00000013);
76 return gv100_disp_dmac_idle(chan);
77}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
index 070ec5e18fdb..9e8a9d7a9b68 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -21,176 +21,68 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26 25
27#include <core/client.h> 26#include <core/client.h>
28#include <core/oproxy.h>
29#include <core/ramht.h> 27#include <core/ramht.h>
30#include <subdev/fb.h> 28#include <subdev/fb.h>
29#include <subdev/mmu.h>
31#include <subdev/timer.h> 30#include <subdev/timer.h>
32#include <engine/dma.h> 31#include <engine/dma.h>
33 32
34struct nv50_disp_dmac_object {
35 struct nvkm_oproxy oproxy;
36 struct nv50_disp_root *root;
37 int hash;
38};
39
40static void
41nv50_disp_dmac_child_del_(struct nvkm_oproxy *base)
42{
43 struct nv50_disp_dmac_object *object =
44 container_of(base, typeof(*object), oproxy);
45 nvkm_ramht_remove(object->root->ramht, object->hash);
46}
47
48static const struct nvkm_oproxy_func
49nv50_disp_dmac_child_func_ = {
50 .dtor[0] = nv50_disp_dmac_child_del_,
51};
52
53static int
54nv50_disp_dmac_child_new_(struct nv50_disp_chan *base,
55 const struct nvkm_oclass *oclass,
56 void *data, u32 size, struct nvkm_object **pobject)
57{
58 struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
59 struct nv50_disp_root *root = chan->base.root;
60 struct nvkm_device *device = root->disp->base.engine.subdev.device;
61 const struct nvkm_device_oclass *sclass = oclass->priv;
62 struct nv50_disp_dmac_object *object;
63 int ret;
64
65 if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
66 return -ENOMEM;
67 nvkm_oproxy_ctor(&nv50_disp_dmac_child_func_, oclass, &object->oproxy);
68 object->root = root;
69 *pobject = &object->oproxy.base;
70
71 ret = sclass->ctor(device, oclass, data, size, &object->oproxy.object);
72 if (ret)
73 return ret;
74
75 object->hash = chan->func->bind(chan, object->oproxy.object,
76 oclass->handle);
77 if (object->hash < 0)
78 return object->hash;
79
80 return 0;
81}
82
83static int
84nv50_disp_dmac_child_get_(struct nv50_disp_chan *base, int index,
85 struct nvkm_oclass *sclass)
86{
87 struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
88 struct nv50_disp *disp = chan->base.root->disp;
89 struct nvkm_device *device = disp->base.engine.subdev.device;
90 const struct nvkm_device_oclass *oclass = NULL;
91
92 sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ);
93 if (sclass->engine && sclass->engine->func->base.sclass) {
94 sclass->engine->func->base.sclass(sclass, index, &oclass);
95 if (oclass) {
96 sclass->priv = oclass;
97 return 0;
98 }
99 }
100
101 return -EINVAL;
102}
103
104static void
105nv50_disp_dmac_fini_(struct nv50_disp_chan *base)
106{
107 struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
108 chan->func->fini(chan);
109}
110
111static int
112nv50_disp_dmac_init_(struct nv50_disp_chan *base)
113{
114 struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
115 return chan->func->init(chan);
116}
117
118static void *
119nv50_disp_dmac_dtor_(struct nv50_disp_chan *base)
120{
121 return nv50_disp_dmac(base);
122}
123
124static const struct nv50_disp_chan_func
125nv50_disp_dmac_func_ = {
126 .dtor = nv50_disp_dmac_dtor_,
127 .init = nv50_disp_dmac_init_,
128 .fini = nv50_disp_dmac_fini_,
129 .child_get = nv50_disp_dmac_child_get_,
130 .child_new = nv50_disp_dmac_child_new_,
131};
132
133int 33int
134nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func, 34nv50_disp_dmac_new_(const struct nv50_disp_chan_func *func,
135 const struct nv50_disp_chan_mthd *mthd, 35 const struct nv50_disp_chan_mthd *mthd,
136 struct nv50_disp_root *root, int chid, int head, u64 push, 36 struct nv50_disp *disp, int chid, int head, u64 push,
137 const struct nvkm_oclass *oclass, 37 const struct nvkm_oclass *oclass,
138 struct nvkm_object **pobject) 38 struct nvkm_object **pobject)
139{ 39{
140 struct nvkm_client *client = oclass->client; 40 struct nvkm_client *client = oclass->client;
141 struct nvkm_dmaobj *dmaobj; 41 struct nv50_disp_chan *chan;
142 struct nv50_disp_dmac *chan;
143 int ret; 42 int ret;
144 43
145 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) 44 ret = nv50_disp_chan_new_(func, mthd, disp, chid, chid, head, oclass,
146 return -ENOMEM; 45 pobject);
147 *pobject = &chan->base.object; 46 chan = nv50_disp_chan(*pobject);
148 chan->func = func;
149
150 ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
151 chid, chid, head, oclass, &chan->base);
152 if (ret) 47 if (ret)
153 return ret; 48 return ret;
154 49
155 dmaobj = nvkm_dmaobj_search(client, push); 50 chan->memory = nvkm_umem_search(client, push);
156 if (IS_ERR(dmaobj)) 51 if (IS_ERR(chan->memory))
157 return PTR_ERR(dmaobj); 52 return PTR_ERR(chan->memory);
158 53
159 if (dmaobj->limit - dmaobj->start != 0xfff) 54 if (nvkm_memory_size(chan->memory) < 0x1000)
160 return -EINVAL; 55 return -EINVAL;
161 56
162 switch (dmaobj->target) { 57 switch (nvkm_memory_target(chan->memory)) {
163 case NV_MEM_TARGET_VRAM: 58 case NVKM_MEM_TARGET_VRAM: chan->push = 0x00000001; break;
164 chan->push = 0x00000001 | dmaobj->start >> 8; 59 case NVKM_MEM_TARGET_NCOH: chan->push = 0x00000002; break;
165 break; 60 case NVKM_MEM_TARGET_HOST: chan->push = 0x00000003; break;
166 case NV_MEM_TARGET_PCI_NOSNOOP:
167 chan->push = 0x00000003 | dmaobj->start >> 8;
168 break;
169 default: 61 default:
170 return -EINVAL; 62 return -EINVAL;
171 } 63 }
172 64
65 chan->push |= nvkm_memory_addr(chan->memory) >> 8;
173 return 0; 66 return 0;
174} 67}
175 68
176int 69int
177nv50_disp_dmac_bind(struct nv50_disp_dmac *chan, 70nv50_disp_dmac_bind(struct nv50_disp_chan *chan,
178 struct nvkm_object *object, u32 handle) 71 struct nvkm_object *object, u32 handle)
179{ 72{
180 return nvkm_ramht_insert(chan->base.root->ramht, object, 73 return nvkm_ramht_insert(chan->disp->ramht, object,
181 chan->base.chid.user, -10, handle, 74 chan->chid.user, -10, handle,
182 chan->base.chid.user << 28 | 75 chan->chid.user << 28 |
183 chan->base.chid.user); 76 chan->chid.user);
184} 77}
185 78
186static void 79static void
187nv50_disp_dmac_fini(struct nv50_disp_dmac *chan) 80nv50_disp_dmac_fini(struct nv50_disp_chan *chan)
188{ 81{
189 struct nv50_disp *disp = chan->base.root->disp; 82 struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
190 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
191 struct nvkm_device *device = subdev->device; 83 struct nvkm_device *device = subdev->device;
192 int ctrl = chan->base.chid.ctrl; 84 int ctrl = chan->chid.ctrl;
193 int user = chan->base.chid.user; 85 int user = chan->chid.user;
194 86
195 /* deactivate channel */ 87 /* deactivate channel */
196 nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000); 88 nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
@@ -202,22 +94,15 @@ nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
202 nvkm_error(subdev, "ch %d fini timeout, %08x\n", user, 94 nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
203 nvkm_rd32(device, 0x610200 + (ctrl * 0x10))); 95 nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
204 } 96 }
205
206 /* disable error reporting and completion notifications */
207 nvkm_mask(device, 0x610028, 0x00010001 << user, 0x00000000 << user);
208} 97}
209 98
210static int 99static int
211nv50_disp_dmac_init(struct nv50_disp_dmac *chan) 100nv50_disp_dmac_init(struct nv50_disp_chan *chan)
212{ 101{
213 struct nv50_disp *disp = chan->base.root->disp; 102 struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
214 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
215 struct nvkm_device *device = subdev->device; 103 struct nvkm_device *device = subdev->device;
216 int ctrl = chan->base.chid.ctrl; 104 int ctrl = chan->chid.ctrl;
217 int user = chan->base.chid.user; 105 int user = chan->chid.user;
218
219 /* enable error reporting */
220 nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user);
221 106
222 /* initialise channel for dma command submission */ 107 /* initialise channel for dma command submission */
223 nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push); 108 nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
@@ -240,9 +125,11 @@ nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
240 return 0; 125 return 0;
241} 126}
242 127
243const struct nv50_disp_dmac_func 128const struct nv50_disp_chan_func
244nv50_disp_dmac_func = { 129nv50_disp_dmac_func = {
245 .init = nv50_disp_dmac_init, 130 .init = nv50_disp_dmac_init,
246 .fini = nv50_disp_dmac_fini, 131 .fini = nv50_disp_dmac_fini,
132 .intr = nv50_disp_chan_intr,
133 .user = nv50_disp_chan_user,
247 .bind = nv50_disp_dmac_bind, 134 .bind = nv50_disp_dmac_bind,
248}; 135};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
deleted file mode 100644
index f9b98211da6a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
+++ /dev/null
@@ -1,102 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __NV50_DISP_DMAC_H__
3#define __NV50_DISP_DMAC_H__
4#define nv50_disp_dmac(p) container_of((p), struct nv50_disp_dmac, base)
5#include "channv50.h"
6
7struct nv50_disp_dmac {
8 const struct nv50_disp_dmac_func *func;
9 struct nv50_disp_chan base;
10 u32 push;
11};
12
13struct nv50_disp_dmac_func {
14 int (*init)(struct nv50_disp_dmac *);
15 void (*fini)(struct nv50_disp_dmac *);
16 int (*bind)(struct nv50_disp_dmac *, struct nvkm_object *, u32 handle);
17};
18
19int nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *,
20 const struct nv50_disp_chan_mthd *,
21 struct nv50_disp_root *, int chid, int head, u64 push,
22 const struct nvkm_oclass *, struct nvkm_object **);
23
24extern const struct nv50_disp_dmac_func nv50_disp_dmac_func;
25int nv50_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
26extern const struct nv50_disp_dmac_func nv50_disp_core_func;
27
28extern const struct nv50_disp_dmac_func gf119_disp_dmac_func;
29void gf119_disp_dmac_fini(struct nv50_disp_dmac *);
30int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
31extern const struct nv50_disp_dmac_func gf119_disp_core_func;
32void gf119_disp_core_fini(struct nv50_disp_dmac *);
33
34extern const struct nv50_disp_dmac_func gp102_disp_dmac_func;
35
36struct nv50_disp_dmac_oclass {
37 int (*ctor)(const struct nv50_disp_dmac_func *,
38 const struct nv50_disp_chan_mthd *,
39 struct nv50_disp_root *, int chid,
40 const struct nvkm_oclass *, void *data, u32 size,
41 struct nvkm_object **);
42 struct nvkm_sclass base;
43 const struct nv50_disp_dmac_func *func;
44 const struct nv50_disp_chan_mthd *mthd;
45 int chid;
46};
47
48int nv50_disp_core_new(const struct nv50_disp_dmac_func *,
49 const struct nv50_disp_chan_mthd *,
50 struct nv50_disp_root *, int chid,
51 const struct nvkm_oclass *oclass, void *data, u32 size,
52 struct nvkm_object **);
53int nv50_disp_base_new(const struct nv50_disp_dmac_func *,
54 const struct nv50_disp_chan_mthd *,
55 struct nv50_disp_root *, int chid,
56 const struct nvkm_oclass *oclass, void *data, u32 size,
57 struct nvkm_object **);
58int nv50_disp_ovly_new(const struct nv50_disp_dmac_func *,
59 const struct nv50_disp_chan_mthd *,
60 struct nv50_disp_root *, int chid,
61 const struct nvkm_oclass *oclass, void *data, u32 size,
62 struct nvkm_object **);
63
64extern const struct nv50_disp_dmac_oclass nv50_disp_core_oclass;
65extern const struct nv50_disp_dmac_oclass nv50_disp_base_oclass;
66extern const struct nv50_disp_dmac_oclass nv50_disp_ovly_oclass;
67
68extern const struct nv50_disp_dmac_oclass g84_disp_core_oclass;
69extern const struct nv50_disp_dmac_oclass g84_disp_base_oclass;
70extern const struct nv50_disp_dmac_oclass g84_disp_ovly_oclass;
71
72extern const struct nv50_disp_dmac_oclass g94_disp_core_oclass;
73
74extern const struct nv50_disp_dmac_oclass gt200_disp_core_oclass;
75extern const struct nv50_disp_dmac_oclass gt200_disp_base_oclass;
76extern const struct nv50_disp_dmac_oclass gt200_disp_ovly_oclass;
77
78extern const struct nv50_disp_dmac_oclass gt215_disp_core_oclass;
79extern const struct nv50_disp_dmac_oclass gt215_disp_base_oclass;
80extern const struct nv50_disp_dmac_oclass gt215_disp_ovly_oclass;
81
82extern const struct nv50_disp_dmac_oclass gf119_disp_core_oclass;
83extern const struct nv50_disp_dmac_oclass gf119_disp_base_oclass;
84extern const struct nv50_disp_dmac_oclass gf119_disp_ovly_oclass;
85
86extern const struct nv50_disp_dmac_oclass gk104_disp_core_oclass;
87extern const struct nv50_disp_dmac_oclass gk104_disp_base_oclass;
88extern const struct nv50_disp_dmac_oclass gk104_disp_ovly_oclass;
89
90extern const struct nv50_disp_dmac_oclass gk110_disp_core_oclass;
91extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass;
92
93extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;
94
95extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass;
96
97extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass;
98
99extern const struct nv50_disp_dmac_oclass gp102_disp_core_oclass;
100extern const struct nv50_disp_dmac_oclass gp102_disp_base_oclass;
101extern const struct nv50_disp_dmac_oclass gp102_disp_ovly_oclass;
102#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index 842e1b72ee42..731f188fc1ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -28,18 +28,20 @@
28 28
29static const struct nv50_disp_func 29static const struct nv50_disp_func
30g84_disp = { 30g84_disp = {
31 .init = nv50_disp_init,
32 .fini = nv50_disp_fini,
31 .intr = nv50_disp_intr, 33 .intr = nv50_disp_intr,
32 .uevent = &nv50_disp_chan_uevent, 34 .uevent = &nv50_disp_chan_uevent,
33 .super = nv50_disp_super, 35 .super = nv50_disp_super,
34 .root = &g84_disp_root_oclass, 36 .root = &g84_disp_root_oclass,
35 .head.new = nv50_head_new, 37 .head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
36 .dac = { .nr = 3, .new = nv50_dac_new }, 38 .dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
37 .sor = { .nr = 2, .new = g84_sor_new }, 39 .sor = { .cnt = nv50_sor_cnt, .new = g84_sor_new },
38 .pior = { .nr = 3, .new = nv50_pior_new }, 40 .pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
39}; 41};
40 42
41int 43int
42g84_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) 44g84_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
43{ 45{
44 return nv50_disp_new_(&g84_disp, device, index, 2, pdisp); 46 return nv50_disp_new_(&g84_disp, device, index, pdisp);
45} 47}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index d184e6ab8918..def54fe1951e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -28,18 +28,20 @@
28 28
29static const struct nv50_disp_func 29static const struct nv50_disp_func
30g94_disp = { 30g94_disp = {
31 .init = nv50_disp_init,
32 .fini = nv50_disp_fini,
31 .intr = nv50_disp_intr, 33 .intr = nv50_disp_intr,
32 .uevent = &nv50_disp_chan_uevent, 34 .uevent = &nv50_disp_chan_uevent,
33 .super = nv50_disp_super, 35 .super = nv50_disp_super,
34 .root = &g94_disp_root_oclass, 36 .root = &g94_disp_root_oclass,
35 .head.new = nv50_head_new, 37 .head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
36 .dac = { .nr = 3, .new = nv50_dac_new }, 38 .dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
37 .sor = { .nr = 4, .new = g94_sor_new }, 39 .sor = { .cnt = g94_sor_cnt, .new = g94_sor_new },
38 .pior = { .nr = 3, .new = nv50_pior_new }, 40 .pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
39}; 41};
40 42
41int 43int
42g94_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) 44g94_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
43{ 45{
44 return nv50_disp_new_(&g94_disp, device, index, 2, pdisp); 46 return nv50_disp_new_(&g94_disp, device, index, pdisp);
45} 47}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index d8765b57180b..794e90982641 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -24,8 +24,12 @@
24#include "nv50.h" 24#include "nv50.h"
25#include "head.h" 25#include "head.h"
26#include "ior.h" 26#include "ior.h"
27#include "channv50.h"
27#include "rootnv50.h" 28#include "rootnv50.h"
28 29
30#include <core/ramht.h>
31#include <subdev/timer.h>
32
29void 33void
30gf119_disp_super(struct work_struct *work) 34gf119_disp_super(struct work_struct *work)
31{ 35{
@@ -164,28 +168,99 @@ gf119_disp_intr(struct nv50_disp *disp)
164 } 168 }
165} 169}
166 170
171void
172gf119_disp_fini(struct nv50_disp *disp)
173{
174 struct nvkm_device *device = disp->base.engine.subdev.device;
175 /* disable all interrupts */
176 nvkm_wr32(device, 0x6100b0, 0x00000000);
177}
178
167int 179int
168gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device, 180gf119_disp_init(struct nv50_disp *disp)
169 int index, struct nvkm_disp **pdisp)
170{ 181{
171 u32 heads = nvkm_rd32(device, 0x022448); 182 struct nvkm_device *device = disp->base.engine.subdev.device;
172 return nv50_disp_new_(func, device, index, heads, pdisp); 183 struct nvkm_head *head;
184 u32 tmp;
185 int i;
186
187 /* The below segments of code copying values from one register to
188 * another appear to inform EVO of the display capabilities or
189 * something similar.
190 */
191
192 /* ... CRTC caps */
193 list_for_each_entry(head, &disp->base.head, head) {
194 const u32 hoff = head->id * 0x800;
195 tmp = nvkm_rd32(device, 0x616104 + hoff);
196 nvkm_wr32(device, 0x6101b4 + hoff, tmp);
197 tmp = nvkm_rd32(device, 0x616108 + hoff);
198 nvkm_wr32(device, 0x6101b8 + hoff, tmp);
199 tmp = nvkm_rd32(device, 0x61610c + hoff);
200 nvkm_wr32(device, 0x6101bc + hoff, tmp);
201 }
202
203 /* ... DAC caps */
204 for (i = 0; i < disp->dac.nr; i++) {
205 tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
206 nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp);
207 }
208
209 /* ... SOR caps */
210 for (i = 0; i < disp->sor.nr; i++) {
211 tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
212 nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp);
213 }
214
215 /* steal display away from vbios, or something like that */
216 if (nvkm_rd32(device, 0x6100ac) & 0x00000100) {
217 nvkm_wr32(device, 0x6100ac, 0x00000100);
218 nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
219 if (nvkm_msec(device, 2000,
220 if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
221 break;
222 ) < 0)
223 return -EBUSY;
224 }
225
226 /* point at display engine memory area (hash table, objects) */
227 nvkm_wr32(device, 0x610010, (disp->inst->addr >> 8) | 9);
228
229 /* enable supervisor interrupts, disable everything else */
230 nvkm_wr32(device, 0x610090, 0x00000000);
231 nvkm_wr32(device, 0x6100a0, 0x00000000);
232 nvkm_wr32(device, 0x6100b0, 0x00000307);
233
234 /* disable underflow reporting, preventing an intermittent issue
235 * on some gk104 boards where the production vbios left this
236 * setting enabled by default.
237 *
238 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
239 */
240 list_for_each_entry(head, &disp->base.head, head) {
241 const u32 hoff = head->id * 0x800;
242 nvkm_mask(device, 0x616308 + hoff, 0x00000111, 0x00000010);
243 }
244
245 return 0;
173} 246}
174 247
175static const struct nv50_disp_func 248static const struct nv50_disp_func
176gf119_disp = { 249gf119_disp = {
250 .init = gf119_disp_init,
251 .fini = gf119_disp_fini,
177 .intr = gf119_disp_intr, 252 .intr = gf119_disp_intr,
178 .intr_error = gf119_disp_intr_error, 253 .intr_error = gf119_disp_intr_error,
179 .uevent = &gf119_disp_chan_uevent, 254 .uevent = &gf119_disp_chan_uevent,
180 .super = gf119_disp_super, 255 .super = gf119_disp_super,
181 .root = &gf119_disp_root_oclass, 256 .root = &gf119_disp_root_oclass,
182 .head.new = gf119_head_new, 257 .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
183 .dac = { .nr = 3, .new = gf119_dac_new }, 258 .dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
184 .sor = { .nr = 4, .new = gf119_sor_new }, 259 .sor = { .cnt = gf119_sor_cnt, .new = gf119_sor_new },
185}; 260};
186 261
187int 262int
188gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) 263gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
189{ 264{
190 return gf119_disp_new_(&gf119_disp, device, index, pdisp); 265 return nv50_disp_new_(&gf119_disp, device, index, pdisp);
191} 266}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index e8fe9f315d64..4c3439b1a62d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -28,18 +28,20 @@
28 28
29static const struct nv50_disp_func 29static const struct nv50_disp_func
30gk104_disp = { 30gk104_disp = {
31 .init = gf119_disp_init,
32 .fini = gf119_disp_fini,
31 .intr = gf119_disp_intr, 33 .intr = gf119_disp_intr,
32 .intr_error = gf119_disp_intr_error, 34 .intr_error = gf119_disp_intr_error,
33 .uevent = &gf119_disp_chan_uevent, 35 .uevent = &gf119_disp_chan_uevent,
34 .super = gf119_disp_super, 36 .super = gf119_disp_super,
35 .root = &gk104_disp_root_oclass, 37 .root = &gk104_disp_root_oclass,
36 .head.new = gf119_head_new, 38 .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
37 .dac = { .nr = 3, .new = gf119_dac_new }, 39 .dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
38 .sor = { .nr = 4, .new = gk104_sor_new }, 40 .sor = { .cnt = gf119_sor_cnt, .new = gk104_sor_new },
39}; 41};
40 42
41int 43int
42gk104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) 44gk104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
43{ 45{
44 return gf119_disp_new_(&gk104_disp, device, index, pdisp); 46 return nv50_disp_new_(&gk104_disp, device, index, pdisp);
45} 47}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index 769687502e7a..bc6f4750c942 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -28,18 +28,20 @@
28 28
29static const struct nv50_disp_func 29static const struct nv50_disp_func
30gk110_disp = { 30gk110_disp = {
31 .init = gf119_disp_init,
32 .fini = gf119_disp_fini,
31 .intr = gf119_disp_intr, 33 .intr = gf119_disp_intr,
32 .intr_error = gf119_disp_intr_error, 34 .intr_error = gf119_disp_intr_error,
33 .uevent = &gf119_disp_chan_uevent, 35 .uevent = &gf119_disp_chan_uevent,
34 .super = gf119_disp_super, 36 .super = gf119_disp_super,
35 .root = &gk110_disp_root_oclass, 37 .root = &gk110_disp_root_oclass,
36 .head.new = gf119_head_new, 38 .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
37 .dac = { .nr = 3, .new = gf119_dac_new }, 39 .dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
38 .sor = { .nr = 4, .new = gk104_sor_new }, 40 .sor = { .cnt = gf119_sor_cnt, .new = gk104_sor_new },
39}; 41};
40 42
41int 43int
42gk110_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) 44gk110_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
43{ 45{
44 return gf119_disp_new_(&gk110_disp, device, index, pdisp); 46 return nv50_disp_new_(&gk110_disp, device, index, pdisp);
45} 47}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index ede70e5d188e..031cf6b03a76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -28,18 +28,20 @@
28 28
29static const struct nv50_disp_func 29static const struct nv50_disp_func
30gm107_disp = { 30gm107_disp = {
31 .init = gf119_disp_init,
32 .fini = gf119_disp_fini,
31 .intr = gf119_disp_intr, 33 .intr = gf119_disp_intr,
32 .intr_error = gf119_disp_intr_error, 34 .intr_error = gf119_disp_intr_error,
33 .uevent = &gf119_disp_chan_uevent, 35 .uevent = &gf119_disp_chan_uevent,
34 .super = gf119_disp_super, 36 .super = gf119_disp_super,
35 .root = &gm107_disp_root_oclass, 37 .root = &gm107_disp_root_oclass,
36 .head.new = gf119_head_new, 38 .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
37 .dac = { .nr = 3, .new = gf119_dac_new }, 39 .dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
38 .sor = { .nr = 4, .new = gm107_sor_new }, 40 .sor = { .cnt = gf119_sor_cnt, .new = gm107_sor_new },
39}; 41};
40 42
41int 43int
42gm107_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) 44gm107_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
43{ 45{
44 return gf119_disp_new_(&gm107_disp, device, index, pdisp); 46 return nv50_disp_new_(&gm107_disp, device, index, pdisp);
45} 47}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
index 292d3b5f9704..ec9c33a5162d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
@@ -28,18 +28,20 @@
28 28
29static const struct nv50_disp_func 29static const struct nv50_disp_func
30gm200_disp = { 30gm200_disp = {
31 .init = gf119_disp_init,
32 .fini = gf119_disp_fini,
31 .intr = gf119_disp_intr, 33 .intr = gf119_disp_intr,
32 .intr_error = gf119_disp_intr_error, 34 .intr_error = gf119_disp_intr_error,
33 .uevent = &gf119_disp_chan_uevent, 35 .uevent = &gf119_disp_chan_uevent,
34 .super = gf119_disp_super, 36 .super = gf119_disp_super,
35 .root = &gm200_disp_root_oclass, 37 .root = &gm200_disp_root_oclass,
36 .head.new = gf119_head_new, 38 .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
37 .dac = { .nr = 3, .new = gf119_dac_new }, 39 .dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
38 .sor = { .nr = 4, .new = gm200_sor_new }, 40 .sor = { .cnt = gf119_sor_cnt, .new = gm200_sor_new },
39}; 41};
40 42
41int 43int
42gm200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) 44gm200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
43{ 45{
44 return gf119_disp_new_(&gm200_disp, device, index, pdisp); 46 return nv50_disp_new_(&gm200_disp, device, index, pdisp);
45} 47}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
index 39eb98b2c3a2..fd6216684f6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
@@ -28,17 +28,19 @@
28 28
29static const struct nv50_disp_func 29static const struct nv50_disp_func
30gp100_disp = { 30gp100_disp = {
31 .init = gf119_disp_init,
32 .fini = gf119_disp_fini,
31 .intr = gf119_disp_intr, 33 .intr = gf119_disp_intr,
32 .intr_error = gf119_disp_intr_error, 34 .intr_error = gf119_disp_intr_error,
33 .uevent = &gf119_disp_chan_uevent, 35 .uevent = &gf119_disp_chan_uevent,
34 .super = gf119_disp_super, 36 .super = gf119_disp_super,
35 .root = &gp100_disp_root_oclass, 37 .root = &gp100_disp_root_oclass,
36 .head.new = gf119_head_new, 38 .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
37 .sor = { .nr = 4, .new = gm200_sor_new }, 39 .sor = { .cnt = gf119_sor_cnt, .new = gm200_sor_new },
38}; 40};
39 41
40int 42int
41gp100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) 43gp100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
42{ 44{
43 return gf119_disp_new_(&gp100_disp, device, index, pdisp); 45 return nv50_disp_new_(&gp100_disp, device, index, pdisp);
44} 46}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
index 91d70fe18275..3468ddec1270 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
@@ -24,6 +24,7 @@
24#include "nv50.h" 24#include "nv50.h"
25#include "head.h" 25#include "head.h"
26#include "ior.h" 26#include "ior.h"
27#include "channv50.h"
27#include "rootnv50.h" 28#include "rootnv50.h"
28 29
29static void 30static void
@@ -54,17 +55,19 @@ gp102_disp_intr_error(struct nv50_disp *disp, int chid)
54 55
55static const struct nv50_disp_func 56static const struct nv50_disp_func
56gp102_disp = { 57gp102_disp = {
58 .init = gf119_disp_init,
59 .fini = gf119_disp_fini,
57 .intr = gf119_disp_intr, 60 .intr = gf119_disp_intr,
58 .intr_error = gp102_disp_intr_error, 61 .intr_error = gp102_disp_intr_error,
59 .uevent = &gf119_disp_chan_uevent, 62 .uevent = &gf119_disp_chan_uevent,
60 .super = gf119_disp_super, 63 .super = gf119_disp_super,
61 .root = &gp102_disp_root_oclass, 64 .root = &gp102_disp_root_oclass,
62 .head.new = gf119_head_new, 65 .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
63 .sor = { .nr = 4, .new = gm200_sor_new }, 66 .sor = { .cnt = gf119_sor_cnt, .new = gm200_sor_new },
64}; 67};
65 68
66int 69int
67gp102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) 70gp102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
68{ 71{
69 return gf119_disp_new_(&gp102_disp, device, index, pdisp); 72 return nv50_disp_new_(&gp102_disp, device, index, pdisp);
70} 73}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
index bf00c4e3be3a..f80183701f44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
@@ -28,18 +28,20 @@
28 28
29static const struct nv50_disp_func 29static const struct nv50_disp_func
30gt200_disp = { 30gt200_disp = {
31 .init = nv50_disp_init,
32 .fini = nv50_disp_fini,
31 .intr = nv50_disp_intr, 33 .intr = nv50_disp_intr,
32 .uevent = &nv50_disp_chan_uevent, 34 .uevent = &nv50_disp_chan_uevent,
33 .super = nv50_disp_super, 35 .super = nv50_disp_super,
34 .root = &gt200_disp_root_oclass, 36 .root = &gt200_disp_root_oclass,
35 .head.new = nv50_head_new, 37 .head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
36 .dac = { .nr = 3, .new = nv50_dac_new }, 38 .dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
37 .sor = { .nr = 2, .new = g84_sor_new }, 39 .sor = { .cnt = nv50_sor_cnt, .new = g84_sor_new },
38 .pior = { .nr = 3, .new = nv50_pior_new }, 40 .pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
39}; 41};
40 42
41int 43int
42gt200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) 44gt200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
43{ 45{
44 return nv50_disp_new_(&gt200_disp, device, index, 2, pdisp); 46 return nv50_disp_new_(&gt200_disp, device, index, pdisp);
45} 47}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index 2cdd4d7a98d3..7581efc1357e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -28,18 +28,20 @@
28 28
29static const struct nv50_disp_func 29static const struct nv50_disp_func
30gt215_disp = { 30gt215_disp = {
31 .init = nv50_disp_init,
32 .fini = nv50_disp_fini,
31 .intr = nv50_disp_intr, 33 .intr = nv50_disp_intr,
32 .uevent = &nv50_disp_chan_uevent, 34 .uevent = &nv50_disp_chan_uevent,
33 .super = nv50_disp_super, 35 .super = nv50_disp_super,
34 .root = &gt215_disp_root_oclass, 36 .root = &gt215_disp_root_oclass,
35 .head.new = nv50_head_new, 37 .head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
36 .dac = { .nr = 3, .new = nv50_dac_new }, 38 .dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
37 .sor = { .nr = 4, .new = gt215_sor_new }, 39 .sor = { .cnt = g94_sor_cnt, .new = gt215_sor_new },
38 .pior = { .nr = 3, .new = nv50_pior_new }, 40 .pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
39}; 41};
40 42
41int 43int
42gt215_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) 44gt215_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
43{ 45{
44 return nv50_disp_new_(&gt215_disp, device, index, 2, pdisp); 46 return nv50_disp_new_(&gt215_disp, device, index, pdisp);
45} 47}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
new file mode 100644
index 000000000000..d0a7e3456da1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -0,0 +1,427 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "nv50.h"
23#include "head.h"
24#include "ior.h"
25#include "channv50.h"
26#include "rootnv50.h"
27
28#include <core/gpuobj.h>
29#include <subdev/timer.h>
30
31static int
32gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
33{
34 struct nvkm_device *device = disp->engine.subdev.device;
35 *pmask = nvkm_rd32(device, 0x610064);
36 return (nvkm_rd32(device, 0x610074) & 0x03f00000) >> 20;
37}
38
39static void
40gv100_disp_super(struct work_struct *work)
41{
42 struct nv50_disp *disp =
43 container_of(work, struct nv50_disp, supervisor);
44 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
45 struct nvkm_device *device = subdev->device;
46 struct nvkm_head *head;
47 u32 stat = nvkm_rd32(device, 0x6107a8);
48 u32 mask[4];
49
50 nvkm_debug(subdev, "supervisor %d: %08x\n", ffs(disp->super), stat);
51 list_for_each_entry(head, &disp->base.head, head) {
52 mask[head->id] = nvkm_rd32(device, 0x6107ac + (head->id * 4));
53 HEAD_DBG(head, "%08x", mask[head->id]);
54 }
55
56 if (disp->super & 0x00000001) {
57 nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
58 nv50_disp_super_1(disp);
59 list_for_each_entry(head, &disp->base.head, head) {
60 if (!(mask[head->id] & 0x00001000))
61 continue;
62 nv50_disp_super_1_0(disp, head);
63 }
64 } else
65 if (disp->super & 0x00000002) {
66 list_for_each_entry(head, &disp->base.head, head) {
67 if (!(mask[head->id] & 0x00001000))
68 continue;
69 nv50_disp_super_2_0(disp, head);
70 }
71 nvkm_outp_route(&disp->base);
72 list_for_each_entry(head, &disp->base.head, head) {
73 if (!(mask[head->id] & 0x00010000))
74 continue;
75 nv50_disp_super_2_1(disp, head);
76 }
77 list_for_each_entry(head, &disp->base.head, head) {
78 if (!(mask[head->id] & 0x00001000))
79 continue;
80 nv50_disp_super_2_2(disp, head);
81 }
82 } else
83 if (disp->super & 0x00000004) {
84 list_for_each_entry(head, &disp->base.head, head) {
85 if (!(mask[head->id] & 0x00001000))
86 continue;
87 nv50_disp_super_3_0(disp, head);
88 }
89 }
90
91 list_for_each_entry(head, &disp->base.head, head)
92 nvkm_wr32(device, 0x6107ac + (head->id * 4), 0x00000000);
93 nvkm_wr32(device, 0x6107a8, 0x80000000);
94}
95
96static void
97gv100_disp_exception(struct nv50_disp *disp, int chid)
98{
99 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
100 struct nvkm_device *device = subdev->device;
101 u32 stat = nvkm_rd32(device, 0x611020 + (chid * 12));
102 u32 type = (stat & 0x00007000) >> 12;
103 u32 mthd = (stat & 0x00000fff) << 2;
104 u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
105 u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
106
107 nvkm_error(subdev, "chid %d %08x [type %d mthd %04x] "
108 "data %08x code %08x\n",
109 chid, stat, type, mthd, data, code);
110
111 if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) {
112 switch (mthd) {
113 case 0x0200:
114 nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
115 break;
116 default:
117 break;
118 }
119 }
120
121 nvkm_wr32(device, 0x611020 + (chid * 12), 0x90000000);
122}
123
124static void
125gv100_disp_intr_ctrl_disp(struct nv50_disp *disp)
126{
127 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
128 struct nvkm_device *device = subdev->device;
129 u32 stat = nvkm_rd32(device, 0x611c30);
130
131 if (stat & 0x00000007) {
132 disp->super = (stat & 0x00000007);
133 queue_work(disp->wq, &disp->supervisor);
134 nvkm_wr32(device, 0x611860, disp->super);
135 stat &= ~0x00000007;
136 }
137
138 /*TODO: I would guess this is VBIOS_RELEASE, however, NFI how to
139 * ACK it, nor does RM appear to bother.
140 */
141 if (stat & 0x00000008)
142 stat &= ~0x00000008;
143
144 if (stat & 0x00000100) {
145 unsigned long wndws = nvkm_rd32(device, 0x611858);
146 unsigned long other = nvkm_rd32(device, 0x61185c);
147 int wndw;
148
149 nvkm_wr32(device, 0x611858, wndws);
150 nvkm_wr32(device, 0x61185c, other);
151
152 /* AWAKEN_OTHER_CORE. */
153 if (other & 0x00000001)
154 nv50_disp_chan_uevent_send(disp, 0);
155
156 /* AWAKEN_WIN_CH(n). */
157 for_each_set_bit(wndw, &wndws, disp->wndw.nr) {
158 nv50_disp_chan_uevent_send(disp, 1 + wndw);
159 }
160 }
161
162 if (stat)
163 nvkm_warn(subdev, "ctrl %08x\n", stat);
164}
165
166static void
167gv100_disp_intr_exc_other(struct nv50_disp *disp)
168{
169 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
170 struct nvkm_device *device = subdev->device;
171 u32 stat = nvkm_rd32(device, 0x611854);
172 unsigned long mask;
173 int head;
174
175 if (stat & 0x00000001) {
176 nvkm_wr32(device, 0x611854, 0x00000001);
177 gv100_disp_exception(disp, 0);
178 stat &= ~0x00000001;
179 }
180
181 if ((mask = (stat & 0x00ff0000) >> 16)) {
182 for_each_set_bit(head, &mask, disp->wndw.nr) {
183 nvkm_wr32(device, 0x611854, 0x00010000 << head);
184 gv100_disp_exception(disp, 73 + head);
185 stat &= ~(0x00010000 << head);
186 }
187 }
188
189 if (stat) {
190 nvkm_warn(subdev, "exception %08x\n", stat);
191 nvkm_wr32(device, 0x611854, stat);
192 }
193}
194
195static void
196gv100_disp_intr_exc_winim(struct nv50_disp *disp)
197{
198 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
199 struct nvkm_device *device = subdev->device;
200 unsigned long stat = nvkm_rd32(device, 0x611850);
201 int wndw;
202
203 for_each_set_bit(wndw, &stat, disp->wndw.nr) {
204 nvkm_wr32(device, 0x611850, BIT(wndw));
205 gv100_disp_exception(disp, 33 + wndw);
206 stat &= ~BIT(wndw);
207 }
208
209 if (stat) {
210 nvkm_warn(subdev, "wimm %08x\n", (u32)stat);
211 nvkm_wr32(device, 0x611850, stat);
212 }
213}
214
215static void
216gv100_disp_intr_exc_win(struct nv50_disp *disp)
217{
218 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
219 struct nvkm_device *device = subdev->device;
220 unsigned long stat = nvkm_rd32(device, 0x61184c);
221 int wndw;
222
223 for_each_set_bit(wndw, &stat, disp->wndw.nr) {
224 nvkm_wr32(device, 0x61184c, BIT(wndw));
225 gv100_disp_exception(disp, 1 + wndw);
226 stat &= ~BIT(wndw);
227 }
228
229 if (stat) {
230 nvkm_warn(subdev, "wndw %08x\n", (u32)stat);
231 nvkm_wr32(device, 0x61184c, stat);
232 }
233}
234
235static void
236gv100_disp_intr_head_timing(struct nv50_disp *disp, int head)
237{
238 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
239 struct nvkm_device *device = subdev->device;
240 u32 stat = nvkm_rd32(device, 0x611800 + (head * 0x04));
241
242 /* LAST_DATA, LOADV. */
243 if (stat & 0x00000003) {
244 nvkm_wr32(device, 0x611800 + (head * 0x04), stat & 0x00000003);
245 stat &= ~0x00000003;
246 }
247
248 if (stat & 0x00000004) {
249 nvkm_disp_vblank(&disp->base, head);
250 nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000004);
251 stat &= ~0x00000004;
252 }
253
254 if (stat) {
255 nvkm_warn(subdev, "head %08x\n", stat);
256 nvkm_wr32(device, 0x611800 + (head * 0x04), stat);
257 }
258}
259
260static void
261gv100_disp_intr(struct nv50_disp *disp)
262{
263 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
264 struct nvkm_device *device = subdev->device;
265 u32 stat = nvkm_rd32(device, 0x611ec0);
266 unsigned long mask;
267 int head;
268
269 if ((mask = (stat & 0x000000ff))) {
270 for_each_set_bit(head, &mask, 8) {
271 gv100_disp_intr_head_timing(disp, head);
272 stat &= ~BIT(head);
273 }
274 }
275
276 if (stat & 0x00000200) {
277 gv100_disp_intr_exc_win(disp);
278 stat &= ~0x00000200;
279 }
280
281 if (stat & 0x00000400) {
282 gv100_disp_intr_exc_winim(disp);
283 stat &= ~0x00000400;
284 }
285
286 if (stat & 0x00000800) {
287 gv100_disp_intr_exc_other(disp);
288 stat &= ~0x00000800;
289 }
290
291 if (stat & 0x00001000) {
292 gv100_disp_intr_ctrl_disp(disp);
293 stat &= ~0x00001000;
294 }
295
296 if (stat)
297 nvkm_warn(subdev, "intr %08x\n", stat);
298}
299
300static void
301gv100_disp_fini(struct nv50_disp *disp)
302{
303 struct nvkm_device *device = disp->base.engine.subdev.device;
304 nvkm_wr32(device, 0x611db0, 0x00000000);
305}
306
307static int
308gv100_disp_init(struct nv50_disp *disp)
309{
310 struct nvkm_device *device = disp->base.engine.subdev.device;
311 struct nvkm_head *head;
312 int i, j;
313 u32 tmp;
314
315 /* Claim ownership of display. */
316 if (nvkm_rd32(device, 0x6254e8) & 0x00000002) {
317 nvkm_mask(device, 0x6254e8, 0x00000001, 0x00000000);
318 if (nvkm_msec(device, 2000,
319 if (!(nvkm_rd32(device, 0x6254e8) & 0x00000002))
320 break;
321 ) < 0)
322 return -EBUSY;
323 }
324
325 /* Lock pin capabilities. */
326 tmp = nvkm_rd32(device, 0x610068);
327 nvkm_wr32(device, 0x640008, tmp);
328
329 /* SOR capabilities. */
330 for (i = 0; i < disp->sor.nr; i++) {
331 tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
332 nvkm_mask(device, 0x640000, 0x00000100 << i, 0x00000100 << i);
333 nvkm_wr32(device, 0x640144 + (i * 0x08), tmp);
334 }
335
336 /* Head capabilities. */
337 list_for_each_entry(head, &disp->base.head, head) {
338 const int id = head->id;
339
340 /* RG. */
341 tmp = nvkm_rd32(device, 0x616300 + (id * 0x800));
342 nvkm_wr32(device, 0x640048 + (id * 0x020), tmp);
343
344 /* POSTCOMP. */
345 for (j = 0; j < 6 * 4; j += 4) {
346 tmp = nvkm_rd32(device, 0x616100 + (id * 0x800) + j);
347 nvkm_wr32(device, 0x640030 + (id * 0x20) + j, tmp);
348 }
349 }
350
351 /* Window capabilities. */
352 for (i = 0; i < disp->wndw.nr; i++) {
353 nvkm_mask(device, 0x640004, 1 << i, 1 << i);
354 for (j = 0; j < 6 * 4; j += 4) {
355 tmp = nvkm_rd32(device, 0x630050 + (i * 0x800) + j);
356 nvkm_wr32(device, 0x6401e4 + (i * 0x20) + j, tmp);
357 }
358 }
359
360 /* IHUB capabilities. */
361 for (i = 0; i < 4; i++) {
362 tmp = nvkm_rd32(device, 0x62e000 + (i * 0x04));
363 nvkm_wr32(device, 0x640010 + (i * 0x04), tmp);
364 }
365
366 nvkm_mask(device, 0x610078, 0x00000001, 0x00000001);
367
368 /* Setup instance memory. */
369 switch (nvkm_memory_target(disp->inst->memory)) {
370 case NVKM_MEM_TARGET_VRAM: tmp = 0x00000001; break;
371 case NVKM_MEM_TARGET_NCOH: tmp = 0x00000002; break;
372 case NVKM_MEM_TARGET_HOST: tmp = 0x00000003; break;
373 default:
374 break;
375 }
376 nvkm_wr32(device, 0x610010, 0x00000008 | tmp);
377 nvkm_wr32(device, 0x610014, disp->inst->addr >> 16);
378
379 /* CTRL_DISP: AWAKEN, ERROR, SUPERVISOR[1-3]. */
380 nvkm_wr32(device, 0x611cf0, 0x00000187); /* MSK. */
381 nvkm_wr32(device, 0x611db0, 0x00000187); /* EN. */
382
383 /* EXC_OTHER: CURSn, CORE. */
384 nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
385 0x00000001); /* MSK. */
386 nvkm_wr32(device, 0x611dac, 0x00000000); /* EN. */
387
388 /* EXC_WINIM. */
389 nvkm_wr32(device, 0x611ce8, disp->wndw.mask); /* MSK. */
390 nvkm_wr32(device, 0x611da8, 0x00000000); /* EN. */
391
392 /* EXC_WIN. */
393 nvkm_wr32(device, 0x611ce4, disp->wndw.mask); /* MSK. */
394 nvkm_wr32(device, 0x611da4, 0x00000000); /* EN. */
395
396 /* HEAD_TIMING(n): VBLANK. */
397 list_for_each_entry(head, &disp->base.head, head) {
398 const u32 hoff = head->id * 4;
399 nvkm_wr32(device, 0x611cc0 + hoff, 0x00000004); /* MSK. */
400 nvkm_wr32(device, 0x611d80 + hoff, 0x00000000); /* EN. */
401 }
402
403 /* OR. */
404 nvkm_wr32(device, 0x611cf4, 0x00000000); /* MSK. */
405 nvkm_wr32(device, 0x611db4, 0x00000000); /* EN. */
406 return 0;
407}
408
409static const struct nv50_disp_func
410gv100_disp = {
411 .init = gv100_disp_init,
412 .fini = gv100_disp_fini,
413 .intr = gv100_disp_intr,
414 .uevent = &gv100_disp_chan_uevent,
415 .super = gv100_disp_super,
416 .root = &gv100_disp_root_oclass,
417 .wndw = { .cnt = gv100_disp_wndw_cnt },
418 .head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
419 .sor = { .cnt = gv100_sor_cnt, .new = gv100_sor_new },
420 .ramht_size = 0x2000,
421};
422
423int
424gv100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
425{
426 return nv50_disp_new_(&gv100_disp, device, index, pdisp);
427}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
new file mode 100644
index 000000000000..6e3c450eaace
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
@@ -0,0 +1,85 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "hdmi.h"
23
24void
25gv100_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet,
26 u8 rekey, u8 *avi, u8 avi_size, u8 *vendor, u8 vendor_size)
27{
28 struct nvkm_device *device = ior->disp->engine.subdev.device;
29 const u32 ctrl = 0x40000000 * enable |
30 max_ac_packet << 16 |
31 rekey;
32 const u32 hoff = head * 0x800;
33 const u32 hdmi = head * 0x400;
34 struct packed_hdmi_infoframe avi_infoframe;
35 struct packed_hdmi_infoframe vendor_infoframe;
36
37 pack_hdmi_infoframe(&avi_infoframe, avi, avi_size);
38 pack_hdmi_infoframe(&vendor_infoframe, vendor, vendor_size);
39
40 if (!(ctrl & 0x40000000)) {
41 nvkm_mask(device, 0x6165c0 + hoff, 0x40000000, 0x00000000);
42 nvkm_mask(device, 0x6f0100 + hdmi, 0x00000001, 0x00000000);
43 nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000);
44 nvkm_mask(device, 0x6f0000 + hdmi, 0x00000001, 0x00000000);
45 return;
46 }
47
48 /* AVI InfoFrame (AVI). */
49 nvkm_mask(device, 0x6f0000 + hdmi, 0x00000001, 0x00000000);
50 if (avi_size) {
51 nvkm_wr32(device, 0x6f0008 + hdmi, avi_infoframe.header);
52 nvkm_wr32(device, 0x6f000c + hdmi, avi_infoframe.subpack0_low);
53 nvkm_wr32(device, 0x6f0010 + hdmi, avi_infoframe.subpack0_high);
54 nvkm_wr32(device, 0x6f0014 + hdmi, avi_infoframe.subpack1_low);
55 nvkm_wr32(device, 0x6f0018 + hdmi, avi_infoframe.subpack1_high);
56 nvkm_mask(device, 0x6f0000 + hdmi, 0x00000001, 0x00000001);
57 }
58
59 /* Vendor-specific InfoFrame (VSI). */
60 nvkm_mask(device, 0x6f0100 + hdmi, 0x00010001, 0x00000000);
61 if (vendor_size) {
62 nvkm_wr32(device, 0x6f0108 + hdmi, vendor_infoframe.header);
63 nvkm_wr32(device, 0x6f010c + hdmi, vendor_infoframe.subpack0_low);
64 nvkm_wr32(device, 0x6f0110 + hdmi, vendor_infoframe.subpack0_high);
65 nvkm_wr32(device, 0x6f0110 + hdmi, 0x00000000);
66 nvkm_wr32(device, 0x6f0114 + hdmi, 0x00000000);
67 nvkm_wr32(device, 0x6f0118 + hdmi, 0x00000000);
68 nvkm_wr32(device, 0x6f011c + hdmi, 0x00000000);
69 nvkm_wr32(device, 0x6f0120 + hdmi, 0x00000000);
70 nvkm_wr32(device, 0x6f0124 + hdmi, 0x00000000);
71 nvkm_mask(device, 0x6f0100 + hdmi, 0x00000001, 0x00000001);
72 }
73
74
75 /* General Control (GCP). */
76 nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000);
77 nvkm_wr32(device, 0x6f00cc + hdmi, 0x00000010);
78 nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000001);
79
80 /* Audio Clock Regeneration (ACR). */
81 nvkm_wr32(device, 0x6f0080 + hdmi, 0x82000000);
82
83 /* NV_PDISP_SF_HDMI_CTRL. */
84 nvkm_mask(device, 0x6165c0 + hoff, 0x401f007f, ctrl);
85}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
index 57030b3a4a75..7d55faf52fcb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
@@ -52,6 +52,14 @@ void nv50_head_rgpos(struct nvkm_head *, u16 *, u16 *);
52#define HEAD_DBG(h,f,a...) HEAD_MSG((h), debug, f, ##a) 52#define HEAD_DBG(h,f,a...) HEAD_MSG((h), debug, f, ##a)
53 53
54int nv04_head_new(struct nvkm_disp *, int id); 54int nv04_head_new(struct nvkm_disp *, int id);
55
56int nv50_head_cnt(struct nvkm_disp *, unsigned long *);
55int nv50_head_new(struct nvkm_disp *, int id); 57int nv50_head_new(struct nvkm_disp *, int id);
58
59int gf119_head_cnt(struct nvkm_disp *, unsigned long *);
56int gf119_head_new(struct nvkm_disp *, int id); 60int gf119_head_new(struct nvkm_disp *, int id);
61void gf119_head_rgclk(struct nvkm_head *, int);
62
63int gv100_head_cnt(struct nvkm_disp *, unsigned long *);
64int gv100_head_new(struct nvkm_disp *, int id);
57#endif 65#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c
index 9fd7ae331308..e86298b35902 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c
@@ -39,7 +39,7 @@ gf119_head_vblank_get(struct nvkm_head *head)
39 nvkm_mask(device, 0x6100c0 + hoff, 0x00000001, 0x00000001); 39 nvkm_mask(device, 0x6100c0 + hoff, 0x00000001, 0x00000001);
40} 40}
41 41
42static void 42void
43gf119_head_rgclk(struct nvkm_head *head, int div) 43gf119_head_rgclk(struct nvkm_head *head, int div)
44{ 44{
45 struct nvkm_device *device = head->disp->engine.subdev.device; 45 struct nvkm_device *device = head->disp->engine.subdev.device;
@@ -92,8 +92,13 @@ gf119_head = {
92int 92int
93gf119_head_new(struct nvkm_disp *disp, int id) 93gf119_head_new(struct nvkm_disp *disp, int id)
94{ 94{
95 struct nvkm_device *device = disp->engine.subdev.device;
96 if (!(nvkm_rd32(device, 0x612004) & (0x00000001 << id)))
97 return 0;
98 return nvkm_head_new_(&gf119_head, disp, id); 95 return nvkm_head_new_(&gf119_head, disp, id);
99} 96}
97
98int
99gf119_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
100{
101 struct nvkm_device *device = disp->engine.subdev.device;
102 *pmask = nvkm_rd32(device, 0x612004) & 0x0000000f;
103 return nvkm_rd32(device, 0x022448);
104}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c
new file mode 100644
index 000000000000..1a061b42ae5c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c
@@ -0,0 +1,105 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "head.h"
23
24static void
25gv100_head_vblank_put(struct nvkm_head *head)
26{
27 struct nvkm_device *device = head->disp->engine.subdev.device;
28 nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000004, 0x00000000);
29}
30
31static void
32gv100_head_vblank_get(struct nvkm_head *head)
33{
34 struct nvkm_device *device = head->disp->engine.subdev.device;
35 nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000004, 0x00000004);
36}
37
38static void
39gv100_head_rgpos(struct nvkm_head *head, u16 *hline, u16 *vline)
40{
41 struct nvkm_device *device = head->disp->engine.subdev.device;
42 const u32 hoff = head->id * 0x800;
43 /* vline read locks hline. */
44 *vline = nvkm_rd32(device, 0x616330 + hoff) & 0x0000ffff;
45 *hline = nvkm_rd32(device, 0x616334 + hoff) & 0x0000ffff;
46}
47
48static void
49gv100_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
50{
51 struct nvkm_device *device = head->disp->engine.subdev.device;
52 const u32 hoff = (state == &head->arm) * 0x8000 + head->id * 0x400;
53 u32 data;
54
55 data = nvkm_rd32(device, 0x682064 + hoff);
56 state->vtotal = (data & 0xffff0000) >> 16;
57 state->htotal = (data & 0x0000ffff);
58 data = nvkm_rd32(device, 0x682068 + hoff);
59 state->vsynce = (data & 0xffff0000) >> 16;
60 state->hsynce = (data & 0x0000ffff);
61 data = nvkm_rd32(device, 0x68206c + hoff);
62 state->vblanke = (data & 0xffff0000) >> 16;
63 state->hblanke = (data & 0x0000ffff);
64 data = nvkm_rd32(device, 0x682070 + hoff);
65 state->vblanks = (data & 0xffff0000) >> 16;
66 state->hblanks = (data & 0x0000ffff);
67 state->hz = nvkm_rd32(device, 0x68200c + hoff);
68
69 data = nvkm_rd32(device, 0x682004 + hoff);
70 switch ((data & 0x000000f0) >> 4) {
71 case 5: state->or.depth = 30; break;
72 case 4: state->or.depth = 24; break;
73 case 1: state->or.depth = 18; break;
74 default:
75 state->or.depth = 18;
76 WARN_ON(1);
77 break;
78 }
79}
80
81static const struct nvkm_head_func
82gv100_head = {
83 .state = gv100_head_state,
84 .rgpos = gv100_head_rgpos,
85 .rgclk = gf119_head_rgclk,
86 .vblank_get = gv100_head_vblank_get,
87 .vblank_put = gv100_head_vblank_put,
88};
89
90int
91gv100_head_new(struct nvkm_disp *disp, int id)
92{
93 struct nvkm_device *device = disp->engine.subdev.device;
94 if (!(nvkm_rd32(device, 0x610060) & (0x00000001 << id)))
95 return 0;
96 return nvkm_head_new_(&gv100_head, disp, id);
97}
98
99int
100gv100_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
101{
102 struct nvkm_device *device = disp->engine.subdev.device;
103 *pmask = nvkm_rd32(device, 0x610060) & 0x000000ff;
104 return nvkm_rd32(device, 0x610074) & 0x0000000f;
105}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c
index c80d06d5168f..e7d5c397cd29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c
@@ -90,3 +90,10 @@ nv50_head_new(struct nvkm_disp *disp, int id)
90{ 90{
91 return nvkm_head_new_(&nv50_head, disp, id); 91 return nvkm_head_new_(&nv50_head, disp, id);
92} 92}
93
94int
95nv50_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
96{
97 *pmask = 3;
98 return 2;
99}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 4548c031b937..e0b4e0c5704e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -30,7 +30,7 @@ struct nvkm_ior {
30 UNKNOWN 30 UNKNOWN
31 } proto:3; 31 } proto:3;
32 unsigned link:2; 32 unsigned link:2;
33 unsigned head:4; 33 unsigned head:8;
34 } arm, asy; 34 } arm, asy;
35 35
36 /* Armed DP state. */ 36 /* Armed DP state. */
@@ -106,7 +106,6 @@ nv50_sor_link(struct nvkm_ior *ior)
106 return nv50_ior_base(ior) + ((ior->asy.link == 2) * 0x80); 106 return nv50_ior_base(ior) + ((ior->asy.link == 2) * 0x80);
107} 107}
108 108
109int nv50_sor_new_(const struct nvkm_ior_func *, struct nvkm_disp *, int id);
110void nv50_sor_state(struct nvkm_ior *, struct nvkm_ior_state *); 109void nv50_sor_state(struct nvkm_ior *, struct nvkm_ior_state *);
111void nv50_sor_power(struct nvkm_ior *, bool, bool, bool, bool, bool); 110void nv50_sor_power(struct nvkm_ior *, bool, bool, bool, bool, bool);
112void nv50_sor_clock(struct nvkm_ior *); 111void nv50_sor_clock(struct nvkm_ior *);
@@ -122,7 +121,6 @@ void g94_sor_dp_watermark(struct nvkm_ior *, int, u8);
122 121
123void gt215_sor_dp_audio(struct nvkm_ior *, int, bool); 122void gt215_sor_dp_audio(struct nvkm_ior *, int, bool);
124 123
125int gf119_sor_new_(const struct nvkm_ior_func *, struct nvkm_disp *, int id);
126void gf119_sor_state(struct nvkm_ior *, struct nvkm_ior_state *); 124void gf119_sor_state(struct nvkm_ior *, struct nvkm_ior_state *);
127void gf119_sor_clock(struct nvkm_ior *); 125void gf119_sor_clock(struct nvkm_ior *);
128int gf119_sor_dp_links(struct nvkm_ior *, struct nvkm_i2c_aux *); 126int gf119_sor_dp_links(struct nvkm_ior *, struct nvkm_i2c_aux *);
@@ -135,10 +133,15 @@ void gf119_sor_dp_watermark(struct nvkm_ior *, int, u8);
135 133
136void gm107_sor_dp_pattern(struct nvkm_ior *, int); 134void gm107_sor_dp_pattern(struct nvkm_ior *, int);
137 135
136void gm200_sor_route_set(struct nvkm_outp *, struct nvkm_ior *);
137int gm200_sor_route_get(struct nvkm_outp *, int *);
138void gm200_sor_dp_drive(struct nvkm_ior *, int, int, int, int, int);
139
138void g84_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8); 140void g84_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
139void gt215_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8); 141void gt215_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
140void gf119_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8); 142void gf119_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
141void gk104_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8); 143void gk104_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
144void gv100_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
142 145
143void gt215_hda_hpd(struct nvkm_ior *, int, bool); 146void gt215_hda_hpd(struct nvkm_ior *, int, bool);
144void gt215_hda_eld(struct nvkm_ior *, u8 *, u8); 147void gt215_hda_eld(struct nvkm_ior *, u8 *, u8);
@@ -153,19 +156,34 @@ void gf119_hda_eld(struct nvkm_ior *, u8 *, u8);
153#define IOR_WARN(i,f,a...) IOR_MSG((i), warn, f, ##a) 156#define IOR_WARN(i,f,a...) IOR_MSG((i), warn, f, ##a)
154#define IOR_DBG(i,f,a...) IOR_MSG((i), debug, f, ##a) 157#define IOR_DBG(i,f,a...) IOR_MSG((i), debug, f, ##a)
155 158
159int nv50_dac_cnt(struct nvkm_disp *, unsigned long *);
156int nv50_dac_new(struct nvkm_disp *, int); 160int nv50_dac_new(struct nvkm_disp *, int);
161
162int gf119_dac_cnt(struct nvkm_disp *, unsigned long *);
157int gf119_dac_new(struct nvkm_disp *, int); 163int gf119_dac_new(struct nvkm_disp *, int);
158 164
165int nv50_pior_cnt(struct nvkm_disp *, unsigned long *);
159int nv50_pior_new(struct nvkm_disp *, int); 166int nv50_pior_new(struct nvkm_disp *, int);
160 167
168int nv50_sor_cnt(struct nvkm_disp *, unsigned long *);
161int nv50_sor_new(struct nvkm_disp *, int); 169int nv50_sor_new(struct nvkm_disp *, int);
170
162int g84_sor_new(struct nvkm_disp *, int); 171int g84_sor_new(struct nvkm_disp *, int);
172
173int g94_sor_cnt(struct nvkm_disp *, unsigned long *);
163int g94_sor_new(struct nvkm_disp *, int); 174int g94_sor_new(struct nvkm_disp *, int);
175
164int mcp77_sor_new(struct nvkm_disp *, int); 176int mcp77_sor_new(struct nvkm_disp *, int);
165int gt215_sor_new(struct nvkm_disp *, int); 177int gt215_sor_new(struct nvkm_disp *, int);
166int mcp89_sor_new(struct nvkm_disp *, int); 178int mcp89_sor_new(struct nvkm_disp *, int);
179
180int gf119_sor_cnt(struct nvkm_disp *, unsigned long *);
167int gf119_sor_new(struct nvkm_disp *, int); 181int gf119_sor_new(struct nvkm_disp *, int);
182
168int gk104_sor_new(struct nvkm_disp *, int); 183int gk104_sor_new(struct nvkm_disp *, int);
169int gm107_sor_new(struct nvkm_disp *, int); 184int gm107_sor_new(struct nvkm_disp *, int);
170int gm200_sor_new(struct nvkm_disp *, int); 185int gm200_sor_new(struct nvkm_disp *, int);
186
187int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
188int gv100_sor_new(struct nvkm_disp *, int);
171#endif 189#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
index d7e0fbb12bf1..cfdce23ab83a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
@@ -26,18 +26,20 @@
26 26
27static const struct nv50_disp_func 27static const struct nv50_disp_func
28mcp77_disp = { 28mcp77_disp = {
29 .init = nv50_disp_init,
30 .fini = nv50_disp_fini,
29 .intr = nv50_disp_intr, 31 .intr = nv50_disp_intr,
30 .uevent = &nv50_disp_chan_uevent, 32 .uevent = &nv50_disp_chan_uevent,
31 .super = nv50_disp_super, 33 .super = nv50_disp_super,
32 .root = &g94_disp_root_oclass, 34 .root = &g94_disp_root_oclass,
33 .head.new = nv50_head_new, 35 .head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
34 .dac = { .nr = 3, .new = nv50_dac_new }, 36 .dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
35 .sor = { .nr = 4, .new = mcp77_sor_new }, 37 .sor = { .cnt = g94_sor_cnt, .new = mcp77_sor_new },
36 .pior = { .nr = 3, .new = nv50_pior_new }, 38 .pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
37}; 39};
38 40
39int 41int
40mcp77_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) 42mcp77_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
41{ 43{
42 return nv50_disp_new_(&mcp77_disp, device, index, 2, pdisp); 44 return nv50_disp_new_(&mcp77_disp, device, index, pdisp);
43} 45}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
index 7b75c57c12ed..85d9329cfa0e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
@@ -26,18 +26,20 @@
26 26
27static const struct nv50_disp_func 27static const struct nv50_disp_func
28mcp89_disp = { 28mcp89_disp = {
29 .init = nv50_disp_init,
30 .fini = nv50_disp_fini,
29 .intr = nv50_disp_intr, 31 .intr = nv50_disp_intr,
30 .uevent = &nv50_disp_chan_uevent, 32 .uevent = &nv50_disp_chan_uevent,
31 .super = nv50_disp_super, 33 .super = nv50_disp_super,
32 .root = &gt215_disp_root_oclass, 34 .root = &gt215_disp_root_oclass,
33 .head.new = nv50_head_new, 35 .head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
34 .dac = { .nr = 3, .new = nv50_dac_new }, 36 .dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
35 .sor = { .nr = 4, .new = mcp89_sor_new }, 37 .sor = { .cnt = g94_sor_cnt, .new = mcp89_sor_new },
36 .pior = { .nr = 3, .new = nv50_pior_new }, 38 .pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
37}; 39};
38 40
39int 41int
40mcp89_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) 42mcp89_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
41{ 43{
42 return nv50_disp_new_(&mcp89_disp, device, index, 2, pdisp); 44 return nv50_disp_new_(&mcp89_disp, device, index, pdisp);
43} 45}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 0c570dbd3021..f89c7b977aa5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -24,11 +24,12 @@
24#include "nv50.h" 24#include "nv50.h"
25#include "head.h" 25#include "head.h"
26#include "ior.h" 26#include "ior.h"
27#include "channv50.h"
27#include "rootnv50.h" 28#include "rootnv50.h"
28 29
29#include <core/client.h> 30#include <core/client.h>
30#include <core/enum.h> 31#include <core/enum.h>
31#include <core/gpuobj.h> 32#include <core/ramht.h>
32#include <subdev/bios.h> 33#include <subdev/bios.h>
33#include <subdev/bios/disp.h> 34#include <subdev/bios/disp.h>
34#include <subdev/bios/init.h> 35#include <subdev/bios/init.h>
@@ -49,29 +50,115 @@ nv50_disp_intr_(struct nvkm_disp *base)
49 disp->func->intr(disp); 50 disp->func->intr(disp);
50} 51}
51 52
53static void
54nv50_disp_fini_(struct nvkm_disp *base)
55{
56 struct nv50_disp *disp = nv50_disp(base);
57 disp->func->fini(disp);
58}
59
60static int
61nv50_disp_init_(struct nvkm_disp *base)
62{
63 struct nv50_disp *disp = nv50_disp(base);
64 return disp->func->init(disp);
65}
66
52static void * 67static void *
53nv50_disp_dtor_(struct nvkm_disp *base) 68nv50_disp_dtor_(struct nvkm_disp *base)
54{ 69{
55 struct nv50_disp *disp = nv50_disp(base); 70 struct nv50_disp *disp = nv50_disp(base);
71
72 nvkm_ramht_del(&disp->ramht);
73 nvkm_gpuobj_del(&disp->inst);
74
56 nvkm_event_fini(&disp->uevent); 75 nvkm_event_fini(&disp->uevent);
57 if (disp->wq) 76 if (disp->wq)
58 destroy_workqueue(disp->wq); 77 destroy_workqueue(disp->wq);
78
59 return disp; 79 return disp;
60} 80}
61 81
82static int
83nv50_disp_oneinit_(struct nvkm_disp *base)
84{
85 struct nv50_disp *disp = nv50_disp(base);
86 const struct nv50_disp_func *func = disp->func;
87 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
88 struct nvkm_device *device = subdev->device;
89 int ret, i;
90
91 if (func->wndw.cnt) {
92 disp->wndw.nr = func->wndw.cnt(&disp->base, &disp->wndw.mask);
93 nvkm_debug(subdev, "Window(s): %d (%08lx)\n",
94 disp->wndw.nr, disp->wndw.mask);
95 }
96
97 disp->head.nr = func->head.cnt(&disp->base, &disp->head.mask);
98 nvkm_debug(subdev, " Head(s): %d (%02lx)\n",
99 disp->head.nr, disp->head.mask);
100 for_each_set_bit(i, &disp->head.mask, disp->head.nr) {
101 ret = func->head.new(&disp->base, i);
102 if (ret)
103 return ret;
104 }
105
106 if (func->dac.cnt) {
107 disp->dac.nr = func->dac.cnt(&disp->base, &disp->dac.mask);
108 nvkm_debug(subdev, " DAC(s): %d (%02lx)\n",
109 disp->dac.nr, disp->dac.mask);
110 for_each_set_bit(i, &disp->dac.mask, disp->dac.nr) {
111 ret = func->dac.new(&disp->base, i);
112 if (ret)
113 return ret;
114 }
115 }
116
117 if (func->pior.cnt) {
118 disp->pior.nr = func->pior.cnt(&disp->base, &disp->pior.mask);
119 nvkm_debug(subdev, " PIOR(s): %d (%02lx)\n",
120 disp->pior.nr, disp->pior.mask);
121 for_each_set_bit(i, &disp->pior.mask, disp->pior.nr) {
122 ret = func->pior.new(&disp->base, i);
123 if (ret)
124 return ret;
125 }
126 }
127
128 disp->sor.nr = func->sor.cnt(&disp->base, &disp->sor.mask);
129 nvkm_debug(subdev, " SOR(s): %d (%02lx)\n",
130 disp->sor.nr, disp->sor.mask);
131 for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) {
132 ret = func->sor.new(&disp->base, i);
133 if (ret)
134 return ret;
135 }
136
137 ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL,
138 &disp->inst);
139 if (ret)
140 return ret;
141
142 return nvkm_ramht_new(device, func->ramht_size ? func->ramht_size :
143 0x1000, 0, disp->inst, &disp->ramht);
144}
145
62static const struct nvkm_disp_func 146static const struct nvkm_disp_func
63nv50_disp_ = { 147nv50_disp_ = {
64 .dtor = nv50_disp_dtor_, 148 .dtor = nv50_disp_dtor_,
149 .oneinit = nv50_disp_oneinit_,
150 .init = nv50_disp_init_,
151 .fini = nv50_disp_fini_,
65 .intr = nv50_disp_intr_, 152 .intr = nv50_disp_intr_,
66 .root = nv50_disp_root_, 153 .root = nv50_disp_root_,
67}; 154};
68 155
69int 156int
70nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device, 157nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
71 int index, int heads, struct nvkm_disp **pdisp) 158 int index, struct nvkm_disp **pdisp)
72{ 159{
73 struct nv50_disp *disp; 160 struct nv50_disp *disp;
74 int ret, i; 161 int ret;
75 162
76 if (!(disp = kzalloc(sizeof(*disp), GFP_KERNEL))) 163 if (!(disp = kzalloc(sizeof(*disp), GFP_KERNEL)))
77 return -ENOMEM; 164 return -ENOMEM;
@@ -85,33 +172,11 @@ nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
85 disp->wq = create_singlethread_workqueue("nvkm-disp"); 172 disp->wq = create_singlethread_workqueue("nvkm-disp");
86 if (!disp->wq) 173 if (!disp->wq)
87 return -ENOMEM; 174 return -ENOMEM;
88 INIT_WORK(&disp->supervisor, func->super);
89
90 for (i = 0; func->head.new && i < heads; i++) {
91 ret = func->head.new(&disp->base, i);
92 if (ret)
93 return ret;
94 }
95
96 for (i = 0; func->dac.new && i < func->dac.nr; i++) {
97 ret = func->dac.new(&disp->base, i);
98 if (ret)
99 return ret;
100 }
101
102 for (i = 0; func->pior.new && i < func->pior.nr; i++) {
103 ret = func->pior.new(&disp->base, i);
104 if (ret)
105 return ret;
106 }
107 175
108 for (i = 0; func->sor.new && i < func->sor.nr; i++) { 176 INIT_WORK(&disp->supervisor, func->super);
109 ret = func->sor.new(&disp->base, i);
110 if (ret)
111 return ret;
112 }
113 177
114 return nvkm_event_init(func->uevent, 1, 1 + (heads * 4), &disp->uevent); 178 return nvkm_event_init(func->uevent, 1, ARRAY_SIZE(disp->chan),
179 &disp->uevent);
115} 180}
116 181
117static u32 182static u32
@@ -613,20 +678,96 @@ nv50_disp_intr(struct nv50_disp *disp)
613 } 678 }
614} 679}
615 680
681void
682nv50_disp_fini(struct nv50_disp *disp)
683{
684 struct nvkm_device *device = disp->base.engine.subdev.device;
685 /* disable all interrupts */
686 nvkm_wr32(device, 0x610024, 0x00000000);
687 nvkm_wr32(device, 0x610020, 0x00000000);
688}
689
690int
691nv50_disp_init(struct nv50_disp *disp)
692{
693 struct nvkm_device *device = disp->base.engine.subdev.device;
694 struct nvkm_head *head;
695 u32 tmp;
696 int i;
697
698 /* The below segments of code copying values from one register to
699 * another appear to inform EVO of the display capabilities or
700 * something similar. NFI what the 0x614004 caps are for..
701 */
702 tmp = nvkm_rd32(device, 0x614004);
703 nvkm_wr32(device, 0x610184, tmp);
704
705 /* ... CRTC caps */
706 list_for_each_entry(head, &disp->base.head, head) {
707 tmp = nvkm_rd32(device, 0x616100 + (head->id * 0x800));
708 nvkm_wr32(device, 0x610190 + (head->id * 0x10), tmp);
709 tmp = nvkm_rd32(device, 0x616104 + (head->id * 0x800));
710 nvkm_wr32(device, 0x610194 + (head->id * 0x10), tmp);
711 tmp = nvkm_rd32(device, 0x616108 + (head->id * 0x800));
712 nvkm_wr32(device, 0x610198 + (head->id * 0x10), tmp);
713 tmp = nvkm_rd32(device, 0x61610c + (head->id * 0x800));
714 nvkm_wr32(device, 0x61019c + (head->id * 0x10), tmp);
715 }
716
717 /* ... DAC caps */
718 for (i = 0; i < disp->dac.nr; i++) {
719 tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
720 nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
721 }
722
723 /* ... SOR caps */
724 for (i = 0; i < disp->sor.nr; i++) {
725 tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
726 nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
727 }
728
729 /* ... PIOR caps */
730 for (i = 0; i < disp->pior.nr; i++) {
731 tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
732 nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
733 }
734
735 /* steal display away from vbios, or something like that */
736 if (nvkm_rd32(device, 0x610024) & 0x00000100) {
737 nvkm_wr32(device, 0x610024, 0x00000100);
738 nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
739 if (nvkm_msec(device, 2000,
740 if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
741 break;
742 ) < 0)
743 return -EBUSY;
744 }
745
746 /* point at display engine memory area (hash table, objects) */
747 nvkm_wr32(device, 0x610010, (disp->inst->addr >> 8) | 9);
748
749 /* enable supervisor interrupts, disable everything else */
750 nvkm_wr32(device, 0x61002c, 0x00000370);
751 nvkm_wr32(device, 0x610028, 0x00000000);
752 return 0;
753}
754
616static const struct nv50_disp_func 755static const struct nv50_disp_func
617nv50_disp = { 756nv50_disp = {
757 .init = nv50_disp_init,
758 .fini = nv50_disp_fini,
618 .intr = nv50_disp_intr, 759 .intr = nv50_disp_intr,
619 .uevent = &nv50_disp_chan_uevent, 760 .uevent = &nv50_disp_chan_uevent,
620 .super = nv50_disp_super, 761 .super = nv50_disp_super,
621 .root = &nv50_disp_root_oclass, 762 .root = &nv50_disp_root_oclass,
622 .head.new = nv50_head_new, 763 .head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
623 .dac = { .nr = 3, .new = nv50_dac_new }, 764 .dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
624 .sor = { .nr = 2, .new = nv50_sor_new }, 765 .sor = { .cnt = nv50_sor_cnt, .new = nv50_sor_new },
625 .pior = { .nr = 3, .new = nv50_pior_new }, 766 .pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
626}; 767};
627 768
628int 769int
629nv50_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) 770nv50_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
630{ 771{
631 return nv50_disp_new_(&nv50_disp, device, index, 2, pdisp); 772 return nv50_disp_new_(&nv50_disp, device, index, pdisp);
632} 773}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index eb0b8acb1c5b..8580382ab248 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -16,14 +16,26 @@ struct nv50_disp {
16 struct nvkm_event uevent; 16 struct nvkm_event uevent;
17 17
18 struct { 18 struct {
19 unsigned long mask;
20 int nr;
21 } wndw, head, dac;
22
23 struct {
24 unsigned long mask;
25 int nr;
19 u32 lvdsconf; 26 u32 lvdsconf;
20 } sor; 27 } sor;
21 28
22 struct { 29 struct {
30 unsigned long mask;
31 int nr;
23 u8 type[3]; 32 u8 type[3];
24 } pior; 33 } pior;
25 34
26 struct nv50_disp_chan *chan[21]; 35 struct nvkm_gpuobj *inst;
36 struct nvkm_ramht *ramht;
37
38 struct nv50_disp_chan *chan[81];
27}; 39};
28 40
29void nv50_disp_super_1(struct nv50_disp *); 41void nv50_disp_super_1(struct nv50_disp *);
@@ -34,11 +46,11 @@ void nv50_disp_super_2_2(struct nv50_disp *, struct nvkm_head *);
34void nv50_disp_super_3_0(struct nv50_disp *, struct nvkm_head *); 46void nv50_disp_super_3_0(struct nv50_disp *, struct nvkm_head *);
35 47
36int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *, 48int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
37 int index, int heads, struct nvkm_disp **); 49 int index, struct nvkm_disp **);
38int gf119_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
39 int index, struct nvkm_disp **);
40 50
41struct nv50_disp_func { 51struct nv50_disp_func {
52 int (*init)(struct nv50_disp *);
53 void (*fini)(struct nv50_disp *);
42 void (*intr)(struct nv50_disp *); 54 void (*intr)(struct nv50_disp *);
43 void (*intr_error)(struct nv50_disp *, int chid); 55 void (*intr_error)(struct nv50_disp *, int chid);
44 56
@@ -48,28 +60,20 @@ struct nv50_disp_func {
48 const struct nvkm_disp_oclass *root; 60 const struct nvkm_disp_oclass *root;
49 61
50 struct { 62 struct {
63 int (*cnt)(struct nvkm_disp *, unsigned long *mask);
51 int (*new)(struct nvkm_disp *, int id); 64 int (*new)(struct nvkm_disp *, int id);
52 } head; 65 } wndw, head, dac, sor, pior;
53 66
54 struct { 67 u16 ramht_size;
55 int nr;
56 int (*new)(struct nvkm_disp *, int id);
57 } dac;
58
59 struct {
60 int nr;
61 int (*new)(struct nvkm_disp *, int id);
62 } sor;
63
64 struct {
65 int nr;
66 int (*new)(struct nvkm_disp *, int id);
67 } pior;
68}; 68};
69 69
70int nv50_disp_init(struct nv50_disp *);
71void nv50_disp_fini(struct nv50_disp *);
70void nv50_disp_intr(struct nv50_disp *); 72void nv50_disp_intr(struct nv50_disp *);
71void nv50_disp_super(struct work_struct *); 73void nv50_disp_super(struct work_struct *);
72 74
75int gf119_disp_init(struct nv50_disp *);
76void gf119_disp_fini(struct nv50_disp *);
73void gf119_disp_intr(struct nv50_disp *); 77void gf119_disp_intr(struct nv50_disp *);
74void gf119_disp_super(struct work_struct *); 78void gf119_disp_super(struct work_struct *);
75void gf119_disp_intr_error(struct nv50_disp *, int); 79void gf119_disp_intr_error(struct nv50_disp *, int);
@@ -77,4 +81,12 @@ void gf119_disp_intr_error(struct nv50_disp *, int);
77void nv50_disp_dptmds_war_2(struct nv50_disp *, struct dcb_output *); 81void nv50_disp_dptmds_war_2(struct nv50_disp *, struct dcb_output *);
78void nv50_disp_dptmds_war_3(struct nv50_disp *, struct dcb_output *); 82void nv50_disp_dptmds_war_3(struct nv50_disp *, struct dcb_output *);
79void nv50_disp_update_sppll1(struct nv50_disp *); 83void nv50_disp_update_sppll1(struct nv50_disp *);
84
85extern const struct nvkm_event_func nv50_disp_chan_uevent;
86int nv50_disp_chan_uevent_ctor(struct nvkm_object *, void *, u32,
87 struct nvkm_notify *);
88void nv50_disp_chan_uevent_send(struct nv50_disp *, int);
89
90extern const struct nvkm_event_func gf119_disp_chan_uevent;
91extern const struct nvkm_event_func gv100_disp_chan_uevent;
80#endif 92#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
index 1f9fd3403f07..1ae0bcfc89b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
@@ -22,16 +22,11 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "channv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26 25
27#include <nvif/class.h> 26int
28 27gf119_disp_oimm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
29const struct nv50_disp_pioc_oclass 28 struct nv50_disp *disp, struct nvkm_object **pobject)
30gf119_disp_oimm_oclass = { 29{
31 .base.oclass = GF110_DISP_OVERLAY, 30 return nv50_disp_oimm_new_(&gf119_disp_pioc_func, disp, 9, 9,
32 .base.minver = 0, 31 oclass, argv, argc, pobject);
33 .base.maxver = 0, 32}
34 .ctor = nv50_disp_oimm_new,
35 .func = &gf119_disp_pioc_func,
36 .chid = { 9, 9 },
37};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
deleted file mode 100644
index 0c09fe85e952..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_pioc_oclass
30gk104_disp_oimm_oclass = {
31 .base.oclass = GK104_DISP_OVERLAY,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_oimm_new,
35 .func = &gf119_disp_pioc_func,
36 .chid = { 9, 9 },
37};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
index abf82365c671..30ffb1008505 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
@@ -22,16 +22,11 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24#include "channv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26 25
27#include <nvif/class.h> 26int
28 27gp102_disp_oimm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
29const struct nv50_disp_pioc_oclass 28 struct nv50_disp *disp, struct nvkm_object **pobject)
30gp102_disp_oimm_oclass = { 29{
31 .base.oclass = GK104_DISP_OVERLAY, 30 return nv50_disp_oimm_new_(&gf119_disp_pioc_func, disp, 9, 13,
32 .base.minver = 0, 31 oclass, argv, argc, pobject);
33 .base.maxver = 0, 32}
34 .ctor = nv50_disp_oimm_new,
35 .func = &gf119_disp_pioc_func,
36 .chid = { 9, 13 },
37};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
deleted file mode 100644
index 1281db28aebd..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_pioc_oclass
30gt215_disp_oimm_oclass = {
31 .base.oclass = GT214_DISP_OVERLAY,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_oimm_new,
35 .func = &nv50_disp_pioc_func,
36 .chid = { 5, 5 },
37};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
index f3b0fa2c5924..0db99bfe9db9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
@@ -23,30 +23,26 @@
23 */ 23 */
24#include "channv50.h" 24#include "channv50.h"
25#include "head.h" 25#include "head.h"
26#include "rootnv50.h"
27 26
28#include <core/client.h> 27#include <core/client.h>
29 28
30#include <nvif/class.h>
31#include <nvif/cl507b.h> 29#include <nvif/cl507b.h>
32#include <nvif/unpack.h> 30#include <nvif/unpack.h>
33 31
34int 32int
35nv50_disp_oimm_new(const struct nv50_disp_chan_func *func, 33nv50_disp_oimm_new_(const struct nv50_disp_chan_func *func,
36 const struct nv50_disp_chan_mthd *mthd, 34 struct nv50_disp *disp, int ctrl, int user,
37 struct nv50_disp_root *root, int ctrl, int user, 35 const struct nvkm_oclass *oclass, void *argv, u32 argc,
38 const struct nvkm_oclass *oclass, void *data, u32 size, 36 struct nvkm_object **pobject)
39 struct nvkm_object **pobject)
40{ 37{
41 union { 38 union {
42 struct nv50_disp_overlay_v0 v0; 39 struct nv50_disp_overlay_v0 v0;
43 } *args = data; 40 } *args = argv;
44 struct nvkm_object *parent = oclass->parent; 41 struct nvkm_object *parent = oclass->parent;
45 struct nv50_disp *disp = root->disp;
46 int head, ret = -ENOSYS; 42 int head, ret = -ENOSYS;
47 43
48 nvif_ioctl(parent, "create disp overlay size %d\n", size); 44 nvif_ioctl(parent, "create disp overlay size %d\n", argc);
49 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { 45 if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
50 nvif_ioctl(parent, "create disp overlay vers %d head %d\n", 46 nvif_ioctl(parent, "create disp overlay vers %d head %d\n",
51 args->v0.version, args->v0.head); 47 args->v0.version, args->v0.head);
52 if (!nvkm_head_find(&disp->base, args->v0.head)) 48 if (!nvkm_head_find(&disp->base, args->v0.head))
@@ -55,16 +51,14 @@ nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
55 } else 51 } else
56 return ret; 52 return ret;
57 53
58 return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head, 54 return nv50_disp_chan_new_(func, NULL, disp, ctrl + head, user + head,
59 head, oclass, pobject); 55 head, oclass, pobject);
60} 56}
61 57
62const struct nv50_disp_pioc_oclass 58int
63nv50_disp_oimm_oclass = { 59nv50_disp_oimm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
64 .base.oclass = NV50_DISP_OVERLAY, 60 struct nv50_disp *disp, struct nvkm_object **pobject)
65 .base.minver = 0, 61{
66 .base.maxver = 0, 62 return nv50_disp_oimm_new_(&nv50_disp_pioc_func, disp, 5, 5,
67 .ctor = nv50_disp_oimm_new, 63 oclass, argv, argc, pobject);
68 .func = &nv50_disp_pioc_func, 64}
69 .chid = { 5, 5 },
70};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
index db6234eebc61..31b915d48699 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
@@ -21,10 +21,7 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28 25
29static const struct nv50_disp_mthd_list 26static const struct nv50_disp_mthd_list
30g84_disp_ovly_mthd_base = { 27g84_disp_ovly_mthd_base = {
@@ -54,8 +51,8 @@ g84_disp_ovly_mthd_base = {
54 } 51 }
55}; 52};
56 53
57const struct nv50_disp_chan_mthd 54static const struct nv50_disp_chan_mthd
58g84_disp_ovly_chan_mthd = { 55g84_disp_ovly_mthd = {
59 .name = "Overlay", 56 .name = "Overlay",
60 .addr = 0x000540, 57 .addr = 0x000540,
61 .prev = 0x000004, 58 .prev = 0x000004,
@@ -65,13 +62,10 @@ g84_disp_ovly_chan_mthd = {
65 } 62 }
66}; 63};
67 64
68const struct nv50_disp_dmac_oclass 65int
69g84_disp_ovly_oclass = { 66g84_disp_ovly_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
70 .base.oclass = G82_DISP_OVERLAY_CHANNEL_DMA, 67 struct nv50_disp *disp, struct nvkm_object **pobject)
71 .base.minver = 0, 68{
72 .base.maxver = 0, 69 return nv50_disp_ovly_new_(&nv50_disp_dmac_func, &g84_disp_ovly_mthd,
73 .ctor = nv50_disp_ovly_new, 70 disp, 3, oclass, argv, argc, pobject);
74 .func = &nv50_disp_dmac_func, 71}
75 .mthd = &g84_disp_ovly_chan_mthd,
76 .chid = 3,
77};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
index 5985879abd23..83fd534c44da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
@@ -21,10 +21,7 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28 25
29static const struct nv50_disp_mthd_list 26static const struct nv50_disp_mthd_list
30gf119_disp_ovly_mthd_base = { 27gf119_disp_ovly_mthd_base = {
@@ -79,7 +76,7 @@ gf119_disp_ovly_mthd_base = {
79}; 76};
80 77
81static const struct nv50_disp_chan_mthd 78static const struct nv50_disp_chan_mthd
82gf119_disp_ovly_chan_mthd = { 79gf119_disp_ovly_mthd = {
83 .name = "Overlay", 80 .name = "Overlay",
84 .addr = 0x001000, 81 .addr = 0x001000,
85 .prev = -0x020000, 82 .prev = -0x020000,
@@ -89,13 +86,10 @@ gf119_disp_ovly_chan_mthd = {
89 } 86 }
90}; 87};
91 88
92const struct nv50_disp_dmac_oclass 89int
93gf119_disp_ovly_oclass = { 90gf119_disp_ovly_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
94 .base.oclass = GF110_DISP_OVERLAY_CONTROL_DMA, 91 struct nv50_disp *disp, struct nvkm_object **pobject)
95 .base.minver = 0, 92{
96 .base.maxver = 0, 93 return nv50_disp_ovly_new_(&gf119_disp_dmac_func, &gf119_disp_ovly_mthd,
97 .ctor = nv50_disp_ovly_new, 94 disp, 5, oclass, argv, argc, pobject);
98 .func = &gf119_disp_dmac_func, 95}
99 .mthd = &gf119_disp_ovly_chan_mthd,
100 .chid = 5,
101};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
index 2f0220b39f34..a7acacbc92c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
@@ -21,10 +21,7 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28 25
29static const struct nv50_disp_mthd_list 26static const struct nv50_disp_mthd_list
30gk104_disp_ovly_mthd_base = { 27gk104_disp_ovly_mthd_base = {
@@ -81,7 +78,7 @@ gk104_disp_ovly_mthd_base = {
81}; 78};
82 79
83const struct nv50_disp_chan_mthd 80const struct nv50_disp_chan_mthd
84gk104_disp_ovly_chan_mthd = { 81gk104_disp_ovly_mthd = {
85 .name = "Overlay", 82 .name = "Overlay",
86 .addr = 0x001000, 83 .addr = 0x001000,
87 .prev = -0x020000, 84 .prev = -0x020000,
@@ -91,13 +88,10 @@ gk104_disp_ovly_chan_mthd = {
91 } 88 }
92}; 89};
93 90
94const struct nv50_disp_dmac_oclass 91int
95gk104_disp_ovly_oclass = { 92gk104_disp_ovly_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
96 .base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA, 93 struct nv50_disp *disp, struct nvkm_object **pobject)
97 .base.minver = 0, 94{
98 .base.maxver = 0, 95 return nv50_disp_ovly_new_(&gf119_disp_dmac_func, &gk104_disp_ovly_mthd,
99 .ctor = nv50_disp_ovly_new, 96 disp, 5, oclass, argv, argc, pobject);
100 .func = &gf119_disp_dmac_func, 97}
101 .mthd = &gk104_disp_ovly_chan_mthd,
102 .chid = 5,
103};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c
index 589bd2f12b41..e0eca6ea914c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c
@@ -21,18 +21,12 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26 25
27#include <nvif/class.h> 26int
28 27gp102_disp_ovly_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
29const struct nv50_disp_dmac_oclass 28 struct nv50_disp *disp, struct nvkm_object **pobject)
30gp102_disp_ovly_oclass = { 29{
31 .base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA, 30 return nv50_disp_ovly_new_(&gp102_disp_dmac_func, &gk104_disp_ovly_mthd,
32 .base.minver = 0, 31 disp, 5, oclass, argv, argc, pobject);
33 .base.maxver = 0, 32}
34 .ctor = nv50_disp_ovly_new,
35 .func = &gp102_disp_dmac_func,
36 .mthd = &gk104_disp_ovly_chan_mthd,
37 .chid = 5,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
index f858053db83d..dc60cd00dc16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
@@ -21,10 +21,7 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28 25
29static const struct nv50_disp_mthd_list 26static const struct nv50_disp_mthd_list
30gt200_disp_ovly_mthd_base = { 27gt200_disp_ovly_mthd_base = {
@@ -58,7 +55,7 @@ gt200_disp_ovly_mthd_base = {
58}; 55};
59 56
60static const struct nv50_disp_chan_mthd 57static const struct nv50_disp_chan_mthd
61gt200_disp_ovly_chan_mthd = { 58gt200_disp_ovly_mthd = {
62 .name = "Overlay", 59 .name = "Overlay",
63 .addr = 0x000540, 60 .addr = 0x000540,
64 .prev = 0x000004, 61 .prev = 0x000004,
@@ -68,13 +65,10 @@ gt200_disp_ovly_chan_mthd = {
68 } 65 }
69}; 66};
70 67
71const struct nv50_disp_dmac_oclass 68int
72gt200_disp_ovly_oclass = { 69gt200_disp_ovly_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
73 .base.oclass = GT200_DISP_OVERLAY_CHANNEL_DMA, 70 struct nv50_disp *disp, struct nvkm_object **pobject)
74 .base.minver = 0, 71{
75 .base.maxver = 0, 72 return nv50_disp_ovly_new_(&nv50_disp_dmac_func, &gt200_disp_ovly_mthd,
76 .ctor = nv50_disp_ovly_new, 73 disp, 3, oclass, argv, argc, pobject);
77 .func = &nv50_disp_dmac_func, 74}
78 .mthd = &gt200_disp_ovly_chan_mthd,
79 .chid = 3,
80};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c
deleted file mode 100644
index c947e1e16a37..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_dmac_oclass
30gt215_disp_ovly_oclass = {
31 .base.oclass = GT214_DISP_OVERLAY_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_ovly_new,
35 .func = &nv50_disp_dmac_func,
36 .mthd = &g84_disp_ovly_chan_mthd,
37 .chid = 3,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
index 9ebaaa6e9e33..6974c12c4518 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
@@ -21,33 +21,30 @@
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "dmacnv50.h" 24#include "channv50.h"
25#include "head.h" 25#include "head.h"
26#include "rootnv50.h"
27 26
28#include <core/client.h> 27#include <core/client.h>
29 28
30#include <nvif/class.h>
31#include <nvif/cl507e.h> 29#include <nvif/cl507e.h>
32#include <nvif/unpack.h> 30#include <nvif/unpack.h>
33 31
34int 32int
35nv50_disp_ovly_new(const struct nv50_disp_dmac_func *func, 33nv50_disp_ovly_new_(const struct nv50_disp_chan_func *func,
36 const struct nv50_disp_chan_mthd *mthd, 34 const struct nv50_disp_chan_mthd *mthd,
37 struct nv50_disp_root *root, int chid, 35 struct nv50_disp *disp, int chid,
38 const struct nvkm_oclass *oclass, void *data, u32 size, 36 const struct nvkm_oclass *oclass, void *argv, u32 argc,
39 struct nvkm_object **pobject) 37 struct nvkm_object **pobject)
40{ 38{
41 union { 39 union {
42 struct nv50_disp_overlay_channel_dma_v0 v0; 40 struct nv50_disp_overlay_channel_dma_v0 v0;
43 } *args = data; 41 } *args = argv;
44 struct nvkm_object *parent = oclass->parent; 42 struct nvkm_object *parent = oclass->parent;
45 struct nv50_disp *disp = root->disp;
46 int head, ret = -ENOSYS; 43 int head, ret = -ENOSYS;
47 u64 push; 44 u64 push;
48 45
49 nvif_ioctl(parent, "create disp overlay channel dma size %d\n", size); 46 nvif_ioctl(parent, "create disp overlay channel dma size %d\n", argc);
50 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { 47 if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
51 nvif_ioctl(parent, "create disp overlay channel dma vers %d " 48 nvif_ioctl(parent, "create disp overlay channel dma vers %d "
52 "pushbuf %016llx head %d\n", 49 "pushbuf %016llx head %d\n",
53 args->v0.version, args->v0.pushbuf, args->v0.head); 50 args->v0.version, args->v0.pushbuf, args->v0.head);
@@ -58,7 +55,7 @@ nv50_disp_ovly_new(const struct nv50_disp_dmac_func *func,
58 } else 55 } else
59 return ret; 56 return ret;
60 57
61 return nv50_disp_dmac_new_(func, mthd, root, chid + head, 58 return nv50_disp_dmac_new_(func, mthd, disp, chid + head,
62 head, push, oclass, pobject); 59 head, push, oclass, pobject);
63} 60}
64 61
@@ -91,7 +88,7 @@ nv50_disp_ovly_mthd_base = {
91}; 88};
92 89
93static const struct nv50_disp_chan_mthd 90static const struct nv50_disp_chan_mthd
94nv50_disp_ovly_chan_mthd = { 91nv50_disp_ovly_mthd = {
95 .name = "Overlay", 92 .name = "Overlay",
96 .addr = 0x000540, 93 .addr = 0x000540,
97 .prev = 0x000004, 94 .prev = 0x000004,
@@ -101,13 +98,10 @@ nv50_disp_ovly_chan_mthd = {
101 } 98 }
102}; 99};
103 100
104const struct nv50_disp_dmac_oclass 101int
105nv50_disp_ovly_oclass = { 102nv50_disp_ovly_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
106 .base.oclass = NV50_DISP_OVERLAY_CHANNEL_DMA, 103 struct nv50_disp *disp, struct nvkm_object **pobject)
107 .base.minver = 0, 104{
108 .base.maxver = 0, 105 return nv50_disp_ovly_new_(&nv50_disp_dmac_func, &nv50_disp_ovly_mthd,
109 .ctor = nv50_disp_ovly_new, 106 disp, 3, oclass, argv, argc, pobject);
110 .func = &nv50_disp_dmac_func, 107}
111 .mthd = &nv50_disp_ovly_chan_mthd,
112 .chid = 3,
113};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
index 0abaa6431943..5296e7bee813 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
@@ -29,7 +29,7 @@
29static void 29static void
30gf119_disp_pioc_fini(struct nv50_disp_chan *chan) 30gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
31{ 31{
32 struct nv50_disp *disp = chan->root->disp; 32 struct nv50_disp *disp = chan->disp;
33 struct nvkm_subdev *subdev = &disp->base.engine.subdev; 33 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
34 struct nvkm_device *device = subdev->device; 34 struct nvkm_device *device = subdev->device;
35 int ctrl = chan->chid.ctrl; 35 int ctrl = chan->chid.ctrl;
@@ -43,24 +43,17 @@ gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
43 nvkm_error(subdev, "ch %d fini: %08x\n", user, 43 nvkm_error(subdev, "ch %d fini: %08x\n", user,
44 nvkm_rd32(device, 0x610490 + (ctrl * 0x10))); 44 nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
45 } 45 }
46
47 /* disable error reporting and completion notification */
48 nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
49 nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
50} 46}
51 47
52static int 48static int
53gf119_disp_pioc_init(struct nv50_disp_chan *chan) 49gf119_disp_pioc_init(struct nv50_disp_chan *chan)
54{ 50{
55 struct nv50_disp *disp = chan->root->disp; 51 struct nv50_disp *disp = chan->disp;
56 struct nvkm_subdev *subdev = &disp->base.engine.subdev; 52 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
57 struct nvkm_device *device = subdev->device; 53 struct nvkm_device *device = subdev->device;
58 int ctrl = chan->chid.ctrl; 54 int ctrl = chan->chid.ctrl;
59 int user = chan->chid.user; 55 int user = chan->chid.user;
60 56
61 /* enable error reporting */
62 nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
63
64 /* activate channel */ 57 /* activate channel */
65 nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001); 58 nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001);
66 if (nvkm_msec(device, 2000, 59 if (nvkm_msec(device, 2000,
@@ -80,4 +73,6 @@ const struct nv50_disp_chan_func
80gf119_disp_pioc_func = { 73gf119_disp_pioc_func = {
81 .init = gf119_disp_pioc_init, 74 .init = gf119_disp_pioc_init,
82 .fini = gf119_disp_pioc_fini, 75 .fini = gf119_disp_pioc_fini,
76 .intr = gf119_disp_chan_intr,
77 .user = nv50_disp_chan_user,
83}; 78};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
index 0211e0e8a35f..4faed6fce682 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
@@ -29,7 +29,7 @@
29static void 29static void
30nv50_disp_pioc_fini(struct nv50_disp_chan *chan) 30nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
31{ 31{
32 struct nv50_disp *disp = chan->root->disp; 32 struct nv50_disp *disp = chan->disp;
33 struct nvkm_subdev *subdev = &disp->base.engine.subdev; 33 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
34 struct nvkm_device *device = subdev->device; 34 struct nvkm_device *device = subdev->device;
35 int ctrl = chan->chid.ctrl; 35 int ctrl = chan->chid.ctrl;
@@ -48,7 +48,7 @@ nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
48static int 48static int
49nv50_disp_pioc_init(struct nv50_disp_chan *chan) 49nv50_disp_pioc_init(struct nv50_disp_chan *chan)
50{ 50{
51 struct nv50_disp *disp = chan->root->disp; 51 struct nv50_disp *disp = chan->disp;
52 struct nvkm_subdev *subdev = &disp->base.engine.subdev; 52 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
53 struct nvkm_device *device = subdev->device; 53 struct nvkm_device *device = subdev->device;
54 int ctrl = chan->chid.ctrl; 54 int ctrl = chan->chid.ctrl;
@@ -82,4 +82,6 @@ const struct nv50_disp_chan_func
82nv50_disp_pioc_func = { 82nv50_disp_pioc_func = {
83 .init = nv50_disp_pioc_init, 83 .init = nv50_disp_pioc_init,
84 .fini = nv50_disp_pioc_fini, 84 .fini = nv50_disp_pioc_fini,
85 .intr = nv50_disp_chan_intr,
86 .user = nv50_disp_chan_user,
85}; 87};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
index 99b3b9050635..e997a207f546 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
@@ -127,8 +127,13 @@ nv50_pior = {
127int 127int
128nv50_pior_new(struct nvkm_disp *disp, int id) 128nv50_pior_new(struct nvkm_disp *disp, int id)
129{ 129{
130 struct nvkm_device *device = disp->engine.subdev.device;
131 if (!(nvkm_rd32(device, 0x610184) & (0x10000000 << id)))
132 return 0;
133 return nvkm_ior_new_(&nv50_pior, disp, PIOR, id); 130 return nvkm_ior_new_(&nv50_pior, disp, PIOR, id);
134} 131}
132
133int
134nv50_pior_cnt(struct nvkm_disp *disp, unsigned long *pmask)
135{
136 struct nvkm_device *device = disp->engine.subdev.device;
137 *pmask = (nvkm_rd32(device, 0x610184) & 0x70000000) >> 28;
138 return 3;
139}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
index 6c9bfff6d043..ef66c5f38ad5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
@@ -12,6 +12,9 @@ void nvkm_disp_vblank(struct nvkm_disp *, int head);
12 12
13struct nvkm_disp_func { 13struct nvkm_disp_func {
14 void *(*dtor)(struct nvkm_disp *); 14 void *(*dtor)(struct nvkm_disp *);
15 int (*oneinit)(struct nvkm_disp *);
16 int (*init)(struct nvkm_disp *);
17 void (*fini)(struct nvkm_disp *);
15 void (*intr)(struct nvkm_disp *); 18 void (*intr)(struct nvkm_disp *);
16 19
17 const struct nvkm_disp_oclass *(*root)(struct nvkm_disp *); 20 const struct nvkm_disp_oclass *(*root)(struct nvkm_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
index 721e4f74d1fc..1ed371fd7ddf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
@@ -22,22 +22,19 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "rootnv50.h" 24#include "rootnv50.h"
25#include "dmacnv50.h" 25#include "channv50.h"
26 26
27#include <nvif/class.h> 27#include <nvif/class.h>
28 28
29static const struct nv50_disp_root_func 29static const struct nv50_disp_root_func
30g84_disp_root = { 30g84_disp_root = {
31 .init = nv50_disp_root_init, 31 .user = {
32 .fini = nv50_disp_root_fini, 32 {{0,0,G82_DISP_CURSOR }, nv50_disp_curs_new },
33 .dmac = { 33 {{0,0,G82_DISP_OVERLAY }, nv50_disp_oimm_new },
34 &g84_disp_core_oclass, 34 {{0,0,G82_DISP_BASE_CHANNEL_DMA }, g84_disp_base_new },
35 &g84_disp_base_oclass, 35 {{0,0,G82_DISP_CORE_CHANNEL_DMA }, g84_disp_core_new },
36 &g84_disp_ovly_oclass, 36 {{0,0,G82_DISP_OVERLAY_CHANNEL_DMA}, g84_disp_ovly_new },
37 }, 37 {}
38 .pioc = {
39 &g84_disp_oimm_oclass,
40 &g84_disp_curs_oclass,
41 }, 38 },
42}; 39};
43 40
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
index 9493f6edf62b..ef579eb00238 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
@@ -22,22 +22,19 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "rootnv50.h" 24#include "rootnv50.h"
25#include "dmacnv50.h" 25#include "channv50.h"
26 26
27#include <nvif/class.h> 27#include <nvif/class.h>
28 28
29static const struct nv50_disp_root_func 29static const struct nv50_disp_root_func
30g94_disp_root = { 30g94_disp_root = {
31 .init = nv50_disp_root_init, 31 .user = {
32 .fini = nv50_disp_root_fini, 32 {{0,0, G82_DISP_CURSOR }, nv50_disp_curs_new },
33 .dmac = { 33 {{0,0, G82_DISP_OVERLAY }, nv50_disp_oimm_new },
34 &g94_disp_core_oclass, 34 {{0,0,GT200_DISP_BASE_CHANNEL_DMA }, g84_disp_base_new },
35 &gt200_disp_base_oclass, 35 {{0,0,GT206_DISP_CORE_CHANNEL_DMA }, g94_disp_core_new },
36 &gt200_disp_ovly_oclass, 36 {{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, gt200_disp_ovly_new },
37 }, 37 {}
38 .pioc = {
39 &g84_disp_oimm_oclass,
40 &g84_disp_curs_oclass,
41 }, 38 },
42}; 39};
43 40
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
index 333c8424b413..fe011165dc02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
@@ -22,104 +22,19 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "rootnv50.h" 24#include "rootnv50.h"
25#include "head.h" 25#include "channv50.h"
26#include "dmacnv50.h"
27
28#include <core/ramht.h>
29#include <subdev/timer.h>
30 26
31#include <nvif/class.h> 27#include <nvif/class.h>
32 28
33void
34gf119_disp_root_fini(struct nv50_disp_root *root)
35{
36 struct nvkm_device *device = root->disp->base.engine.subdev.device;
37 /* disable all interrupts */
38 nvkm_wr32(device, 0x6100b0, 0x00000000);
39}
40
41int
42gf119_disp_root_init(struct nv50_disp_root *root)
43{
44 struct nv50_disp *disp = root->disp;
45 struct nvkm_head *head;
46 struct nvkm_device *device = disp->base.engine.subdev.device;
47 u32 tmp;
48 int i;
49
50 /* The below segments of code copying values from one register to
51 * another appear to inform EVO of the display capabilities or
52 * something similar.
53 */
54
55 /* ... CRTC caps */
56 list_for_each_entry(head, &disp->base.head, head) {
57 const u32 hoff = head->id * 0x800;
58 tmp = nvkm_rd32(device, 0x616104 + hoff);
59 nvkm_wr32(device, 0x6101b4 + hoff, tmp);
60 tmp = nvkm_rd32(device, 0x616108 + hoff);
61 nvkm_wr32(device, 0x6101b8 + hoff, tmp);
62 tmp = nvkm_rd32(device, 0x61610c + hoff);
63 nvkm_wr32(device, 0x6101bc + hoff, tmp);
64 }
65
66 /* ... DAC caps */
67 for (i = 0; i < disp->func->dac.nr; i++) {
68 tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
69 nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp);
70 }
71
72 /* ... SOR caps */
73 for (i = 0; i < disp->func->sor.nr; i++) {
74 tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
75 nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp);
76 }
77
78 /* steal display away from vbios, or something like that */
79 if (nvkm_rd32(device, 0x6100ac) & 0x00000100) {
80 nvkm_wr32(device, 0x6100ac, 0x00000100);
81 nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
82 if (nvkm_msec(device, 2000,
83 if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
84 break;
85 ) < 0)
86 return -EBUSY;
87 }
88
89 /* point at display engine memory area (hash table, objects) */
90 nvkm_wr32(device, 0x610010, (root->instmem->addr >> 8) | 9);
91
92 /* enable supervisor interrupts, disable everything else */
93 nvkm_wr32(device, 0x610090, 0x00000000);
94 nvkm_wr32(device, 0x6100a0, 0x00000000);
95 nvkm_wr32(device, 0x6100b0, 0x00000307);
96
97 /* disable underflow reporting, preventing an intermittent issue
98 * on some gk104 boards where the production vbios left this
99 * setting enabled by default.
100 *
101 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
102 */
103 list_for_each_entry(head, &disp->base.head, head) {
104 const u32 hoff = head->id * 0x800;
105 nvkm_mask(device, 0x616308 + hoff, 0x00000111, 0x00000010);
106 }
107
108 return 0;
109}
110
111static const struct nv50_disp_root_func 29static const struct nv50_disp_root_func
112gf119_disp_root = { 30gf119_disp_root = {
113 .init = gf119_disp_root_init, 31 .user = {
114 .fini = gf119_disp_root_fini, 32 {{0,0,GF110_DISP_CURSOR }, gf119_disp_curs_new },
115 .dmac = { 33 {{0,0,GF110_DISP_OVERLAY }, gf119_disp_oimm_new },
116 &gf119_disp_core_oclass, 34 {{0,0,GF110_DISP_BASE_CHANNEL_DMA }, gf119_disp_base_new },
117 &gf119_disp_base_oclass, 35 {{0,0,GF110_DISP_CORE_CHANNEL_DMA }, gf119_disp_core_new },
118 &gf119_disp_ovly_oclass, 36 {{0,0,GF110_DISP_OVERLAY_CONTROL_DMA}, gf119_disp_ovly_new },
119 }, 37 {}
120 .pioc = {
121 &gf119_disp_oimm_oclass,
122 &gf119_disp_curs_oclass,
123 }, 38 },
124}; 39};
125 40
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
index 0bfdb1d1c6ab..9e8ffd348b50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
@@ -22,22 +22,19 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "rootnv50.h" 24#include "rootnv50.h"
25#include "dmacnv50.h" 25#include "channv50.h"
26 26
27#include <nvif/class.h> 27#include <nvif/class.h>
28 28
29static const struct nv50_disp_root_func 29static const struct nv50_disp_root_func
30gk104_disp_root = { 30gk104_disp_root = {
31 .init = gf119_disp_root_init, 31 .user = {
32 .fini = gf119_disp_root_fini, 32 {{0,0,GK104_DISP_CURSOR }, gf119_disp_curs_new },
33 .dmac = { 33 {{0,0,GK104_DISP_OVERLAY }, gf119_disp_oimm_new },
34 &gk104_disp_core_oclass, 34 {{0,0,GK104_DISP_BASE_CHANNEL_DMA }, gf119_disp_base_new },
35 &gk104_disp_base_oclass, 35 {{0,0,GK104_DISP_CORE_CHANNEL_DMA }, gk104_disp_core_new },
36 &gk104_disp_ovly_oclass, 36 {{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
37 }, 37 {}
38 .pioc = {
39 &gk104_disp_oimm_oclass,
40 &gk104_disp_curs_oclass,
41 }, 38 },
42}; 39};
43 40
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
index 1e8dbed8a67c..dc85cc1c9490 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
@@ -22,22 +22,19 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "rootnv50.h" 24#include "rootnv50.h"
25#include "dmacnv50.h" 25#include "channv50.h"
26 26
27#include <nvif/class.h> 27#include <nvif/class.h>
28 28
29static const struct nv50_disp_root_func 29static const struct nv50_disp_root_func
30gk110_disp_root = { 30gk110_disp_root = {
31 .init = gf119_disp_root_init, 31 .user = {
32 .fini = gf119_disp_root_fini, 32 {{0,0,GK104_DISP_CURSOR }, gf119_disp_curs_new },
33 .dmac = { 33 {{0,0,GK104_DISP_OVERLAY }, gf119_disp_oimm_new },
34 &gk110_disp_core_oclass, 34 {{0,0,GK110_DISP_BASE_CHANNEL_DMA }, gf119_disp_base_new },
35 &gk110_disp_base_oclass, 35 {{0,0,GK110_DISP_CORE_CHANNEL_DMA }, gk104_disp_core_new },
36 &gk104_disp_ovly_oclass, 36 {{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
37 }, 37 {}
38 .pioc = {
39 &gk104_disp_oimm_oclass,
40 &gk104_disp_curs_oclass,
41 }, 38 },
42}; 39};
43 40
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
index 44c55be69e99..e0181ca08840 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
@@ -22,22 +22,19 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "rootnv50.h" 24#include "rootnv50.h"
25#include "dmacnv50.h" 25#include "channv50.h"
26 26
27#include <nvif/class.h> 27#include <nvif/class.h>
28 28
29static const struct nv50_disp_root_func 29static const struct nv50_disp_root_func
30gm107_disp_root = { 30gm107_disp_root = {
31 .init = gf119_disp_root_init, 31 .user = {
32 .fini = gf119_disp_root_fini, 32 {{0,0,GK104_DISP_CURSOR }, gf119_disp_curs_new },
33 .dmac = { 33 {{0,0,GK104_DISP_OVERLAY }, gf119_disp_oimm_new },
34 &gm107_disp_core_oclass, 34 {{0,0,GK110_DISP_BASE_CHANNEL_DMA }, gf119_disp_base_new },
35 &gk110_disp_base_oclass, 35 {{0,0,GM107_DISP_CORE_CHANNEL_DMA }, gk104_disp_core_new },
36 &gk104_disp_ovly_oclass, 36 {{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
37 }, 37 {}
38 .pioc = {
39 &gk104_disp_oimm_oclass,
40 &gk104_disp_curs_oclass,
41 }, 38 },
42}; 39};
43 40
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
index 38f5ee1dfc58..e5e590e19f62 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
@@ -22,22 +22,19 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "rootnv50.h" 24#include "rootnv50.h"
25#include "dmacnv50.h" 25#include "channv50.h"
26 26
27#include <nvif/class.h> 27#include <nvif/class.h>
28 28
29static const struct nv50_disp_root_func 29static const struct nv50_disp_root_func
30gm200_disp_root = { 30gm200_disp_root = {
31 .init = gf119_disp_root_init, 31 .user = {
32 .fini = gf119_disp_root_fini, 32 {{0,0,GK104_DISP_CURSOR }, gf119_disp_curs_new },
33 .dmac = { 33 {{0,0,GK104_DISP_OVERLAY }, gf119_disp_oimm_new },
34 &gm200_disp_core_oclass, 34 {{0,0,GK110_DISP_BASE_CHANNEL_DMA }, gf119_disp_base_new },
35 &gk110_disp_base_oclass, 35 {{0,0,GM200_DISP_CORE_CHANNEL_DMA }, gk104_disp_core_new },
36 &gk104_disp_ovly_oclass, 36 {{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
37 }, 37 {}
38 .pioc = {
39 &gk104_disp_oimm_oclass,
40 &gk104_disp_curs_oclass,
41 }, 38 },
42}; 39};
43 40
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
index ac8fdd728ec6..762a1a922e05 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
@@ -22,22 +22,19 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24#include "rootnv50.h" 24#include "rootnv50.h"
25#include "dmacnv50.h" 25#include "channv50.h"
26 26
27#include <nvif/class.h> 27#include <nvif/class.h>
28 28
29static const struct nv50_disp_root_func 29static const struct nv50_disp_root_func
30gp100_disp_root = { 30gp100_disp_root = {
31 .init = gf119_disp_root_init, 31 .user = {
32 .fini = gf119_disp_root_fini, 32 {{0,0,GK104_DISP_CURSOR }, gf119_disp_curs_new },
33 .dmac = { 33 {{0,0,GK104_DISP_OVERLAY }, gf119_disp_oimm_new },
34 &gp100_disp_core_oclass, 34 {{0,0,GK110_DISP_BASE_CHANNEL_DMA }, gf119_disp_base_new },
35 &gk110_disp_base_oclass, 35 {{0,0,GP100_DISP_CORE_CHANNEL_DMA }, gk104_disp_core_new },
36 &gk104_disp_ovly_oclass, 36 {{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
37 }, 37 {}
38 .pioc = {
39 &gk104_disp_oimm_oclass,
40 &gk104_disp_curs_oclass,
41 }, 38 },
42}; 39};
43 40
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
index 37122ca579ad..c7f00946c9af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
@@ -22,22 +22,19 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24#include "rootnv50.h" 24#include "rootnv50.h"
25#include "dmacnv50.h" 25#include "channv50.h"
26 26
27#include <nvif/class.h> 27#include <nvif/class.h>
28 28
29static const struct nv50_disp_root_func 29static const struct nv50_disp_root_func
30gp102_disp_root = { 30gp102_disp_root = {
31 .init = gf119_disp_root_init, 31 .user = {
32 .fini = gf119_disp_root_fini, 32 {{0,0,GK104_DISP_CURSOR }, gp102_disp_curs_new },
33 .dmac = { 33 {{0,0,GK104_DISP_OVERLAY }, gp102_disp_oimm_new },
34 &gp102_disp_core_oclass, 34 {{0,0,GK110_DISP_BASE_CHANNEL_DMA }, gp102_disp_base_new },
35 &gp102_disp_base_oclass, 35 {{0,0,GP102_DISP_CORE_CHANNEL_DMA }, gp102_disp_core_new },
36 &gp102_disp_ovly_oclass, 36 {{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gp102_disp_ovly_new },
37 }, 37 {}
38 .pioc = {
39 &gp102_disp_oimm_oclass,
40 &gp102_disp_curs_oclass,
41 }, 38 },
42}; 39};
43 40
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
index 124a0c24f92c..a6963654087c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
@@ -22,22 +22,19 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "rootnv50.h" 24#include "rootnv50.h"
25#include "dmacnv50.h" 25#include "channv50.h"
26 26
27#include <nvif/class.h> 27#include <nvif/class.h>
28 28
29static const struct nv50_disp_root_func 29static const struct nv50_disp_root_func
30gt200_disp_root = { 30gt200_disp_root = {
31 .init = nv50_disp_root_init, 31 .user = {
32 .fini = nv50_disp_root_fini, 32 {{0,0, G82_DISP_CURSOR }, nv50_disp_curs_new },
33 .dmac = { 33 {{0,0, G82_DISP_OVERLAY }, nv50_disp_oimm_new },
34 &gt200_disp_core_oclass, 34 {{0,0,GT200_DISP_BASE_CHANNEL_DMA }, g84_disp_base_new },
35 &gt200_disp_base_oclass, 35 {{0,0,GT200_DISP_CORE_CHANNEL_DMA }, g84_disp_core_new },
36 &gt200_disp_ovly_oclass, 36 {{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, gt200_disp_ovly_new },
37 }, 37 {}
38 .pioc = {
39 &g84_disp_oimm_oclass,
40 &g84_disp_curs_oclass,
41 }, 38 },
42}; 39};
43 40
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
index dff52f30668b..4fe0a3ae8891 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
@@ -22,22 +22,19 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "rootnv50.h" 24#include "rootnv50.h"
25#include "dmacnv50.h" 25#include "channv50.h"
26 26
27#include <nvif/class.h> 27#include <nvif/class.h>
28 28
29static const struct nv50_disp_root_func 29static const struct nv50_disp_root_func
30gt215_disp_root = { 30gt215_disp_root = {
31 .init = nv50_disp_root_init, 31 .user = {
32 .fini = nv50_disp_root_fini, 32 {{0,0,GT214_DISP_CURSOR }, nv50_disp_curs_new },
33 .dmac = { 33 {{0,0,GT214_DISP_OVERLAY }, nv50_disp_oimm_new },
34 &gt215_disp_core_oclass, 34 {{0,0,GT214_DISP_BASE_CHANNEL_DMA }, g84_disp_base_new },
35 &gt215_disp_base_oclass, 35 {{0,0,GT214_DISP_CORE_CHANNEL_DMA }, g94_disp_core_new },
36 &gt215_disp_ovly_oclass, 36 {{0,0,GT214_DISP_OVERLAY_CHANNEL_DMA}, g84_disp_ovly_new },
37 }, 37 {}
38 .pioc = {
39 &gt215_disp_oimm_oclass,
40 &gt215_disp_curs_oclass,
41 }, 38 },
42}; 39};
43 40
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c
new file mode 100644
index 000000000000..9c658d632d37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c
@@ -0,0 +1,52 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "rootnv50.h"
23#include "channv50.h"
24
25#include <nvif/class.h>
26
27static const struct nv50_disp_root_func
28gv100_disp_root = {
29 .user = {
30 {{0,0,GV100_DISP_CURSOR }, gv100_disp_curs_new },
31 {{0,0,GV100_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
32 {{0,0,GV100_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },
33 {{0,0,GV100_DISP_WINDOW_CHANNEL_DMA }, gv100_disp_wndw_new },
34 {}
35 },
36};
37
38static int
39gv100_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
40 void *data, u32 size, struct nvkm_object **pobject)
41{
42 return nv50_disp_root_new_(&gv100_disp_root, disp, oclass,
43 data, size, pobject);
44}
45
46const struct nvkm_disp_oclass
47gv100_disp_root_oclass = {
48 .base.oclass = GV100_DISP,
49 .base.minver = -1,
50 .base.maxver = -1,
51 .ctor = gv100_disp_root_new,
52};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 1208524aae14..3aa5a2879239 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -22,14 +22,12 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "rootnv50.h" 24#include "rootnv50.h"
25#include "dmacnv50.h" 25#include "channv50.h"
26#include "dp.h" 26#include "dp.h"
27#include "head.h" 27#include "head.h"
28#include "ior.h" 28#include "ior.h"
29 29
30#include <core/client.h> 30#include <core/client.h>
31#include <core/ramht.h>
32#include <subdev/timer.h>
33 31
34#include <nvif/class.h> 32#include <nvif/class.h>
35#include <nvif/cl5070.h> 33#include <nvif/cl5070.h>
@@ -271,23 +269,12 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
271} 269}
272 270
273static int 271static int
274nv50_disp_root_dmac_new_(const struct nvkm_oclass *oclass, 272nv50_disp_root_child_new_(const struct nvkm_oclass *oclass,
275 void *data, u32 size, struct nvkm_object **pobject) 273 void *argv, u32 argc, struct nvkm_object **pobject)
276{ 274{
277 const struct nv50_disp_dmac_oclass *sclass = oclass->priv; 275 struct nv50_disp *disp = nv50_disp_root(oclass->parent)->disp;
278 struct nv50_disp_root *root = nv50_disp_root(oclass->parent); 276 const struct nv50_disp_user *user = oclass->priv;
279 return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid, 277 return user->ctor(oclass, argv, argc, disp, pobject);
280 oclass, data, size, pobject);
281}
282
283static int
284nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass,
285 void *data, u32 size, struct nvkm_object **pobject)
286{
287 const struct nv50_disp_pioc_oclass *sclass = oclass->priv;
288 struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
289 return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid.ctrl,
290 sclass->chid.user, oclass, data, size, pobject);
291} 278}
292 279
293static int 280static int
@@ -296,68 +283,26 @@ nv50_disp_root_child_get_(struct nvkm_object *object, int index,
296{ 283{
297 struct nv50_disp_root *root = nv50_disp_root(object); 284 struct nv50_disp_root *root = nv50_disp_root(object);
298 285
299 if (index < ARRAY_SIZE(root->func->dmac)) { 286 if (root->func->user[index].ctor) {
300 sclass->base = root->func->dmac[index]->base; 287 sclass->base = root->func->user[index].base;
301 sclass->priv = root->func->dmac[index]; 288 sclass->priv = root->func->user + index;
302 sclass->ctor = nv50_disp_root_dmac_new_; 289 sclass->ctor = nv50_disp_root_child_new_;
303 return 0;
304 }
305
306 index -= ARRAY_SIZE(root->func->dmac);
307
308 if (index < ARRAY_SIZE(root->func->pioc)) {
309 sclass->base = root->func->pioc[index]->base;
310 sclass->priv = root->func->pioc[index];
311 sclass->ctor = nv50_disp_root_pioc_new_;
312 return 0; 290 return 0;
313 } 291 }
314 292
315 return -EINVAL; 293 return -EINVAL;
316} 294}
317 295
318static int
319nv50_disp_root_fini_(struct nvkm_object *object, bool suspend)
320{
321 struct nv50_disp_root *root = nv50_disp_root(object);
322 root->func->fini(root);
323 return 0;
324}
325
326static int
327nv50_disp_root_init_(struct nvkm_object *object)
328{
329 struct nv50_disp_root *root = nv50_disp_root(object);
330 struct nvkm_ior *ior;
331 int ret;
332
333 ret = root->func->init(root);
334 if (ret)
335 return ret;
336
337 /* Set 'normal' (ie. when it's attached to a head) state for
338 * each output resource to 'fully enabled'.
339 */
340 list_for_each_entry(ior, &root->disp->base.ior, head) {
341 ior->func->power(ior, true, true, true, true, true);
342 }
343
344 return 0;
345}
346
347static void * 296static void *
348nv50_disp_root_dtor_(struct nvkm_object *object) 297nv50_disp_root_dtor_(struct nvkm_object *object)
349{ 298{
350 struct nv50_disp_root *root = nv50_disp_root(object); 299 struct nv50_disp_root *root = nv50_disp_root(object);
351 nvkm_ramht_del(&root->ramht);
352 nvkm_gpuobj_del(&root->instmem);
353 return root; 300 return root;
354} 301}
355 302
356static const struct nvkm_object_func 303static const struct nvkm_object_func
357nv50_disp_root_ = { 304nv50_disp_root_ = {
358 .dtor = nv50_disp_root_dtor_, 305 .dtor = nv50_disp_root_dtor_,
359 .init = nv50_disp_root_init_,
360 .fini = nv50_disp_root_fini_,
361 .mthd = nv50_disp_root_mthd_, 306 .mthd = nv50_disp_root_mthd_,
362 .ntfy = nvkm_disp_ntfy, 307 .ntfy = nvkm_disp_ntfy,
363 .sclass = nv50_disp_root_child_get_, 308 .sclass = nv50_disp_root_child_get_,
@@ -370,8 +315,6 @@ nv50_disp_root_new_(const struct nv50_disp_root_func *func,
370{ 315{
371 struct nv50_disp *disp = nv50_disp(base); 316 struct nv50_disp *disp = nv50_disp(base);
372 struct nv50_disp_root *root; 317 struct nv50_disp_root *root;
373 struct nvkm_device *device = disp->base.engine.subdev.device;
374 int ret;
375 318
376 if (!(root = kzalloc(sizeof(*root), GFP_KERNEL))) 319 if (!(root = kzalloc(sizeof(*root), GFP_KERNEL)))
377 return -ENOMEM; 320 return -ENOMEM;
@@ -380,102 +323,18 @@ nv50_disp_root_new_(const struct nv50_disp_root_func *func,
380 nvkm_object_ctor(&nv50_disp_root_, oclass, &root->object); 323 nvkm_object_ctor(&nv50_disp_root_, oclass, &root->object);
381 root->func = func; 324 root->func = func;
382 root->disp = disp; 325 root->disp = disp;
383
384 ret = nvkm_gpuobj_new(disp->base.engine.subdev.device, 0x10000, 0x10000,
385 false, NULL, &root->instmem);
386 if (ret)
387 return ret;
388
389 return nvkm_ramht_new(device, 0x1000, 0, root->instmem, &root->ramht);
390}
391
392void
393nv50_disp_root_fini(struct nv50_disp_root *root)
394{
395 struct nvkm_device *device = root->disp->base.engine.subdev.device;
396 /* disable all interrupts */
397 nvkm_wr32(device, 0x610024, 0x00000000);
398 nvkm_wr32(device, 0x610020, 0x00000000);
399}
400
401int
402nv50_disp_root_init(struct nv50_disp_root *root)
403{
404 struct nv50_disp *disp = root->disp;
405 struct nvkm_head *head;
406 struct nvkm_device *device = disp->base.engine.subdev.device;
407 u32 tmp;
408 int i;
409
410 /* The below segments of code copying values from one register to
411 * another appear to inform EVO of the display capabilities or
412 * something similar. NFI what the 0x614004 caps are for..
413 */
414 tmp = nvkm_rd32(device, 0x614004);
415 nvkm_wr32(device, 0x610184, tmp);
416
417 /* ... CRTC caps */
418 list_for_each_entry(head, &disp->base.head, head) {
419 tmp = nvkm_rd32(device, 0x616100 + (head->id * 0x800));
420 nvkm_wr32(device, 0x610190 + (head->id * 0x10), tmp);
421 tmp = nvkm_rd32(device, 0x616104 + (head->id * 0x800));
422 nvkm_wr32(device, 0x610194 + (head->id * 0x10), tmp);
423 tmp = nvkm_rd32(device, 0x616108 + (head->id * 0x800));
424 nvkm_wr32(device, 0x610198 + (head->id * 0x10), tmp);
425 tmp = nvkm_rd32(device, 0x61610c + (head->id * 0x800));
426 nvkm_wr32(device, 0x61019c + (head->id * 0x10), tmp);
427 }
428
429 /* ... DAC caps */
430 for (i = 0; i < disp->func->dac.nr; i++) {
431 tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
432 nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
433 }
434
435 /* ... SOR caps */
436 for (i = 0; i < disp->func->sor.nr; i++) {
437 tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
438 nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
439 }
440
441 /* ... PIOR caps */
442 for (i = 0; i < disp->func->pior.nr; i++) {
443 tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
444 nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
445 }
446
447 /* steal display away from vbios, or something like that */
448 if (nvkm_rd32(device, 0x610024) & 0x00000100) {
449 nvkm_wr32(device, 0x610024, 0x00000100);
450 nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
451 if (nvkm_msec(device, 2000,
452 if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
453 break;
454 ) < 0)
455 return -EBUSY;
456 }
457
458 /* point at display engine memory area (hash table, objects) */
459 nvkm_wr32(device, 0x610010, (root->instmem->addr >> 8) | 9);
460
461 /* enable supervisor interrupts, disable everything else */
462 nvkm_wr32(device, 0x61002c, 0x00000370);
463 nvkm_wr32(device, 0x610028, 0x00000000);
464 return 0; 326 return 0;
465} 327}
466 328
467static const struct nv50_disp_root_func 329static const struct nv50_disp_root_func
468nv50_disp_root = { 330nv50_disp_root = {
469 .init = nv50_disp_root_init, 331 .user = {
470 .fini = nv50_disp_root_fini, 332 {{0,0,NV50_DISP_CURSOR }, nv50_disp_curs_new },
471 .dmac = { 333 {{0,0,NV50_DISP_OVERLAY }, nv50_disp_oimm_new },
472 &nv50_disp_core_oclass, 334 {{0,0,NV50_DISP_BASE_CHANNEL_DMA }, nv50_disp_base_new },
473 &nv50_disp_base_oclass, 335 {{0,0,NV50_DISP_CORE_CHANNEL_DMA }, nv50_disp_core_new },
474 &nv50_disp_ovly_oclass, 336 {{0,0,NV50_DISP_OVERLAY_CHANNEL_DMA}, nv50_disp_ovly_new },
475 }, 337 {}
476 .pioc = {
477 &nv50_disp_oimm_oclass,
478 &nv50_disp_curs_oclass,
479 }, 338 },
480}; 339};
481 340
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 4818fa69ae6c..6ca4f9184b51 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -2,34 +2,27 @@
2#ifndef __NV50_DISP_ROOT_H__ 2#ifndef __NV50_DISP_ROOT_H__
3#define __NV50_DISP_ROOT_H__ 3#define __NV50_DISP_ROOT_H__
4#define nv50_disp_root(p) container_of((p), struct nv50_disp_root, object) 4#define nv50_disp_root(p) container_of((p), struct nv50_disp_root, object)
5#include <core/object.h>
5#include "nv50.h" 6#include "nv50.h"
6#include "channv50.h"
7#include "dmacnv50.h"
8 7
9struct nv50_disp_root { 8struct nv50_disp_root {
10 const struct nv50_disp_root_func *func; 9 const struct nv50_disp_root_func *func;
11 struct nv50_disp *disp; 10 struct nv50_disp *disp;
12 struct nvkm_object object; 11 struct nvkm_object object;
13
14 struct nvkm_gpuobj *instmem;
15 struct nvkm_ramht *ramht;
16}; 12};
17 13
18struct nv50_disp_root_func { 14struct nv50_disp_root_func {
19 int (*init)(struct nv50_disp_root *); 15 int blah;
20 void (*fini)(struct nv50_disp_root *); 16 struct nv50_disp_user {
21 const struct nv50_disp_dmac_oclass *dmac[3]; 17 struct nvkm_sclass base;
22 const struct nv50_disp_pioc_oclass *pioc[2]; 18 int (*ctor)(const struct nvkm_oclass *, void *argv, u32 argc,
19 struct nv50_disp *, struct nvkm_object **);
20 } user[];
23}; 21};
24 22
25int nv50_disp_root_new_(const struct nv50_disp_root_func *, struct nvkm_disp *, 23int nv50_disp_root_new_(const struct nv50_disp_root_func *, struct nvkm_disp *,
26 const struct nvkm_oclass *, void *data, u32 size, 24 const struct nvkm_oclass *, void *data, u32 size,
27 struct nvkm_object **); 25 struct nvkm_object **);
28int nv50_disp_root_init(struct nv50_disp_root *);
29void nv50_disp_root_fini(struct nv50_disp_root *);
30
31int gf119_disp_root_init(struct nv50_disp_root *);
32void gf119_disp_root_fini(struct nv50_disp_root *);
33 26
34extern const struct nvkm_disp_oclass nv50_disp_root_oclass; 27extern const struct nvkm_disp_oclass nv50_disp_root_oclass;
35extern const struct nvkm_disp_oclass g84_disp_root_oclass; 28extern const struct nvkm_disp_oclass g84_disp_root_oclass;
@@ -43,4 +36,5 @@ extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
43extern const struct nvkm_disp_oclass gm200_disp_root_oclass; 36extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
44extern const struct nvkm_disp_oclass gp100_disp_root_oclass; 37extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
45extern const struct nvkm_disp_oclass gp102_disp_root_oclass; 38extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
39extern const struct nvkm_disp_oclass gv100_disp_root_oclass;
46#endif 40#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg84.c
index f40b909b4ca2..ec3a7db08118 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg84.c
@@ -34,5 +34,5 @@ g84_sor = {
34int 34int
35g84_sor_new(struct nvkm_disp *disp, int id) 35g84_sor_new(struct nvkm_disp *disp, int id)
36{ 36{
37 return nv50_sor_new_(&g84_sor, disp, id); 37 return nvkm_ior_new_(&g84_sor, disp, SOR, id);
38} 38}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 49aeafde0031..4d59d02525d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -279,5 +279,13 @@ g94_sor = {
279int 279int
280g94_sor_new(struct nvkm_disp *disp, int id) 280g94_sor_new(struct nvkm_disp *disp, int id)
281{ 281{
282 return nv50_sor_new_(&g94_sor, disp, id); 282 return nvkm_ior_new_(&g94_sor, disp, SOR, id);
283}
284
285int
286g94_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
287{
288 struct nvkm_device *device = disp->engine.subdev.device;
289 *pmask = (nvkm_rd32(device, 0x610184) & 0x0f000000) >> 24;
290 return 4;
283} 291}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index 700fc754f28a..e6e6dfbb1283 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -152,15 +152,6 @@ gf119_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
152 state->head = ctrl & 0x0000000f; 152 state->head = ctrl & 0x0000000f;
153} 153}
154 154
155int
156gf119_sor_new_(const struct nvkm_ior_func *func, struct nvkm_disp *disp, int id)
157{
158 struct nvkm_device *device = disp->engine.subdev.device;
159 if (!(nvkm_rd32(device, 0x612004) & (0x00000100 << id)))
160 return 0;
161 return nvkm_ior_new_(func, disp, SOR, id);
162}
163
164static const struct nvkm_ior_func 155static const struct nvkm_ior_func
165gf119_sor = { 156gf119_sor = {
166 .state = gf119_sor_state, 157 .state = gf119_sor_state,
@@ -189,5 +180,13 @@ gf119_sor = {
189int 180int
190gf119_sor_new(struct nvkm_disp *disp, int id) 181gf119_sor_new(struct nvkm_disp *disp, int id)
191{ 182{
192 return gf119_sor_new_(&gf119_sor, disp, id); 183 return nvkm_ior_new_(&gf119_sor, disp, SOR, id);
184}
185
186int
187gf119_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
188{
189 struct nvkm_device *device = disp->engine.subdev.device;
190 *pmask = (nvkm_rd32(device, 0x612004) & 0x0000ff00) >> 8;
191 return 8;
193} 192}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c
index a1547bdf490b..b94090edaebf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c
@@ -49,5 +49,5 @@ gk104_sor = {
49int 49int
50gk104_sor_new(struct nvkm_disp *disp, int id) 50gk104_sor_new(struct nvkm_disp *disp, int id)
51{ 51{
52 return gf119_sor_new_(&gk104_sor, disp, id); 52 return nvkm_ior_new_(&gk104_sor, disp, SOR, id);
53} 53}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
index 60230957d82b..e6965dec09c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
@@ -63,5 +63,5 @@ gm107_sor = {
63int 63int
64gm107_sor_new(struct nvkm_disp *disp, int id) 64gm107_sor_new(struct nvkm_disp *disp, int id)
65{ 65{
66 return gf119_sor_new_(&gm107_sor, disp, id); 66 return nvkm_ior_new_(&gm107_sor, disp, SOR, id);
67} 67}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index f9b8107aa2a2..d892bdf04034 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -23,7 +23,7 @@
23 */ 23 */
24#include "ior.h" 24#include "ior.h"
25 25
26static void 26void
27gm200_sor_dp_drive(struct nvkm_ior *sor, int ln, int pc, int dc, int pe, int pu) 27gm200_sor_dp_drive(struct nvkm_ior *sor, int ln, int pc, int dc, int pe, int pu)
28{ 28{
29 struct nvkm_device *device = sor->disp->engine.subdev.device; 29 struct nvkm_device *device = sor->disp->engine.subdev.device;
@@ -45,7 +45,7 @@ gm200_sor_dp_drive(struct nvkm_ior *sor, int ln, int pc, int dc, int pe, int pu)
45 nvkm_wr32(device, 0x61c13c + loff, data[3] | (pc << shift)); 45 nvkm_wr32(device, 0x61c13c + loff, data[3] | (pc << shift));
46} 46}
47 47
48static void 48void
49gm200_sor_route_set(struct nvkm_outp *outp, struct nvkm_ior *ior) 49gm200_sor_route_set(struct nvkm_outp *outp, struct nvkm_ior *ior)
50{ 50{
51 struct nvkm_device *device = outp->disp->engine.subdev.device; 51 struct nvkm_device *device = outp->disp->engine.subdev.device;
@@ -62,7 +62,7 @@ gm200_sor_route_set(struct nvkm_outp *outp, struct nvkm_ior *ior)
62 nvkm_mask(device, 0x612388 + moff, 0x0000001f, link << 4 | sor); 62 nvkm_mask(device, 0x612388 + moff, 0x0000001f, link << 4 | sor);
63} 63}
64 64
65static int 65int
66gm200_sor_route_get(struct nvkm_outp *outp, int *link) 66gm200_sor_route_get(struct nvkm_outp *outp, int *link)
67{ 67{
68 struct nvkm_device *device = outp->disp->engine.subdev.device; 68 struct nvkm_device *device = outp->disp->engine.subdev.device;
@@ -120,5 +120,5 @@ gm200_sor = {
120int 120int
121gm200_sor_new(struct nvkm_disp *disp, int id) 121gm200_sor_new(struct nvkm_disp *disp, int id)
122{ 122{
123 return gf119_sor_new_(&gm200_sor, disp, id); 123 return nvkm_ior_new_(&gm200_sor, disp, SOR, id);
124} 124}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgt215.c
index da228b54b43e..54d134d4ca1d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgt215.c
@@ -65,5 +65,5 @@ gt215_sor = {
65int 65int
66gt215_sor_new(struct nvkm_disp *disp, int id) 66gt215_sor_new(struct nvkm_disp *disp, int id)
67{ 67{
68 return nv50_sor_new_(&gt215_sor, disp, id); 68 return nvkm_ior_new_(&gt215_sor, disp, SOR, id);
69} 69}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
new file mode 100644
index 000000000000..040db8a338de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
@@ -0,0 +1,120 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "ior.h"
23
24#include <subdev/timer.h>
25
26static void
27gv100_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
28{
29 struct nvkm_device *device = sor->disp->engine.subdev.device;
30 const u32 hoff = head * 0x800;
31 nvkm_mask(device, 0x616550 + hoff, 0x0c00003f, 0x08000000 | watermark);
32}
33
34static void
35gv100_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
36{
37 struct nvkm_device *device = sor->disp->engine.subdev.device;
38 const u32 hoff = head * 0x800;
39 nvkm_mask(device, 0x616568 + hoff, 0x0000ffff, h);
40 nvkm_mask(device, 0x61656c + hoff, 0x00ffffff, v);
41}
42
43static void
44gv100_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
45{
46 struct nvkm_device *device = sor->disp->engine.subdev.device;
47 const u32 hoff = 0x800 * head;
48 const u32 data = 0x80000000 | (0x00000001 * enable);
49 const u32 mask = 0x8000000d;
50 nvkm_mask(device, 0x616560 + hoff, mask, data);
51 nvkm_msec(device, 2000,
52 if (!(nvkm_rd32(device, 0x616560 + hoff) & 0x80000000))
53 break;
54 );
55}
56
57static void
58gv100_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
59{
60 struct nvkm_device *device = sor->disp->engine.subdev.device;
61 const u32 coff = (state == &sor->arm) * 0x8000 + sor->id * 0x20;
62 u32 ctrl = nvkm_rd32(device, 0x680300 + coff);
63
64 state->proto_evo = (ctrl & 0x00000f00) >> 8;
65 switch (state->proto_evo) {
66 case 0: state->proto = LVDS; state->link = 1; break;
67 case 1: state->proto = TMDS; state->link = 1; break;
68 case 2: state->proto = TMDS; state->link = 2; break;
69 case 5: state->proto = TMDS; state->link = 3; break;
70 case 8: state->proto = DP; state->link = 1; break;
71 case 9: state->proto = DP; state->link = 2; break;
72 default:
73 state->proto = UNKNOWN;
74 break;
75 }
76
77 state->head = ctrl & 0x000000ff;
78}
79
80static const struct nvkm_ior_func
81gv100_sor = {
82 .route = {
83 .get = gm200_sor_route_get,
84 .set = gm200_sor_route_set,
85 },
86 .state = gv100_sor_state,
87 .power = nv50_sor_power,
88 .clock = gf119_sor_clock,
89 .hdmi = {
90 .ctrl = gv100_hdmi_ctrl,
91 },
92 .dp = {
93 .lanes = { 0, 1, 2, 3 },
94 .links = gf119_sor_dp_links,
95 .power = g94_sor_dp_power,
96 .pattern = gm107_sor_dp_pattern,
97 .drive = gm200_sor_dp_drive,
98 .audio = gv100_sor_dp_audio,
99 .audio_sym = gv100_sor_dp_audio_sym,
100 .watermark = gv100_sor_dp_watermark,
101 },
102 .hda = {
103 .hpd = gf119_hda_hpd,
104 .eld = gf119_hda_eld,
105 },
106};
107
108int
109gv100_sor_new(struct nvkm_disp *disp, int id)
110{
111 return nvkm_ior_new_(&gv100_sor, disp, SOR, id);
112}
113
114int
115gv100_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
116{
117 struct nvkm_device *device = disp->engine.subdev.device;
118 *pmask = (nvkm_rd32(device, 0x610060) & 0x0000ff00) >> 8;
119 return (nvkm_rd32(device, 0x610074) & 0x00000f00) >> 8;
120}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp77.c
index c0179ccb956d..8a70dd25b13a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp77.c
@@ -44,5 +44,5 @@ mcp77_sor = {
44int 44int
45mcp77_sor_new(struct nvkm_disp *disp, int id) 45mcp77_sor_new(struct nvkm_disp *disp, int id)
46{ 46{
47 return nv50_sor_new_(&mcp77_sor, disp, id); 47 return nvkm_ior_new_(&mcp77_sor, disp, SOR, id);
48} 48}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp89.c
index 9bb01cd96697..eac9c5be9166 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp89.c
@@ -49,5 +49,5 @@ mcp89_sor = {
49int 49int
50mcp89_sor_new(struct nvkm_disp *disp, int id) 50mcp89_sor_new(struct nvkm_disp *disp, int id)
51{ 51{
52 return nv50_sor_new_(&mcp89_sor, disp, id); 52 return nvkm_ior_new_(&mcp89_sor, disp, SOR, id);
53} 53}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
index f3ebd0c22e7d..b4729f8798af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
@@ -84,15 +84,6 @@ nv50_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
84 state->head = ctrl & 0x00000003; 84 state->head = ctrl & 0x00000003;
85} 85}
86 86
87int
88nv50_sor_new_(const struct nvkm_ior_func *func, struct nvkm_disp *disp, int id)
89{
90 struct nvkm_device *device = disp->engine.subdev.device;
91 if (!(nvkm_rd32(device, 0x610184) & (0x01000000 << id)))
92 return 0;
93 return nvkm_ior_new_(func, disp, SOR, id);
94}
95
96static const struct nvkm_ior_func 87static const struct nvkm_ior_func
97nv50_sor = { 88nv50_sor = {
98 .state = nv50_sor_state, 89 .state = nv50_sor_state,
@@ -103,5 +94,13 @@ nv50_sor = {
103int 94int
104nv50_sor_new(struct nvkm_disp *disp, int id) 95nv50_sor_new(struct nvkm_disp *disp, int id)
105{ 96{
106 return nv50_sor_new_(&nv50_sor, disp, id); 97 return nvkm_ior_new_(&nv50_sor, disp, SOR, id);
98}
99
100int
101nv50_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
102{
103 struct nvkm_device *device = disp->engine.subdev.device;
104 *pmask = (nvkm_rd32(device, 0x610184) & 0x03000000) >> 24;
105 return 2;
107} 106}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/wimmgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/wimmgv100.c
new file mode 100644
index 000000000000..89d783368b4f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/wimmgv100.c
@@ -0,0 +1,82 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "channv50.h"
23
24#include <core/client.h>
25
26#include <nvif/clc37b.h>
27#include <nvif/unpack.h>
28
29static void
30gv100_disp_wimm_intr(struct nv50_disp_chan *chan, bool en)
31{
32 struct nvkm_device *device = chan->disp->base.engine.subdev.device;
33 const u32 mask = 0x00000001 << chan->head;
34 const u32 data = en ? mask : 0;
35 nvkm_mask(device, 0x611da8, mask, data);
36}
37
38const struct nv50_disp_chan_func
39gv100_disp_wimm = {
40 .init = gv100_disp_dmac_init,
41 .fini = gv100_disp_dmac_fini,
42 .intr = gv100_disp_wimm_intr,
43 .user = gv100_disp_chan_user,
44};
45
46static int
47gv100_disp_wimm_new_(const struct nv50_disp_chan_func *func,
48 const struct nv50_disp_chan_mthd *mthd,
49 struct nv50_disp *disp, int chid,
50 const struct nvkm_oclass *oclass, void *argv, u32 argc,
51 struct nvkm_object **pobject)
52{
53 union {
54 struct nvc37b_window_imm_channel_dma_v0 v0;
55 } *args = argv;
56 struct nvkm_object *parent = oclass->parent;
57 int wndw, ret = -ENOSYS;
58 u64 push;
59
60 nvif_ioctl(parent, "create window imm channel dma size %d\n", argc);
61 if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
62 nvif_ioctl(parent, "create window imm channel dma vers %d "
63 "pushbuf %016llx index %d\n",
64 args->v0.version, args->v0.pushbuf, args->v0.index);
65 if (!(disp->wndw.mask & BIT(args->v0.index)))
66 return -EINVAL;
67 push = args->v0.pushbuf;
68 wndw = args->v0.index;
69 } else
70 return ret;
71
72 return nv50_disp_dmac_new_(func, mthd, disp, chid + wndw,
73 wndw, push, oclass, pobject);
74}
75
76int
77gv100_disp_wimm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
78 struct nv50_disp *disp, struct nvkm_object **pobject)
79{
80 return gv100_disp_wimm_new_(&gv100_disp_wimm, NULL, disp, 33,
81 oclass, argv, argc, pobject);
82}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c
new file mode 100644
index 000000000000..98911805aabf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c
@@ -0,0 +1,184 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "channv50.h"
23
24#include <core/client.h>
25
26#include <nvif/clc37e.h>
27#include <nvif/unpack.h>
28
29static const struct nv50_disp_mthd_list
30gv100_disp_wndw_mthd_base = {
31 .mthd = 0x0000,
32 .addr = 0x000000,
33 .data = {
34 { 0x0200, 0x690200 },
35 { 0x020c, 0x69020c },
36 { 0x0210, 0x690210 },
37 { 0x0214, 0x690214 },
38 { 0x0218, 0x690218 },
39 { 0x021c, 0x69021c },
40 { 0x0220, 0x690220 },
41 { 0x0224, 0x690224 },
42 { 0x0228, 0x690228 },
43 { 0x022c, 0x69022c },
44 { 0x0230, 0x690230 },
45 { 0x0234, 0x690234 },
46 { 0x0238, 0x690238 },
47 { 0x0240, 0x690240 },
48 { 0x0244, 0x690244 },
49 { 0x0248, 0x690248 },
50 { 0x024c, 0x69024c },
51 { 0x0250, 0x690250 },
52 { 0x0254, 0x690254 },
53 { 0x0260, 0x690260 },
54 { 0x0264, 0x690264 },
55 { 0x0268, 0x690268 },
56 { 0x026c, 0x69026c },
57 { 0x0270, 0x690270 },
58 { 0x0274, 0x690274 },
59 { 0x0280, 0x690280 },
60 { 0x0284, 0x690284 },
61 { 0x0288, 0x690288 },
62 { 0x028c, 0x69028c },
63 { 0x0290, 0x690290 },
64 { 0x0298, 0x690298 },
65 { 0x029c, 0x69029c },
66 { 0x02a0, 0x6902a0 },
67 { 0x02a4, 0x6902a4 },
68 { 0x02a8, 0x6902a8 },
69 { 0x02ac, 0x6902ac },
70 { 0x02b0, 0x6902b0 },
71 { 0x02b4, 0x6902b4 },
72 { 0x02b8, 0x6902b8 },
73 { 0x02bc, 0x6902bc },
74 { 0x02c0, 0x6902c0 },
75 { 0x02c4, 0x6902c4 },
76 { 0x02c8, 0x6902c8 },
77 { 0x02cc, 0x6902cc },
78 { 0x02d0, 0x6902d0 },
79 { 0x02d4, 0x6902d4 },
80 { 0x02d8, 0x6902d8 },
81 { 0x02dc, 0x6902dc },
82 { 0x02e0, 0x6902e0 },
83 { 0x02e4, 0x6902e4 },
84 { 0x02e8, 0x6902e8 },
85 { 0x02ec, 0x6902ec },
86 { 0x02f0, 0x6902f0 },
87 { 0x02f4, 0x6902f4 },
88 { 0x02f8, 0x6902f8 },
89 { 0x02fc, 0x6902fc },
90 { 0x0300, 0x690300 },
91 { 0x0304, 0x690304 },
92 { 0x0308, 0x690308 },
93 { 0x0310, 0x690310 },
94 { 0x0314, 0x690314 },
95 { 0x0318, 0x690318 },
96 { 0x031c, 0x69031c },
97 { 0x0320, 0x690320 },
98 { 0x0324, 0x690324 },
99 { 0x0328, 0x690328 },
100 { 0x032c, 0x69032c },
101 { 0x033c, 0x69033c },
102 { 0x0340, 0x690340 },
103 { 0x0344, 0x690344 },
104 { 0x0348, 0x690348 },
105 { 0x034c, 0x69034c },
106 { 0x0350, 0x690350 },
107 { 0x0354, 0x690354 },
108 { 0x0358, 0x690358 },
109 { 0x0364, 0x690364 },
110 { 0x0368, 0x690368 },
111 { 0x036c, 0x69036c },
112 { 0x0370, 0x690370 },
113 { 0x0374, 0x690374 },
114 { 0x0380, 0x690380 },
115 {}
116 }
117};
118
119const struct nv50_disp_chan_mthd
120gv100_disp_wndw_mthd = {
121 .name = "Base",
122 .addr = 0x001000,
123 .prev = 0x000800,
124 .data = {
125 { "Global", 1, &gv100_disp_wndw_mthd_base },
126 {}
127 }
128};
129
130static void
131gv100_disp_wndw_intr(struct nv50_disp_chan *chan, bool en)
132{
133 struct nvkm_device *device = chan->disp->base.engine.subdev.device;
134 const u32 mask = 0x00000001 << chan->head;
135 const u32 data = en ? mask : 0;
136 nvkm_mask(device, 0x611da4, mask, data);
137}
138
139const struct nv50_disp_chan_func
140gv100_disp_wndw = {
141 .init = gv100_disp_dmac_init,
142 .fini = gv100_disp_dmac_fini,
143 .intr = gv100_disp_wndw_intr,
144 .user = gv100_disp_chan_user,
145 .bind = gv100_disp_dmac_bind,
146};
147
148static int
149gv100_disp_wndw_new_(const struct nv50_disp_chan_func *func,
150 const struct nv50_disp_chan_mthd *mthd,
151 struct nv50_disp *disp, int chid,
152 const struct nvkm_oclass *oclass, void *argv, u32 argc,
153 struct nvkm_object **pobject)
154{
155 union {
156 struct nvc37e_window_channel_dma_v0 v0;
157 } *args = argv;
158 struct nvkm_object *parent = oclass->parent;
159 int wndw, ret = -ENOSYS;
160 u64 push;
161
162 nvif_ioctl(parent, "create window channel dma size %d\n", argc);
163 if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
164 nvif_ioctl(parent, "create window channel dma vers %d "
165 "pushbuf %016llx index %d\n",
166 args->v0.version, args->v0.pushbuf, args->v0.index);
167 if (!(disp->wndw.mask & BIT(args->v0.index)))
168 return -EINVAL;
169 push = args->v0.pushbuf;
170 wndw = args->v0.index;
171 } else
172 return ret;
173
174 return nv50_disp_dmac_new_(func, mthd, disp, chid + wndw,
175 wndw, push, oclass, pobject);
176}
177
178int
179gv100_disp_wndw_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
180 struct nv50_disp *disp, struct nvkm_object **pobject)
181{
182 return gv100_disp_wndw_new_(&gv100_disp_wndw, &gv100_disp_wndw_mthd,
183 disp, 1, oclass, argv, argc, pobject);
184}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
index c4a2ce9b0d71..e96d1f57f9f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
@@ -3,9 +3,11 @@ nvkm-y += nvkm/engine/dma/nv04.o
3nvkm-y += nvkm/engine/dma/nv50.o 3nvkm-y += nvkm/engine/dma/nv50.o
4nvkm-y += nvkm/engine/dma/gf100.o 4nvkm-y += nvkm/engine/dma/gf100.o
5nvkm-y += nvkm/engine/dma/gf119.o 5nvkm-y += nvkm/engine/dma/gf119.o
6nvkm-y += nvkm/engine/dma/gv100.o
6 7
7nvkm-y += nvkm/engine/dma/user.o 8nvkm-y += nvkm/engine/dma/user.o
8nvkm-y += nvkm/engine/dma/usernv04.o 9nvkm-y += nvkm/engine/dma/usernv04.o
9nvkm-y += nvkm/engine/dma/usernv50.o 10nvkm-y += nvkm/engine/dma/usernv50.o
10nvkm-y += nvkm/engine/dma/usergf100.o 11nvkm-y += nvkm/engine/dma/usergf100.o
11nvkm-y += nvkm/engine/dma/usergf119.o 12nvkm-y += nvkm/engine/dma/usergf119.o
13nvkm-y += nvkm/engine/dma/usergv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c
index 5ad5d0f5db05..c65a4c2ea93d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2012 Red Hat Inc. 2 * Copyright 2018 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -18,20 +18,17 @@
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */ 21 */
24#include "channv50.h" 22#include "priv.h"
25#include "rootnv50.h" 23#include "user.h"
26
27#include <nvif/class.h>
28 24
29const struct nv50_disp_pioc_oclass 25static const struct nvkm_dma_func
30g84_disp_oimm_oclass = { 26gv100_dma = {
31 .base.oclass = G82_DISP_OVERLAY, 27 .class_new = gv100_dmaobj_new,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_oimm_new,
35 .func = &nv50_disp_pioc_func,
36 .chid = { 5, 5 },
37}; 28};
29
30int
31gv100_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
32{
33 return nvkm_dma_new_(&gv100_dma, device, index, pdma);
34}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
index 4bbac8a21c71..9fe01fd75474 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
@@ -16,4 +16,6 @@ int gf100_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
16 struct nvkm_dmaobj **); 16 struct nvkm_dmaobj **);
17int gf119_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32, 17int gf119_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
18 struct nvkm_dmaobj **); 18 struct nvkm_dmaobj **);
19int gv100_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
20 struct nvkm_dmaobj **);
19#endif 21#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c
new file mode 100644
index 000000000000..39eba9fc82be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c
@@ -0,0 +1,119 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#define gv100_dmaobj(p) container_of((p), struct gv100_dmaobj, base)
23#include "user.h"
24
25#include <core/client.h>
26#include <core/gpuobj.h>
27#include <subdev/fb.h>
28
29#include <nvif/cl0002.h>
30#include <nvif/unpack.h>
31
32struct gv100_dmaobj {
33 struct nvkm_dmaobj base;
34 u32 flags0;
35};
36
37static int
38gv100_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
39 int align, struct nvkm_gpuobj **pgpuobj)
40{
41 struct gv100_dmaobj *dmaobj = gv100_dmaobj(base);
42 struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
43 u64 start = dmaobj->base.start >> 8;
44 u64 limit = dmaobj->base.limit >> 8;
45 int ret;
46
47 ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
48 if (ret == 0) {
49 nvkm_kmap(*pgpuobj);
50 nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
51 nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(start));
52 nvkm_wo32(*pgpuobj, 0x08, upper_32_bits(start));
53 nvkm_wo32(*pgpuobj, 0x0c, lower_32_bits(limit));
54 nvkm_wo32(*pgpuobj, 0x10, upper_32_bits(limit));
55 nvkm_done(*pgpuobj);
56 }
57
58 return ret;
59}
60
61static const struct nvkm_dmaobj_func
62gv100_dmaobj_func = {
63 .bind = gv100_dmaobj_bind,
64};
65
66int
67gv100_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
68 void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
69{
70 union {
71 struct gf119_dma_v0 v0;
72 } *args;
73 struct nvkm_object *parent = oclass->parent;
74 struct gv100_dmaobj *dmaobj;
75 u32 kind, page;
76 int ret;
77
78 if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
79 return -ENOMEM;
80 *pdmaobj = &dmaobj->base;
81
82 ret = nvkm_dmaobj_ctor(&gv100_dmaobj_func, dma, oclass,
83 &data, &size, &dmaobj->base);
84 if (ret)
85 return ret;
86
87 ret = -ENOSYS;
88 args = data;
89
90 nvif_ioctl(parent, "create gv100 dma size %d\n", size);
91 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
92 nvif_ioctl(parent,
93 "create gv100 dma vers %d page %d kind %02x\n",
94 args->v0.version, args->v0.page, args->v0.kind);
95 kind = args->v0.kind != 0;
96 page = args->v0.page != 0;
97 } else
98 if (size == 0) {
99 kind = 0;
100 page = GF119_DMA_V0_PAGE_SP;
101 } else
102 return ret;
103
104 if (kind)
105 dmaobj->flags0 |= 0x00100000;
106 if (page)
107 dmaobj->flags0 |= 0x00000040;
108 dmaobj->flags0 |= 0x00000004; /* rw */
109
110 switch (dmaobj->base.target) {
111 case NV_MEM_TARGET_VRAM : dmaobj->flags0 |= 0x00000001; break;
112 case NV_MEM_TARGET_PCI : dmaobj->flags0 |= 0x00000002; break;
113 case NV_MEM_TARGET_PCI_NOSNOOP: dmaobj->flags0 |= 0x00000003; break;
114 default:
115 return -EINVAL;
116 }
117
118 return 0;
119}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 64e51838edf8..f00408577a6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -15,6 +15,7 @@ nvkm-y += nvkm/engine/fifo/gm200.o
15nvkm-y += nvkm/engine/fifo/gm20b.o 15nvkm-y += nvkm/engine/fifo/gm20b.o
16nvkm-y += nvkm/engine/fifo/gp100.o 16nvkm-y += nvkm/engine/fifo/gp100.o
17nvkm-y += nvkm/engine/fifo/gp10b.o 17nvkm-y += nvkm/engine/fifo/gp10b.o
18nvkm-y += nvkm/engine/fifo/gv100.o
18 19
19nvkm-y += nvkm/engine/fifo/chan.o 20nvkm-y += nvkm/engine/fifo/chan.o
20nvkm-y += nvkm/engine/fifo/channv50.o 21nvkm-y += nvkm/engine/fifo/channv50.o
@@ -31,6 +32,6 @@ nvkm-y += nvkm/engine/fifo/gpfifonv50.o
31nvkm-y += nvkm/engine/fifo/gpfifog84.o 32nvkm-y += nvkm/engine/fifo/gpfifog84.o
32nvkm-y += nvkm/engine/fifo/gpfifogf100.o 33nvkm-y += nvkm/engine/fifo/gpfifogf100.o
33nvkm-y += nvkm/engine/fifo/gpfifogk104.o 34nvkm-y += nvkm/engine/fifo/gpfifogk104.o
34nvkm-y += nvkm/engine/fifo/gpfifogk110.o 35nvkm-y += nvkm/engine/fifo/gpfifogv100.o
35nvkm-y += nvkm/engine/fifo/gpfifogm200.o 36
36nvkm-y += nvkm/engine/fifo/gpfifogp100.o 37nvkm-y += nvkm/engine/fifo/usergv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 64f6b7654a08..c773caf21f6b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -30,6 +30,7 @@
30#include <subdev/mc.h> 30#include <subdev/mc.h>
31 31
32#include <nvif/event.h> 32#include <nvif/event.h>
33#include <nvif/cl0080.h>
33#include <nvif/unpack.h> 34#include <nvif/unpack.h>
34 35
35void 36void
@@ -56,6 +57,12 @@ nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
56} 57}
57 58
58void 59void
60nvkm_fifo_fault(struct nvkm_fifo *fifo, struct nvkm_fault_data *info)
61{
62 return fifo->func->fault(fifo, info);
63}
64
65void
59nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags, 66nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
60 struct nvkm_fifo_chan **pchan) 67 struct nvkm_fifo_chan **pchan)
61{ 68{
@@ -209,6 +216,20 @@ nvkm_fifo_uevent(struct nvkm_fifo *fifo)
209} 216}
210 217
211static int 218static int
219nvkm_fifo_class_new_(struct nvkm_device *device,
220 const struct nvkm_oclass *oclass, void *data, u32 size,
221 struct nvkm_object **pobject)
222{
223 struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
224 return fifo->func->class_new(fifo, oclass, data, size, pobject);
225}
226
227static const struct nvkm_device_oclass
228nvkm_fifo_class_ = {
229 .ctor = nvkm_fifo_class_new_,
230};
231
232static int
212nvkm_fifo_class_new(struct nvkm_device *device, 233nvkm_fifo_class_new(struct nvkm_device *device,
213 const struct nvkm_oclass *oclass, void *data, u32 size, 234 const struct nvkm_oclass *oclass, void *data, u32 size,
214 struct nvkm_object **pobject) 235 struct nvkm_object **pobject)
@@ -232,13 +253,9 @@ nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index,
232 int c = 0; 253 int c = 0;
233 254
234 if (fifo->func->class_get) { 255 if (fifo->func->class_get) {
235 int ret = fifo->func->class_get(fifo, index, &sclass); 256 int ret = fifo->func->class_get(fifo, index, oclass);
236 if (ret == 0) { 257 if (ret == 0)
237 oclass->base = sclass->base; 258 *class = &nvkm_fifo_class_;
238 oclass->engn = sclass;
239 *class = &nvkm_fifo_class;
240 return 0;
241 }
242 return ret; 259 return ret;
243 } 260 }
244 261
@@ -271,6 +288,20 @@ nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
271} 288}
272 289
273static int 290static int
291nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
292{
293 struct nvkm_fifo *fifo = nvkm_fifo(engine);
294 switch (mthd) {
295 case NV_DEVICE_FIFO_CHANNELS: *data = fifo->nr; return 0;
296 default:
297 if (fifo->func->info)
298 return fifo->func->info(fifo, mthd, data);
299 break;
300 }
301 return -ENOSYS;
302}
303
304static int
274nvkm_fifo_oneinit(struct nvkm_engine *engine) 305nvkm_fifo_oneinit(struct nvkm_engine *engine)
275{ 306{
276 struct nvkm_fifo *fifo = nvkm_fifo(engine); 307 struct nvkm_fifo *fifo = nvkm_fifo(engine);
@@ -311,6 +342,7 @@ nvkm_fifo = {
311 .dtor = nvkm_fifo_dtor, 342 .dtor = nvkm_fifo_dtor,
312 .preinit = nvkm_fifo_preinit, 343 .preinit = nvkm_fifo_preinit,
313 .oneinit = nvkm_fifo_oneinit, 344 .oneinit = nvkm_fifo_oneinit,
345 .info = nvkm_fifo_info,
314 .init = nvkm_fifo_init, 346 .init = nvkm_fifo_init,
315 .fini = nvkm_fifo_fini, 347 .fini = nvkm_fifo_fini,
316 .intr = nvkm_fifo_intr, 348 .intr = nvkm_fifo_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h
new file mode 100644
index 000000000000..d0ac60b06720
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h
@@ -0,0 +1,11 @@
1#ifndef __NVKM_FIFO_CGRP_H__
2#define __NVKM_FIFO_CGRP_H__
3#include "priv.h"
4
5struct nvkm_fifo_cgrp {
6 int id;
7 struct list_head head;
8 struct list_head chan;
9 int chan_nr;
10};
11#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index 1208e3d9dbe2..8e28ba6b2307 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -10,6 +10,7 @@ struct gk104_fifo_chan {
10 struct gk104_fifo *fifo; 10 struct gk104_fifo *fifo;
11 int runl; 11 int runl;
12 12
13 struct nvkm_fifo_cgrp *cgrp;
13 struct list_head head; 14 struct list_head head;
14 bool killed; 15 bool killed;
15 16
@@ -19,11 +20,20 @@ struct gk104_fifo_chan {
19 } engn[NVKM_SUBDEV_NR]; 20 } engn[NVKM_SUBDEV_NR];
20}; 21};
21 22
22int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *, 23extern const struct nvkm_fifo_chan_func gk104_fifo_gpfifo_func;
24
25int gk104_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
23 void *data, u32 size, struct nvkm_object **); 26 void *data, u32 size, struct nvkm_object **);
27void *gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *);
28void gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *);
29void gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *);
30int gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *, struct nvkm_engine *,
31 struct nvkm_object *);
32void gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *,
33 struct nvkm_engine *);
34int gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *);
35int gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan *);
24 36
25extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass; 37int gv100_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
26extern const struct nvkm_fifo_chan_oclass gk110_fifo_gpfifo_oclass; 38 void *data, u32 size, struct nvkm_object **);
27extern const struct nvkm_fifo_chan_oclass gm200_fifo_gpfifo_oclass;
28extern const struct nvkm_fifo_chan_oclass gp100_fifo_gpfifo_oclass;
29#endif 39#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 84bd703dd897..a99046414a18 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -22,16 +22,19 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "gk104.h" 24#include "gk104.h"
25#include "cgrp.h"
25#include "changk104.h" 26#include "changk104.h"
26 27
27#include <core/client.h> 28#include <core/client.h>
28#include <core/gpuobj.h> 29#include <core/gpuobj.h>
29#include <subdev/bar.h> 30#include <subdev/bar.h>
31#include <subdev/fault.h>
30#include <subdev/timer.h> 32#include <subdev/timer.h>
31#include <subdev/top.h> 33#include <subdev/top.h>
32#include <engine/sw.h> 34#include <engine/sw.h>
33 35
34#include <nvif/class.h> 36#include <nvif/class.h>
37#include <nvif/cl0080.h>
35 38
36struct gk104_fifo_engine_status { 39struct gk104_fifo_engine_status {
37 bool busy; 40 bool busy;
@@ -93,15 +96,39 @@ gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
93} 96}
94 97
95static int 98static int
99gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
100 void *argv, u32 argc, struct nvkm_object **pobject)
101{
102 struct gk104_fifo *fifo = gk104_fifo(base);
103 if (oclass->engn == &fifo->func->chan) {
104 const struct gk104_fifo_chan_user *user = oclass->engn;
105 return user->ctor(fifo, oclass, argv, argc, pobject);
106 } else
107 if (oclass->engn == &fifo->func->user) {
108 const struct gk104_fifo_user_user *user = oclass->engn;
109 return user->ctor(oclass, argv, argc, pobject);
110 }
111 WARN_ON(1);
112 return -EINVAL;
113}
114
115static int
96gk104_fifo_class_get(struct nvkm_fifo *base, int index, 116gk104_fifo_class_get(struct nvkm_fifo *base, int index,
97 const struct nvkm_fifo_chan_oclass **psclass) 117 struct nvkm_oclass *oclass)
98{ 118{
99 struct gk104_fifo *fifo = gk104_fifo(base); 119 struct gk104_fifo *fifo = gk104_fifo(base);
100 int c = 0; 120 int c = 0;
101 121
102 while ((*psclass = fifo->func->chan[c])) { 122 if (fifo->func->user.ctor && c++ == index) {
103 if (c++ == index) 123 oclass->base = fifo->func->user.user;
104 return 0; 124 oclass->engn = &fifo->func->user;
125 return 0;
126 }
127
128 if (fifo->func->chan.ctor && c++ == index) {
129 oclass->base = fifo->func->chan.user;
130 oclass->engn = &fifo->func->chan;
131 return 0;
105 } 132 }
106 133
107 return c; 134 return c;
@@ -124,10 +151,12 @@ gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
124void 151void
125gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl) 152gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
126{ 153{
154 const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
127 struct gk104_fifo_chan *chan; 155 struct gk104_fifo_chan *chan;
128 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 156 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
129 struct nvkm_device *device = subdev->device; 157 struct nvkm_device *device = subdev->device;
130 struct nvkm_memory *mem; 158 struct nvkm_memory *mem;
159 struct nvkm_fifo_cgrp *cgrp;
131 int nr = 0; 160 int nr = 0;
132 int target; 161 int target;
133 162
@@ -137,9 +166,14 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
137 166
138 nvkm_kmap(mem); 167 nvkm_kmap(mem);
139 list_for_each_entry(chan, &fifo->runlist[runl].chan, head) { 168 list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
140 nvkm_wo32(mem, (nr * 8) + 0, chan->base.chid); 169 func->chan(chan, mem, nr++ * func->size);
141 nvkm_wo32(mem, (nr * 8) + 4, 0x00000000); 170 }
142 nr++; 171
172 list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
173 func->cgrp(cgrp, mem, nr++ * func->size);
174 list_for_each_entry(chan, &cgrp->chan, head) {
175 func->chan(chan, mem, nr++ * func->size);
176 }
143 } 177 }
144 nvkm_done(mem); 178 nvkm_done(mem);
145 179
@@ -155,10 +189,10 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
155 (target << 28)); 189 (target << 28));
156 nvkm_wr32(device, 0x002274, (runl << 20) | nr); 190 nvkm_wr32(device, 0x002274, (runl << 20) | nr);
157 191
158 if (wait_event_timeout(fifo->runlist[runl].wait, 192 if (nvkm_msec(device, 2000,
159 !(nvkm_rd32(device, 0x002284 + (runl * 0x08)) 193 if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
160 & 0x00100000), 194 break;
161 msecs_to_jiffies(2000)) == 0) 195 ) < 0)
162 nvkm_error(subdev, "runlist %d update timeout\n", runl); 196 nvkm_error(subdev, "runlist %d update timeout\n", runl);
163unlock: 197unlock:
164 mutex_unlock(&subdev->mutex); 198 mutex_unlock(&subdev->mutex);
@@ -167,19 +201,45 @@ unlock:
167void 201void
168gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan) 202gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
169{ 203{
204 struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
170 mutex_lock(&fifo->base.engine.subdev.mutex); 205 mutex_lock(&fifo->base.engine.subdev.mutex);
171 list_del_init(&chan->head); 206 if (!list_empty(&chan->head)) {
207 list_del_init(&chan->head);
208 if (cgrp && !--cgrp->chan_nr)
209 list_del_init(&cgrp->head);
210 }
172 mutex_unlock(&fifo->base.engine.subdev.mutex); 211 mutex_unlock(&fifo->base.engine.subdev.mutex);
173} 212}
174 213
175void 214void
176gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan) 215gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
177{ 216{
217 struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
178 mutex_lock(&fifo->base.engine.subdev.mutex); 218 mutex_lock(&fifo->base.engine.subdev.mutex);
179 list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan); 219 if (cgrp) {
220 if (!cgrp->chan_nr++)
221 list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
222 list_add_tail(&chan->head, &cgrp->chan);
223 } else {
224 list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
225 }
180 mutex_unlock(&fifo->base.engine.subdev.mutex); 226 mutex_unlock(&fifo->base.engine.subdev.mutex);
181} 227}
182 228
229void
230gk104_fifo_runlist_chan(struct gk104_fifo_chan *chan,
231 struct nvkm_memory *memory, u32 offset)
232{
233 nvkm_wo32(memory, offset + 0, chan->base.chid);
234 nvkm_wo32(memory, offset + 4, 0x00000000);
235}
236
237const struct gk104_fifo_runlist_func
238gk104_fifo_runlist = {
239 .size = 8,
240 .chan = gk104_fifo_runlist_chan,
241};
242
183static void 243static void
184gk104_fifo_recover_work(struct work_struct *w) 244gk104_fifo_recover_work(struct work_struct *w)
185{ 245{
@@ -235,6 +295,32 @@ gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
235 schedule_work(&fifo->recover.work); 295 schedule_work(&fifo->recover.work);
236} 296}
237 297
298static struct gk104_fifo_chan *
299gk104_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
300{
301 struct gk104_fifo_chan *chan;
302 struct nvkm_fifo_cgrp *cgrp;
303
304 list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
305 if (chan->base.chid == chid) {
306 list_del_init(&chan->head);
307 return chan;
308 }
309 }
310
311 list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
312 if (cgrp->id == chid) {
313 chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
314 list_del_init(&chan->head);
315 if (!--cgrp->chan_nr)
316 list_del_init(&cgrp->head);
317 return chan;
318 }
319 }
320
321 return NULL;
322}
323
238static void 324static void
239gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid) 325gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
240{ 326{
@@ -252,13 +338,10 @@ gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
252 return; 338 return;
253 339
254 /* Lookup SW state for channel, and mark it as dead. */ 340 /* Lookup SW state for channel, and mark it as dead. */
255 list_for_each_entry(chan, &fifo->runlist[runl].chan, head) { 341 chan = gk104_fifo_recover_chid(fifo, runl, chid);
256 if (chan->base.chid == chid) { 342 if (chan) {
257 list_del_init(&chan->head); 343 chan->killed = true;
258 chan->killed = true; 344 nvkm_fifo_kevent(&fifo->base, chid);
259 nvkm_fifo_kevent(&fifo->base, chid);
260 break;
261 }
262 } 345 }
263 346
264 /* Disable channel. */ 347 /* Disable channel. */
@@ -347,6 +430,90 @@ gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
347 schedule_work(&fifo->recover.work); 430 schedule_work(&fifo->recover.work);
348} 431}
349 432
433static void
434gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
435{
436 struct gk104_fifo *fifo = gk104_fifo(base);
437 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
438 struct nvkm_device *device = subdev->device;
439 const struct nvkm_enum *er, *ee, *ec, *ea;
440 struct nvkm_engine *engine = NULL;
441 struct nvkm_fifo_chan *chan;
442 unsigned long flags;
443 char ct[8] = "HUB/", en[16] = "";
444 int engn;
445
446 er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
447 ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
448 if (info->hub) {
449 ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client);
450 } else {
451 ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client);
452 snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
453 }
454 ea = nvkm_enum_find(fifo->func->fault.access, info->access);
455
456 if (ee && ee->data2) {
457 switch (ee->data2) {
458 case NVKM_SUBDEV_BAR:
459 nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
460 break;
461 case NVKM_SUBDEV_INSTMEM:
462 nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
463 break;
464 case NVKM_ENGINE_IFB:
465 nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
466 break;
467 default:
468 engine = nvkm_device_engine(device, ee->data2);
469 break;
470 }
471 }
472
473 if (ee == NULL) {
474 enum nvkm_devidx engidx = nvkm_top_fault(device, info->engine);
475 if (engidx < NVKM_SUBDEV_NR) {
476 const char *src = nvkm_subdev_name[engidx];
477 char *dst = en;
478 do {
479 *dst++ = toupper(*src++);
480 } while(*src);
481 engine = nvkm_device_engine(device, engidx);
482 }
483 } else {
484 snprintf(en, sizeof(en), "%s", ee->name);
485 }
486
487 spin_lock_irqsave(&fifo->base.lock, flags);
488 chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst);
489
490 nvkm_error(subdev,
491 "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
492 "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
493 info->access, ea ? ea->name : "", info->addr,
494 info->engine, ee ? ee->name : en,
495 info->client, ct, ec ? ec->name : "",
496 info->reason, er ? er->name : "", chan ? chan->chid : -1,
497 info->inst, chan ? chan->object.client->name : "unknown");
498
499 /* Kill the channel that caused the fault. */
500 if (chan)
501 gk104_fifo_recover_chan(&fifo->base, chan->chid);
502
503 /* Channel recovery will probably have already done this for the
504 * correct engine(s), but just in case we can't find the channel
505 * information...
506 */
507 for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
508 if (fifo->engine[engn].engine == engine) {
509 gk104_fifo_recover_engn(fifo, engn);
510 break;
511 }
512 }
513
514 spin_unlock_irqrestore(&fifo->base.lock, flags);
515}
516
350static const struct nvkm_enum 517static const struct nvkm_enum
351gk104_fifo_bind_reason[] = { 518gk104_fifo_bind_reason[] = {
352 { 0x01, "BIND_NOT_UNBOUND" }, 519 { 0x01, "BIND_NOT_UNBOUND" },
@@ -456,88 +623,21 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
456 u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10)); 623 u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
457 u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10)); 624 u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
458 u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10)); 625 u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
459 u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10)); 626 u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
460 u32 gpc = (stat & 0x1f000000) >> 24; 627 struct nvkm_fault_data info;
461 u32 client = (stat & 0x00001f00) >> 8; 628
462 u32 write = (stat & 0x00000080); 629 info.inst = (u64)inst << 12;
463 u32 hub = (stat & 0x00000040); 630 info.addr = ((u64)vahi << 32) | valo;
464 u32 reason = (stat & 0x0000000f); 631 info.time = 0;
465 const struct nvkm_enum *er, *eu, *ec; 632 info.engine = unit;
466 struct nvkm_engine *engine = NULL; 633 info.valid = 1;
467 struct nvkm_fifo_chan *chan; 634 info.gpc = (type & 0x1f000000) >> 24;
468 unsigned long flags; 635 info.client = (type & 0x00001f00) >> 8;
469 char gpcid[8] = "", en[16] = ""; 636 info.access = (type & 0x00000080) >> 7;
470 int engn; 637 info.hub = (type & 0x00000040) >> 6;
471 638 info.reason = (type & 0x000000ff);
472 er = nvkm_enum_find(fifo->func->fault.reason, reason); 639
473 eu = nvkm_enum_find(fifo->func->fault.engine, unit); 640 nvkm_fifo_fault(&fifo->base, &info);
474 if (hub) {
475 ec = nvkm_enum_find(fifo->func->fault.hubclient, client);
476 } else {
477 ec = nvkm_enum_find(fifo->func->fault.gpcclient, client);
478 snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
479 }
480
481 if (eu && eu->data2) {
482 switch (eu->data2) {
483 case NVKM_SUBDEV_BAR:
484 nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
485 break;
486 case NVKM_SUBDEV_INSTMEM:
487 nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
488 break;
489 case NVKM_ENGINE_IFB:
490 nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
491 break;
492 default:
493 engine = nvkm_device_engine(device, eu->data2);
494 break;
495 }
496 }
497
498 if (eu == NULL) {
499 enum nvkm_devidx engidx = nvkm_top_fault(device, unit);
500 if (engidx < NVKM_SUBDEV_NR) {
501 const char *src = nvkm_subdev_name[engidx];
502 char *dst = en;
503 do {
504 *dst++ = toupper(*src++);
505 } while(*src);
506 engine = nvkm_device_engine(device, engidx);
507 }
508 } else {
509 snprintf(en, sizeof(en), "%s", eu->name);
510 }
511
512 spin_lock_irqsave(&fifo->base.lock, flags);
513 chan = nvkm_fifo_chan_inst_locked(&fifo->base, (u64)inst << 12);
514
515 nvkm_error(subdev,
516 "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
517 "reason %02x [%s] on channel %d [%010llx %s]\n",
518 write ? "write" : "read", (u64)vahi << 32 | valo,
519 unit, en, client, gpcid, ec ? ec->name : "",
520 reason, er ? er->name : "", chan ? chan->chid : -1,
521 (u64)inst << 12,
522 chan ? chan->object.client->name : "unknown");
523
524
525 /* Kill the channel that caused the fault. */
526 if (chan)
527 gk104_fifo_recover_chan(&fifo->base, chan->chid);
528
529 /* Channel recovery will probably have already done this for the
530 * correct engine(s), but just in case we can't find the channel
531 * information...
532 */
533 for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
534 if (fifo->engine[engn].engine == engine) {
535 gk104_fifo_recover_engn(fifo, engn);
536 break;
537 }
538 }
539
540 spin_unlock_irqrestore(&fifo->base.lock, flags);
541} 641}
542 642
543static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = { 643static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
@@ -766,6 +866,34 @@ gk104_fifo_fini(struct nvkm_fifo *base)
766} 866}
767 867
768static int 868static int
869gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
870{
871 struct gk104_fifo *fifo = gk104_fifo(base);
872 switch (mthd) {
873 case NV_DEVICE_FIFO_RUNLISTS:
874 *data = (1ULL << fifo->runlist_nr) - 1;
875 return 0;
876 case NV_DEVICE_FIFO_RUNLIST_ENGINES(0)...
877 NV_DEVICE_FIFO_RUNLIST_ENGINES(63): {
878 int runl = mthd - NV_DEVICE_FIFO_RUNLIST_ENGINES(0), engn;
879 if (runl < fifo->runlist_nr) {
880 unsigned long engm = fifo->runlist[runl].engm;
881 struct nvkm_engine *engine;
882 *data = 0;
883 for_each_set_bit(engn, &engm, fifo->engine_nr) {
884 if ((engine = fifo->engine[engn].engine))
885 *data |= BIT_ULL(engine->subdev.index);
886 }
887 return 0;
888 }
889 }
890 return -EINVAL;
891 default:
892 return -EINVAL;
893 }
894}
895
896static int
769gk104_fifo_oneinit(struct nvkm_fifo *base) 897gk104_fifo_oneinit(struct nvkm_fifo *base)
770{ 898{
771 struct gk104_fifo *fifo = gk104_fifo(base); 899 struct gk104_fifo *fifo = gk104_fifo(base);
@@ -813,19 +941,18 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
813 kfree(map); 941 kfree(map);
814 942
815 for (i = 0; i < fifo->runlist_nr; i++) { 943 for (i = 0; i < fifo->runlist_nr; i++) {
816 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 944 for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) {
817 0x8000, 0x1000, false, 945 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
818 &fifo->runlist[i].mem[0]); 946 fifo->base.nr * 2/* TSG+chan */ *
819 if (ret) 947 fifo->func->runlist->size,
820 return ret; 948 0x1000, false,
821 949 &fifo->runlist[i].mem[j]);
822 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 950 if (ret)
823 0x8000, 0x1000, false, 951 return ret;
824 &fifo->runlist[i].mem[1]); 952 }
825 if (ret)
826 return ret;
827 953
828 init_waitqueue_head(&fifo->runlist[i].wait); 954 init_waitqueue_head(&fifo->runlist[i].wait);
955 INIT_LIST_HEAD(&fifo->runlist[i].cgrp);
829 INIT_LIST_HEAD(&fifo->runlist[i].chan); 956 INIT_LIST_HEAD(&fifo->runlist[i].chan);
830 } 957 }
831 958
@@ -868,6 +995,9 @@ gk104_fifo_init(struct nvkm_fifo *base)
868 995
869 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12); 996 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
870 997
998 if (fifo->func->init_pbdma_timeout)
999 fifo->func->init_pbdma_timeout(fifo);
1000
871 nvkm_wr32(device, 0x002100, 0xffffffff); 1001 nvkm_wr32(device, 0x002100, 0xffffffff);
872 nvkm_wr32(device, 0x002140, 0x7fffffff); 1002 nvkm_wr32(device, 0x002140, 0x7fffffff);
873} 1003}
@@ -894,13 +1024,16 @@ static const struct nvkm_fifo_func
894gk104_fifo_ = { 1024gk104_fifo_ = {
895 .dtor = gk104_fifo_dtor, 1025 .dtor = gk104_fifo_dtor,
896 .oneinit = gk104_fifo_oneinit, 1026 .oneinit = gk104_fifo_oneinit,
1027 .info = gk104_fifo_info,
897 .init = gk104_fifo_init, 1028 .init = gk104_fifo_init,
898 .fini = gk104_fifo_fini, 1029 .fini = gk104_fifo_fini,
899 .intr = gk104_fifo_intr, 1030 .intr = gk104_fifo_intr,
1031 .fault = gk104_fifo_fault,
900 .uevent_init = gk104_fifo_uevent_init, 1032 .uevent_init = gk104_fifo_uevent_init,
901 .uevent_fini = gk104_fifo_uevent_fini, 1033 .uevent_fini = gk104_fifo_uevent_fini,
902 .recover_chan = gk104_fifo_recover_chan, 1034 .recover_chan = gk104_fifo_recover_chan,
903 .class_get = gk104_fifo_class_get, 1035 .class_get = gk104_fifo_class_get,
1036 .class_new = gk104_fifo_class_new,
904}; 1037};
905 1038
906int 1039int
@@ -919,6 +1052,13 @@ gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
919} 1052}
920 1053
921const struct nvkm_enum 1054const struct nvkm_enum
1055gk104_fifo_fault_access[] = {
1056 { 0x0, "READ" },
1057 { 0x1, "WRITE" },
1058 {}
1059};
1060
1061const struct nvkm_enum
922gk104_fifo_fault_engine[] = { 1062gk104_fifo_fault_engine[] = {
923 { 0x00, "GR", NULL, NVKM_ENGINE_GR }, 1063 { 0x00, "GR", NULL, NVKM_ENGINE_GR },
924 { 0x01, "DISPLAY" }, 1064 { 0x01, "DISPLAY" },
@@ -1035,14 +1175,13 @@ gk104_fifo_fault_gpcclient[] = {
1035 1175
1036static const struct gk104_fifo_func 1176static const struct gk104_fifo_func
1037gk104_fifo = { 1177gk104_fifo = {
1178 .fault.access = gk104_fifo_fault_access,
1038 .fault.engine = gk104_fifo_fault_engine, 1179 .fault.engine = gk104_fifo_fault_engine,
1039 .fault.reason = gk104_fifo_fault_reason, 1180 .fault.reason = gk104_fifo_fault_reason,
1040 .fault.hubclient = gk104_fifo_fault_hubclient, 1181 .fault.hubclient = gk104_fifo_fault_hubclient,
1041 .fault.gpcclient = gk104_fifo_fault_gpcclient, 1182 .fault.gpcclient = gk104_fifo_fault_gpcclient,
1042 .chan = { 1183 .runlist = &gk104_fifo_runlist,
1043 &gk104_fifo_gpfifo_oclass, 1184 .chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
1044 NULL
1045 },
1046}; 1185};
1047 1186
1048int 1187int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 1579785cf941..d295b81e18d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -3,6 +3,7 @@
3#define __GK104_FIFO_H__ 3#define __GK104_FIFO_H__
4#define gk104_fifo(p) container_of((p), struct gk104_fifo, base) 4#define gk104_fifo(p) container_of((p), struct gk104_fifo, base)
5#include "priv.h" 5#include "priv.h"
6struct nvkm_fifo_cgrp;
6 7
7#include <core/enum.h> 8#include <core/enum.h>
8#include <subdev/mmu.h> 9#include <subdev/mmu.h>
@@ -31,6 +32,7 @@ struct gk104_fifo {
31 struct nvkm_memory *mem[2]; 32 struct nvkm_memory *mem[2];
32 int next; 33 int next;
33 wait_queue_head_t wait; 34 wait_queue_head_t wait;
35 struct list_head cgrp;
34 struct list_head chan; 36 struct list_head chan;
35 u32 engm; 37 u32 engm;
36 } runlist[16]; 38 } runlist[16];
@@ -43,14 +45,36 @@ struct gk104_fifo {
43}; 45};
44 46
45struct gk104_fifo_func { 47struct gk104_fifo_func {
48 void (*init_pbdma_timeout)(struct gk104_fifo *);
49
46 struct { 50 struct {
51 const struct nvkm_enum *access;
47 const struct nvkm_enum *engine; 52 const struct nvkm_enum *engine;
48 const struct nvkm_enum *reason; 53 const struct nvkm_enum *reason;
49 const struct nvkm_enum *hubclient; 54 const struct nvkm_enum *hubclient;
50 const struct nvkm_enum *gpcclient; 55 const struct nvkm_enum *gpcclient;
51 } fault; 56 } fault;
52 57
53 const struct nvkm_fifo_chan_oclass *chan[]; 58 const struct gk104_fifo_runlist_func {
59 u8 size;
60 void (*cgrp)(struct nvkm_fifo_cgrp *,
61 struct nvkm_memory *, u32 offset);
62 void (*chan)(struct gk104_fifo_chan *,
63 struct nvkm_memory *, u32 offset);
64 } *runlist;
65
66 struct gk104_fifo_user_user {
67 struct nvkm_sclass user;
68 int (*ctor)(const struct nvkm_oclass *, void *, u32,
69 struct nvkm_object **);
70 } user;
71
72 struct gk104_fifo_chan_user {
73 struct nvkm_sclass user;
74 int (*ctor)(struct gk104_fifo *, const struct nvkm_oclass *,
75 void *, u32, struct nvkm_object **);
76 } chan;
77 bool cgrp_force;
54}; 78};
55 79
56int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *, 80int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *,
@@ -59,30 +83,23 @@ void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
59void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *); 83void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
60void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl); 84void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl);
61 85
62static inline u64 86extern const struct nvkm_enum gk104_fifo_fault_access[];
63gk104_fifo_engine_subdev(int engine)
64{
65 switch (engine) {
66 case 0: return (1ULL << NVKM_ENGINE_GR) |
67 (1ULL << NVKM_ENGINE_SW) |
68 (1ULL << NVKM_ENGINE_CE2);
69 case 1: return (1ULL << NVKM_ENGINE_MSPDEC);
70 case 2: return (1ULL << NVKM_ENGINE_MSPPP);
71 case 3: return (1ULL << NVKM_ENGINE_MSVLD);
72 case 4: return (1ULL << NVKM_ENGINE_CE0);
73 case 5: return (1ULL << NVKM_ENGINE_CE1);
74 case 6: return (1ULL << NVKM_ENGINE_MSENC);
75 default:
76 WARN_ON(1);
77 return 0;
78 }
79}
80
81extern const struct nvkm_enum gk104_fifo_fault_engine[]; 87extern const struct nvkm_enum gk104_fifo_fault_engine[];
82extern const struct nvkm_enum gk104_fifo_fault_reason[]; 88extern const struct nvkm_enum gk104_fifo_fault_reason[];
83extern const struct nvkm_enum gk104_fifo_fault_hubclient[]; 89extern const struct nvkm_enum gk104_fifo_fault_hubclient[];
84extern const struct nvkm_enum gk104_fifo_fault_gpcclient[]; 90extern const struct nvkm_enum gk104_fifo_fault_gpcclient[];
91extern const struct gk104_fifo_runlist_func gk104_fifo_runlist;
92void gk104_fifo_runlist_chan(struct gk104_fifo_chan *,
93 struct nvkm_memory *, u32);
94
95extern const struct gk104_fifo_runlist_func gk110_fifo_runlist;
96void gk110_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
97 struct nvkm_memory *, u32);
98
99void gk208_fifo_init_pbdma_timeout(struct gk104_fifo *);
85 100
86extern const struct nvkm_enum gm107_fifo_fault_engine[]; 101extern const struct nvkm_enum gm107_fifo_fault_engine[];
102extern const struct gk104_fifo_runlist_func gm107_fifo_runlist;
103
87extern const struct nvkm_enum gp100_fifo_fault_engine[]; 104extern const struct nvkm_enum gp100_fifo_fault_engine[];
88#endif 105#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index b2f8ab7bf847..ac7655a130fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -22,18 +22,38 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "gk104.h" 24#include "gk104.h"
25#include "cgrp.h"
25#include "changk104.h" 26#include "changk104.h"
26 27
28#include <core/memory.h>
29
30#include <nvif/class.h>
31
32void
33gk110_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *cgrp,
34 struct nvkm_memory *memory, u32 offset)
35{
36 nvkm_wo32(memory, offset + 0, (cgrp->chan_nr << 26) | (128 << 18) |
37 (3 << 14) | 0x00002000 | cgrp->id);
38 nvkm_wo32(memory, offset + 4, 0x00000000);
39}
40
41const struct gk104_fifo_runlist_func
42gk110_fifo_runlist = {
43 .size = 8,
44 .cgrp = gk110_fifo_runlist_cgrp,
45 .chan = gk104_fifo_runlist_chan,
46};
47
27static const struct gk104_fifo_func 48static const struct gk104_fifo_func
28gk110_fifo = { 49gk110_fifo = {
50 .fault.access = gk104_fifo_fault_access,
29 .fault.engine = gk104_fifo_fault_engine, 51 .fault.engine = gk104_fifo_fault_engine,
30 .fault.reason = gk104_fifo_fault_reason, 52 .fault.reason = gk104_fifo_fault_reason,
31 .fault.hubclient = gk104_fifo_fault_hubclient, 53 .fault.hubclient = gk104_fifo_fault_hubclient,
32 .fault.gpcclient = gk104_fifo_fault_gpcclient, 54 .fault.gpcclient = gk104_fifo_fault_gpcclient,
33 .chan = { 55 .runlist = &gk110_fifo_runlist,
34 &gk110_fifo_gpfifo_oclass, 56 .chan = {{0,0,KEPLER_CHANNEL_GPFIFO_B}, gk104_fifo_gpfifo_new },
35 NULL
36 },
37}; 57};
38 58
39int 59int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 160617d376e4..5ea7e452cc66 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -24,16 +24,28 @@
24#include "gk104.h" 24#include "gk104.h"
25#include "changk104.h" 25#include "changk104.h"
26 26
27#include <nvif/class.h>
28
29void
30gk208_fifo_init_pbdma_timeout(struct gk104_fifo *fifo)
31{
32 struct nvkm_device *device = fifo->base.engine.subdev.device;
33 int i;
34
35 for (i = 0; i < fifo->pbdma_nr; i++)
36 nvkm_wr32(device, 0x04012c + (i * 0x2000), 0x0000ffff);
37}
38
27static const struct gk104_fifo_func 39static const struct gk104_fifo_func
28gk208_fifo = { 40gk208_fifo = {
41 .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
42 .fault.access = gk104_fifo_fault_access,
29 .fault.engine = gk104_fifo_fault_engine, 43 .fault.engine = gk104_fifo_fault_engine,
30 .fault.reason = gk104_fifo_fault_reason, 44 .fault.reason = gk104_fifo_fault_reason,
31 .fault.hubclient = gk104_fifo_fault_hubclient, 45 .fault.hubclient = gk104_fifo_fault_hubclient,
32 .fault.gpcclient = gk104_fifo_fault_gpcclient, 46 .fault.gpcclient = gk104_fifo_fault_gpcclient,
33 .chan = { 47 .runlist = &gk110_fifo_runlist,
34 &gk104_fifo_gpfifo_oclass, 48 .chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
35 NULL
36 },
37}; 49};
38 50
39int 51int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index be9f5c16ed7d..535a0eb67a5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -22,16 +22,18 @@
22#include "gk104.h" 22#include "gk104.h"
23#include "changk104.h" 23#include "changk104.h"
24 24
25#include <nvif/class.h>
26
25static const struct gk104_fifo_func 27static const struct gk104_fifo_func
26gk20a_fifo = { 28gk20a_fifo = {
29 .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
30 .fault.access = gk104_fifo_fault_access,
27 .fault.engine = gk104_fifo_fault_engine, 31 .fault.engine = gk104_fifo_fault_engine,
28 .fault.reason = gk104_fifo_fault_reason, 32 .fault.reason = gk104_fifo_fault_reason,
29 .fault.hubclient = gk104_fifo_fault_hubclient, 33 .fault.hubclient = gk104_fifo_fault_hubclient,
30 .fault.gpcclient = gk104_fifo_fault_gpcclient, 34 .fault.gpcclient = gk104_fifo_fault_gpcclient,
31 .chan = { 35 .runlist = &gk110_fifo_runlist,
32 &gk104_fifo_gpfifo_oclass, 36 .chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
33 NULL
34 },
35}; 37};
36 38
37int 39int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index 29c080683b32..79ae19b1db67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -24,6 +24,25 @@
24#include "gk104.h" 24#include "gk104.h"
25#include "changk104.h" 25#include "changk104.h"
26 26
27#include <core/gpuobj.h>
28
29#include <nvif/class.h>
30
31static void
32gm107_fifo_runlist_chan(struct gk104_fifo_chan *chan,
33 struct nvkm_memory *memory, u32 offset)
34{
35 nvkm_wo32(memory, offset + 0, chan->base.chid);
36 nvkm_wo32(memory, offset + 4, chan->base.inst->addr >> 12);
37}
38
39const struct gk104_fifo_runlist_func
40gm107_fifo_runlist = {
41 .size = 8,
42 .cgrp = gk110_fifo_runlist_cgrp,
43 .chan = gm107_fifo_runlist_chan,
44};
45
27const struct nvkm_enum 46const struct nvkm_enum
28gm107_fifo_fault_engine[] = { 47gm107_fifo_fault_engine[] = {
29 { 0x01, "DISPLAY" }, 48 { 0x01, "DISPLAY" },
@@ -49,14 +68,14 @@ gm107_fifo_fault_engine[] = {
49 68
50static const struct gk104_fifo_func 69static const struct gk104_fifo_func
51gm107_fifo = { 70gm107_fifo = {
71 .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
72 .fault.access = gk104_fifo_fault_access,
52 .fault.engine = gm107_fifo_fault_engine, 73 .fault.engine = gm107_fifo_fault_engine,
53 .fault.reason = gk104_fifo_fault_reason, 74 .fault.reason = gk104_fifo_fault_reason,
54 .fault.hubclient = gk104_fifo_fault_hubclient, 75 .fault.hubclient = gk104_fifo_fault_hubclient,
55 .fault.gpcclient = gk104_fifo_fault_gpcclient, 76 .fault.gpcclient = gk104_fifo_fault_gpcclient,
56 .chan = { 77 .runlist = &gm107_fifo_runlist,
57 &gk110_fifo_gpfifo_oclass, 78 .chan = {{0,0,KEPLER_CHANNEL_GPFIFO_B}, gk104_fifo_gpfifo_new },
58 NULL
59 },
60}; 79};
61 80
62int 81int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index b069f785c5d8..49565faa854d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -24,16 +24,18 @@
24#include "gk104.h" 24#include "gk104.h"
25#include "changk104.h" 25#include "changk104.h"
26 26
27#include <nvif/class.h>
28
27static const struct gk104_fifo_func 29static const struct gk104_fifo_func
28gm200_fifo = { 30gm200_fifo = {
31 .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
32 .fault.access = gk104_fifo_fault_access,
29 .fault.engine = gm107_fifo_fault_engine, 33 .fault.engine = gm107_fifo_fault_engine,
30 .fault.reason = gk104_fifo_fault_reason, 34 .fault.reason = gk104_fifo_fault_reason,
31 .fault.hubclient = gk104_fifo_fault_hubclient, 35 .fault.hubclient = gk104_fifo_fault_hubclient,
32 .fault.gpcclient = gk104_fifo_fault_gpcclient, 36 .fault.gpcclient = gk104_fifo_fault_gpcclient,
33 .chan = { 37 .runlist = &gm107_fifo_runlist,
34 &gm200_fifo_gpfifo_oclass, 38 .chan = {{0,0,MAXWELL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
35 NULL
36 },
37}; 39};
38 40
39int 41int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index 2ed87c2e8299..46736513bd11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -22,16 +22,18 @@
22#include "gk104.h" 22#include "gk104.h"
23#include "changk104.h" 23#include "changk104.h"
24 24
25#include <nvif/class.h>
26
25static const struct gk104_fifo_func 27static const struct gk104_fifo_func
26gm20b_fifo = { 28gm20b_fifo = {
29 .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
30 .fault.access = gk104_fifo_fault_access,
27 .fault.engine = gm107_fifo_fault_engine, 31 .fault.engine = gm107_fifo_fault_engine,
28 .fault.reason = gk104_fifo_fault_reason, 32 .fault.reason = gk104_fifo_fault_reason,
29 .fault.hubclient = gk104_fifo_fault_hubclient, 33 .fault.hubclient = gk104_fifo_fault_hubclient,
30 .fault.gpcclient = gk104_fifo_fault_gpcclient, 34 .fault.gpcclient = gk104_fifo_fault_gpcclient,
31 .chan = { 35 .runlist = &gm107_fifo_runlist,
32 &gm200_fifo_gpfifo_oclass, 36 .chan = {{0,0,MAXWELL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
33 NULL
34 },
35}; 37};
36 38
37int 39int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index 41f16cf5a918..e2f8f9087d7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -24,6 +24,8 @@
24#include "gk104.h" 24#include "gk104.h"
25#include "changk104.h" 25#include "changk104.h"
26 26
27#include <nvif/class.h>
28
27const struct nvkm_enum 29const struct nvkm_enum
28gp100_fifo_fault_engine[] = { 30gp100_fifo_fault_engine[] = {
29 { 0x01, "DISPLAY" }, 31 { 0x01, "DISPLAY" },
@@ -50,14 +52,15 @@ gp100_fifo_fault_engine[] = {
50 52
51static const struct gk104_fifo_func 53static const struct gk104_fifo_func
52gp100_fifo = { 54gp100_fifo = {
55 .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
56 .fault.access = gk104_fifo_fault_access,
53 .fault.engine = gp100_fifo_fault_engine, 57 .fault.engine = gp100_fifo_fault_engine,
54 .fault.reason = gk104_fifo_fault_reason, 58 .fault.reason = gk104_fifo_fault_reason,
55 .fault.hubclient = gk104_fifo_fault_hubclient, 59 .fault.hubclient = gk104_fifo_fault_hubclient,
56 .fault.gpcclient = gk104_fifo_fault_gpcclient, 60 .fault.gpcclient = gk104_fifo_fault_gpcclient,
57 .chan = { 61 .runlist = &gm107_fifo_runlist,
58 &gp100_fifo_gpfifo_oclass, 62 .chan = {{0,0,PASCAL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
59 NULL 63 .cgrp_force = true,
60 },
61}; 64};
62 65
63int 66int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
index 4af96c3e69ff..7733bf7c6545 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -22,16 +22,19 @@
22#include "gk104.h" 22#include "gk104.h"
23#include "changk104.h" 23#include "changk104.h"
24 24
25#include <nvif/class.h>
26
25static const struct gk104_fifo_func 27static const struct gk104_fifo_func
26gp10b_fifo = { 28gp10b_fifo = {
29 .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
30 .fault.access = gk104_fifo_fault_access,
27 .fault.engine = gp100_fifo_fault_engine, 31 .fault.engine = gp100_fifo_fault_engine,
28 .fault.reason = gk104_fifo_fault_reason, 32 .fault.reason = gk104_fifo_fault_reason,
29 .fault.hubclient = gk104_fifo_fault_hubclient, 33 .fault.hubclient = gk104_fifo_fault_hubclient,
30 .fault.gpcclient = gk104_fifo_fault_gpcclient, 34 .fault.gpcclient = gk104_fifo_fault_gpcclient,
31 .chan = { 35 .runlist = &gm107_fifo_runlist,
32 &gp100_fifo_gpfifo_oclass, 36 .chan = {{0,0,PASCAL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
33 NULL 37 .cgrp_force = true,
34 },
35}; 38};
36 39
37int 40int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 80c87521bebe..118b37aea318 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -22,6 +22,7 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24#include "changk104.h" 24#include "changk104.h"
25#include "cgrp.h"
25 26
26#include <core/client.h> 27#include <core/client.h>
27#include <core/gpuobj.h> 28#include <core/gpuobj.h>
@@ -33,27 +34,40 @@
33#include <nvif/cla06f.h> 34#include <nvif/cla06f.h>
34#include <nvif/unpack.h> 35#include <nvif/unpack.h>
35 36
36static int 37int
37gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan) 38gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan *chan)
38{ 39{
39 struct gk104_fifo *fifo = chan->fifo; 40 struct gk104_fifo *fifo = chan->fifo;
40 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 41 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
41 struct nvkm_device *device = subdev->device; 42 struct nvkm_device *device = subdev->device;
42 struct nvkm_client *client = chan->base.object.client; 43 struct nvkm_client *client = chan->base.object.client;
44 struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
43 int ret = 0; 45 int ret = 0;
44 46
45 mutex_lock(&subdev->mutex); 47 if (cgrp)
46 nvkm_wr32(device, 0x002634, chan->base.chid); 48 nvkm_wr32(device, 0x002634, cgrp->id | 0x01000000);
49 else
50 nvkm_wr32(device, 0x002634, chan->base.chid);
47 if (nvkm_msec(device, 2000, 51 if (nvkm_msec(device, 2000,
48 if (!(nvkm_rd32(device, 0x002634) & 0x00100000)) 52 if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
49 break; 53 break;
50 ) < 0) { 54 ) < 0) {
51 nvkm_error(subdev, "channel %d [%s] kick timeout\n", 55 nvkm_error(subdev, "%s %d [%s] kick timeout\n",
52 chan->base.chid, client->name); 56 cgrp ? "tsg" : "channel",
57 cgrp ? cgrp->id : chan->base.chid, client->name);
53 nvkm_fifo_recover_chan(&fifo->base, chan->base.chid); 58 nvkm_fifo_recover_chan(&fifo->base, chan->base.chid);
54 ret = -ETIMEDOUT; 59 ret = -ETIMEDOUT;
55 } 60 }
56 mutex_unlock(&subdev->mutex); 61 return ret;
62}
63
64int
65gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
66{
67 int ret;
68 mutex_lock(&chan->base.fifo->engine.subdev.mutex);
69 ret = gk104_fifo_gpfifo_kick_locked(chan);
70 mutex_unlock(&chan->base.fifo->engine.subdev.mutex);
57 return ret; 71 return ret;
58} 72}
59 73
@@ -62,9 +76,8 @@ gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
62{ 76{
63 switch (engine->subdev.index) { 77 switch (engine->subdev.index) {
64 case NVKM_ENGINE_SW : 78 case NVKM_ENGINE_SW :
65 case NVKM_ENGINE_CE0 : 79 case NVKM_ENGINE_CE0...NVKM_ENGINE_CE_LAST:
66 case NVKM_ENGINE_CE1 : 80 return 0;
67 case NVKM_ENGINE_CE2 : return 0x0000;
68 case NVKM_ENGINE_GR : return 0x0210; 81 case NVKM_ENGINE_GR : return 0x0210;
69 case NVKM_ENGINE_SEC : return 0x0220; 82 case NVKM_ENGINE_SEC : return 0x0220;
70 case NVKM_ENGINE_MSPDEC: return 0x0250; 83 case NVKM_ENGINE_MSPDEC: return 0x0250;
@@ -133,7 +146,7 @@ gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
133 return 0; 146 return 0;
134} 147}
135 148
136static void 149void
137gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base, 150gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
138 struct nvkm_engine *engine) 151 struct nvkm_engine *engine)
139{ 152{
@@ -142,7 +155,7 @@ gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
142 nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst); 155 nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
143} 156}
144 157
145static int 158int
146gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base, 159gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
147 struct nvkm_engine *engine, 160 struct nvkm_engine *engine,
148 struct nvkm_object *object) 161 struct nvkm_object *object)
@@ -167,7 +180,7 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
167 chan->engn[engn].vma, NULL, 0); 180 chan->engn[engn].vma, NULL, 0);
168} 181}
169 182
170static void 183void
171gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base) 184gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
172{ 185{
173 struct gk104_fifo_chan *chan = gk104_fifo_chan(base); 186 struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
@@ -185,7 +198,7 @@ gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
185 nvkm_wr32(device, 0x800000 + coff, 0x00000000); 198 nvkm_wr32(device, 0x800000 + coff, 0x00000000);
186} 199}
187 200
188static void 201void
189gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base) 202gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
190{ 203{
191 struct gk104_fifo_chan *chan = gk104_fifo_chan(base); 204 struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
@@ -205,13 +218,15 @@ gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
205 } 218 }
206} 219}
207 220
208static void * 221void *
209gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base) 222gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
210{ 223{
211 return gk104_fifo_chan(base); 224 struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
225 kfree(chan->cgrp);
226 return chan;
212} 227}
213 228
214static const struct nvkm_fifo_chan_func 229const struct nvkm_fifo_chan_func
215gk104_fifo_gpfifo_func = { 230gk104_fifo_gpfifo_func = {
216 .dtor = gk104_fifo_gpfifo_dtor, 231 .dtor = gk104_fifo_gpfifo_dtor,
217 .init = gk104_fifo_gpfifo_init, 232 .init = gk104_fifo_gpfifo_init,
@@ -223,62 +238,30 @@ gk104_fifo_gpfifo_func = {
223 .engine_fini = gk104_fifo_gpfifo_engine_fini, 238 .engine_fini = gk104_fifo_gpfifo_engine_fini,
224}; 239};
225 240
226struct gk104_fifo_chan_func {
227 u32 engine;
228 u64 subdev;
229};
230
231static int 241static int
232gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func, 242gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
233 struct gk104_fifo *fifo, u32 *engmask, u16 *chid,
234 u64 vmm, u64 ioffset, u64 ilength, 243 u64 vmm, u64 ioffset, u64 ilength,
235 const struct nvkm_oclass *oclass, 244 const struct nvkm_oclass *oclass,
236 struct nvkm_object **pobject) 245 struct nvkm_object **pobject)
237{ 246{
238 struct gk104_fifo_chan *chan; 247 struct gk104_fifo_chan *chan;
239 int runlist = -1, ret = -ENOSYS, i, j; 248 int runlist = ffs(*runlists) -1, ret, i;
240 u32 engines = 0, present = 0; 249 unsigned long engm;
241 u64 subdevs = 0; 250 u64 subdevs = 0;
242 u64 usermem; 251 u64 usermem;
243 252
244 if (!vmm) 253 if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
245 return -EINVAL; 254 return -EINVAL;
255 *runlists = BIT_ULL(runlist);
246 256
247 /* Determine which downstream engines are present */ 257 engm = fifo->runlist[runlist].engm;
248 for (i = 0; i < fifo->engine_nr; i++) { 258 for_each_set_bit(i, &engm, fifo->engine_nr) {
249 struct nvkm_engine *engine = fifo->engine[i].engine; 259 if (fifo->engine[i].engine)
250 if (engine) { 260 subdevs |= BIT_ULL(fifo->engine[i].engine->subdev.index);
251 u64 submask = BIT_ULL(engine->subdev.index);
252 for (j = 0; func[j].subdev; j++) {
253 if (func[j].subdev & submask) {
254 present |= func[j].engine;
255 break;
256 }
257 }
258
259 if (!func[j].subdev)
260 continue;
261
262 if (runlist < 0 && (*engmask & present))
263 runlist = fifo->engine[i].runl;
264 if (runlist == fifo->engine[i].runl) {
265 engines |= func[j].engine;
266 subdevs |= func[j].subdev;
267 }
268 }
269 }
270
271 /* Just an engine mask query? All done here! */
272 if (!*engmask) {
273 *engmask = present;
274 return nvkm_object_new(oclass, NULL, 0, pobject);
275 } 261 }
276 262
277 /* No runlist? No supported engines. */ 263 if (subdevs & BIT_ULL(NVKM_ENGINE_GR))
278 *engmask = present; 264 subdevs |= BIT_ULL(NVKM_ENGINE_SW);
279 if (runlist < 0)
280 return -ENODEV;
281 *engmask = engines;
282 265
283 /* Allocate the channel. */ 266 /* Allocate the channel. */
284 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) 267 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
@@ -297,6 +280,18 @@ gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
297 280
298 *chid = chan->base.chid; 281 *chid = chan->base.chid;
299 282
283 /* Hack to support GPUs where even individual channels should be
284 * part of a channel group.
285 */
286 if (fifo->func->cgrp_force) {
287 if (!(chan->cgrp = kmalloc(sizeof(*chan->cgrp), GFP_KERNEL)))
288 return -ENOMEM;
289 chan->cgrp->id = chan->base.chid;
290 INIT_LIST_HEAD(&chan->cgrp->head);
291 INIT_LIST_HEAD(&chan->cgrp->chan);
292 chan->cgrp->chan_nr = 0;
293 }
294
300 /* Clear channel control registers. */ 295 /* Clear channel control registers. */
301 usermem = chan->base.chid * 0x200; 296 usermem = chan->base.chid * 0x200;
302 ilength = order_base_2(ilength / 8); 297 ilength = order_base_2(ilength / 8);
@@ -328,45 +323,25 @@ gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
328 return 0; 323 return 0;
329} 324}
330 325
331static const struct gk104_fifo_chan_func
332gk104_fifo_gpfifo[] = {
333 { NVA06F_V0_ENGINE_SW | NVA06F_V0_ENGINE_GR,
334 BIT_ULL(NVKM_ENGINE_SW) | BIT_ULL(NVKM_ENGINE_GR)
335 },
336 { NVA06F_V0_ENGINE_SEC , BIT_ULL(NVKM_ENGINE_SEC ) },
337 { NVA06F_V0_ENGINE_MSVLD , BIT_ULL(NVKM_ENGINE_MSVLD ) },
338 { NVA06F_V0_ENGINE_MSPDEC, BIT_ULL(NVKM_ENGINE_MSPDEC) },
339 { NVA06F_V0_ENGINE_MSPPP , BIT_ULL(NVKM_ENGINE_MSPPP ) },
340 { NVA06F_V0_ENGINE_MSENC , BIT_ULL(NVKM_ENGINE_MSENC ) },
341 { NVA06F_V0_ENGINE_VIC , BIT_ULL(NVKM_ENGINE_VIC ) },
342 { NVA06F_V0_ENGINE_NVDEC , BIT_ULL(NVKM_ENGINE_NVDEC ) },
343 { NVA06F_V0_ENGINE_NVENC0, BIT_ULL(NVKM_ENGINE_NVENC0) },
344 { NVA06F_V0_ENGINE_NVENC1, BIT_ULL(NVKM_ENGINE_NVENC1) },
345 { NVA06F_V0_ENGINE_CE0 , BIT_ULL(NVKM_ENGINE_CE0 ) },
346 { NVA06F_V0_ENGINE_CE1 , BIT_ULL(NVKM_ENGINE_CE1 ) },
347 { NVA06F_V0_ENGINE_CE2 , BIT_ULL(NVKM_ENGINE_CE2 ) },
348 {}
349};
350
351int 326int
352gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, 327gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
353 void *data, u32 size, struct nvkm_object **pobject) 328 void *data, u32 size, struct nvkm_object **pobject)
354{ 329{
355 struct nvkm_object *parent = oclass->parent; 330 struct nvkm_object *parent = oclass->parent;
356 union { 331 union {
357 struct kepler_channel_gpfifo_a_v0 v0; 332 struct kepler_channel_gpfifo_a_v0 v0;
358 } *args = data; 333 } *args = data;
359 struct gk104_fifo *fifo = gk104_fifo(base);
360 int ret = -ENOSYS; 334 int ret = -ENOSYS;
361 335
362 nvif_ioctl(parent, "create channel gpfifo size %d\n", size); 336 nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
363 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { 337 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
364 nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx " 338 nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
365 "ioffset %016llx ilength %08x engine %08x\n", 339 "ioffset %016llx ilength %08x "
340 "runlist %016llx\n",
366 args->v0.version, args->v0.vmm, args->v0.ioffset, 341 args->v0.version, args->v0.vmm, args->v0.ioffset,
367 args->v0.ilength, args->v0.engines); 342 args->v0.ilength, args->v0.runlist);
368 return gk104_fifo_gpfifo_new_(gk104_fifo_gpfifo, fifo, 343 return gk104_fifo_gpfifo_new_(fifo,
369 &args->v0.engines, 344 &args->v0.runlist,
370 &args->v0.chid, 345 &args->v0.chid,
371 args->v0.vmm, 346 args->v0.vmm,
372 args->v0.ioffset, 347 args->v0.ioffset,
@@ -376,11 +351,3 @@ gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
376 351
377 return ret; 352 return ret;
378} 353}
379
380const struct nvkm_fifo_chan_oclass
381gk104_fifo_gpfifo_oclass = {
382 .base.oclass = KEPLER_CHANNEL_GPFIFO_A,
383 .base.minver = 0,
384 .base.maxver = 0,
385 .ctor = gk104_fifo_gpfifo_new,
386};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
new file mode 100644
index 000000000000..9598853ced56
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
@@ -0,0 +1,225 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "changk104.h"
23#include "cgrp.h"
24
25#include <core/client.h>
26#include <core/gpuobj.h>
27
28#include <nvif/cla06f.h>
29#include <nvif/unpack.h>
30
31static int
32gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid)
33{
34 struct nvkm_subdev *subdev = &chan->base.fifo->engine.subdev;
35 struct nvkm_device *device = subdev->device;
36 const u32 mask = ce ? 0x00020000 : 0x00010000;
37 const u32 data = valid ? mask : 0x00000000;
38 int ret;
39
40 /* Block runlist to prevent the channel from being rescheduled. */
41 mutex_lock(&subdev->mutex);
42 nvkm_mask(device, 0x002630, BIT(chan->runl), BIT(chan->runl));
43
44 /* Preempt the channel. */
45 ret = gk104_fifo_gpfifo_kick_locked(chan);
46 if (ret == 0) {
47 /* Update engine context validity. */
48 nvkm_kmap(chan->base.inst);
49 nvkm_mo32(chan->base.inst, 0x0ac, mask, data);
50 nvkm_done(chan->base.inst);
51 }
52
53 /* Resume runlist. */
54 nvkm_mask(device, 0x002630, BIT(chan->runl), 0);
55 mutex_unlock(&subdev->mutex);
56 return ret;
57}
58
59static int
60gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
61 struct nvkm_engine *engine, bool suspend)
62{
63 struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
64 struct nvkm_gpuobj *inst = chan->base.inst;
65 int ret;
66
67 if (engine->subdev.index >= NVKM_ENGINE_CE0 &&
68 engine->subdev.index <= NVKM_ENGINE_CE_LAST)
69 return gk104_fifo_gpfifo_kick(chan);
70
71 ret = gv100_fifo_gpfifo_engine_valid(chan, false, false);
72 if (ret && suspend)
73 return ret;
74
75 nvkm_kmap(inst);
76 nvkm_wo32(inst, 0x0210, 0x00000000);
77 nvkm_wo32(inst, 0x0214, 0x00000000);
78 nvkm_done(inst);
79 return ret;
80}
81
82static int
83gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
84 struct nvkm_engine *engine)
85{
86 struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
87 struct nvkm_gpuobj *inst = chan->base.inst;
88 u64 addr;
89
90 if (engine->subdev.index >= NVKM_ENGINE_CE0 &&
91 engine->subdev.index <= NVKM_ENGINE_CE_LAST)
92 return 0;
93
94 addr = chan->engn[engine->subdev.index].vma->addr;
95 nvkm_kmap(inst);
96 nvkm_wo32(inst, 0x210, lower_32_bits(addr) | 0x00000004);
97 nvkm_wo32(inst, 0x214, upper_32_bits(addr));
98 nvkm_done(inst);
99
100 return gv100_fifo_gpfifo_engine_valid(chan, false, true);
101}
102
103const struct nvkm_fifo_chan_func
104gv100_fifo_gpfifo_func = {
105 .dtor = gk104_fifo_gpfifo_dtor,
106 .init = gk104_fifo_gpfifo_init,
107 .fini = gk104_fifo_gpfifo_fini,
108 .ntfy = gf100_fifo_chan_ntfy,
109 .engine_ctor = gk104_fifo_gpfifo_engine_ctor,
110 .engine_dtor = gk104_fifo_gpfifo_engine_dtor,
111 .engine_init = gv100_fifo_gpfifo_engine_init,
112 .engine_fini = gv100_fifo_gpfifo_engine_fini,
113};
114
115static int
116gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
117 u64 vmm, u64 ioffset, u64 ilength,
118 const struct nvkm_oclass *oclass,
119 struct nvkm_object **pobject)
120{
121 struct gk104_fifo_chan *chan;
122 int runlist = ffs(*runlists) -1, ret, i;
123 unsigned long engm;
124 u64 subdevs = 0;
125 u64 usermem;
126
127 if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
128 return -EINVAL;
129 *runlists = BIT_ULL(runlist);
130
131 engm = fifo->runlist[runlist].engm;
132 for_each_set_bit(i, &engm, fifo->engine_nr) {
133 if (fifo->engine[i].engine)
134 subdevs |= BIT_ULL(fifo->engine[i].engine->subdev.index);
135 }
136
137 /* Allocate the channel. */
138 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
139 return -ENOMEM;
140 *pobject = &chan->base.object;
141 chan->fifo = fifo;
142 chan->runl = runlist;
143 INIT_LIST_HEAD(&chan->head);
144
145 ret = nvkm_fifo_chan_ctor(&gv100_fifo_gpfifo_func, &fifo->base,
146 0x1000, 0x1000, true, vmm, 0, subdevs,
147 1, fifo->user.bar->addr, 0x200,
148 oclass, &chan->base);
149 if (ret)
150 return ret;
151
152 *chid = chan->base.chid;
153
154 /* Hack to support GPUs where even individual channels should be
155 * part of a channel group.
156 */
157 if (fifo->func->cgrp_force) {
158 if (!(chan->cgrp = kmalloc(sizeof(*chan->cgrp), GFP_KERNEL)))
159 return -ENOMEM;
160 chan->cgrp->id = chan->base.chid;
161 INIT_LIST_HEAD(&chan->cgrp->head);
162 INIT_LIST_HEAD(&chan->cgrp->chan);
163 chan->cgrp->chan_nr = 0;
164 }
165
166 /* Clear channel control registers. */
167 usermem = chan->base.chid * 0x200;
168 ilength = order_base_2(ilength / 8);
169
170 nvkm_kmap(fifo->user.mem);
171 for (i = 0; i < 0x200; i += 4)
172 nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
173 nvkm_done(fifo->user.mem);
174 usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
175
176 /* RAMFC */
177 nvkm_kmap(chan->base.inst);
178 nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem));
179 nvkm_wo32(chan->base.inst, 0x00c, upper_32_bits(usermem));
180 nvkm_wo32(chan->base.inst, 0x010, 0x0000face);
181 nvkm_wo32(chan->base.inst, 0x030, 0x7ffff902);
182 nvkm_wo32(chan->base.inst, 0x048, lower_32_bits(ioffset));
183 nvkm_wo32(chan->base.inst, 0x04c, upper_32_bits(ioffset) |
184 (ilength << 16));
185 nvkm_wo32(chan->base.inst, 0x084, 0x20400000);
186 nvkm_wo32(chan->base.inst, 0x094, 0x30000001);
187 nvkm_wo32(chan->base.inst, 0x0e4, 0x00000020);
188 nvkm_wo32(chan->base.inst, 0x0e8, chan->base.chid);
189 nvkm_wo32(chan->base.inst, 0x0f4, 0x00001100);
190 nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
191 nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
192 nvkm_wo32(chan->base.inst, 0x220, 0x020a1000);
193 nvkm_wo32(chan->base.inst, 0x224, 0x00000000);
194 nvkm_done(chan->base.inst);
195 return gv100_fifo_gpfifo_engine_valid(chan, true, true);
196}
197
198int
199gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
200 void *data, u32 size, struct nvkm_object **pobject)
201{
202 struct nvkm_object *parent = oclass->parent;
203 union {
204 struct kepler_channel_gpfifo_a_v0 v0;
205 } *args = data;
206 int ret = -ENOSYS;
207
208 nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
209 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
210 nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
211 "ioffset %016llx ilength %08x "
212 "runlist %016llx\n",
213 args->v0.version, args->v0.vmm, args->v0.ioffset,
214 args->v0.ilength, args->v0.runlist);
215 return gv100_fifo_gpfifo_new_(fifo,
216 &args->v0.runlist,
217 &args->v0.chid,
218 args->v0.vmm,
219 args->v0.ioffset,
220 args->v0.ilength,
221 oclass, pobject);
222 }
223
224 return ret;
225}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
new file mode 100644
index 000000000000..4e1d159c0ae7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
@@ -0,0 +1,306 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "gk104.h"
23#include "cgrp.h"
24#include "changk104.h"
25#include "user.h"
26
27#include <core/gpuobj.h>
28
29#include <nvif/class.h>
30
31static void
32gv100_fifo_runlist_chan(struct gk104_fifo_chan *chan,
33 struct nvkm_memory *memory, u32 offset)
34{
35 struct nvkm_memory *usermem = chan->fifo->user.mem;
36 const u64 user = nvkm_memory_addr(usermem) + (chan->base.chid * 0x200);
37 const u64 inst = chan->base.inst->addr;
38
39 nvkm_wo32(memory, offset + 0x0, lower_32_bits(user));
40 nvkm_wo32(memory, offset + 0x4, upper_32_bits(user));
41 nvkm_wo32(memory, offset + 0x8, lower_32_bits(inst) | chan->base.chid);
42 nvkm_wo32(memory, offset + 0xc, upper_32_bits(inst));
43}
44
45static void
46gv100_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *cgrp,
47 struct nvkm_memory *memory, u32 offset)
48{
49 nvkm_wo32(memory, offset + 0x0, (128 << 24) | (3 << 16) | 0x00000001);
50 nvkm_wo32(memory, offset + 0x4, cgrp->chan_nr);
51 nvkm_wo32(memory, offset + 0x8, cgrp->id);
52 nvkm_wo32(memory, offset + 0xc, 0x00000000);
53}
54
55const struct gk104_fifo_runlist_func
56gv100_fifo_runlist = {
57 .size = 16,
58 .cgrp = gv100_fifo_runlist_cgrp,
59 .chan = gv100_fifo_runlist_chan,
60};
61
62static const struct nvkm_enum
63gv100_fifo_fault_gpcclient[] = {
64 { 0x00, "T1_0" },
65 { 0x01, "T1_1" },
66 { 0x02, "T1_2" },
67 { 0x03, "T1_3" },
68 { 0x04, "T1_4" },
69 { 0x05, "T1_5" },
70 { 0x06, "T1_6" },
71 { 0x07, "T1_7" },
72 { 0x08, "PE_0" },
73 { 0x09, "PE_1" },
74 { 0x0a, "PE_2" },
75 { 0x0b, "PE_3" },
76 { 0x0c, "PE_4" },
77 { 0x0d, "PE_5" },
78 { 0x0e, "PE_6" },
79 { 0x0f, "PE_7" },
80 { 0x10, "RAST" },
81 { 0x11, "GCC" },
82 { 0x12, "GPCCS" },
83 { 0x13, "PROP_0" },
84 { 0x14, "PROP_1" },
85 { 0x15, "PROP_2" },
86 { 0x16, "PROP_3" },
87 { 0x17, "GPM" },
88 { 0x18, "LTP_UTLB_0" },
89 { 0x19, "LTP_UTLB_1" },
90 { 0x1a, "LTP_UTLB_2" },
91 { 0x1b, "LTP_UTLB_3" },
92 { 0x1c, "LTP_UTLB_4" },
93 { 0x1d, "LTP_UTLB_5" },
94 { 0x1e, "LTP_UTLB_6" },
95 { 0x1f, "LTP_UTLB_7" },
96 { 0x20, "RGG_UTLB" },
97 { 0x21, "T1_8" },
98 { 0x22, "T1_9" },
99 { 0x23, "T1_10" },
100 { 0x24, "T1_11" },
101 { 0x25, "T1_12" },
102 { 0x26, "T1_13" },
103 { 0x27, "T1_14" },
104 { 0x28, "T1_15" },
105 { 0x29, "TPCCS_0" },
106 { 0x2a, "TPCCS_1" },
107 { 0x2b, "TPCCS_2" },
108 { 0x2c, "TPCCS_3" },
109 { 0x2d, "TPCCS_4" },
110 { 0x2e, "TPCCS_5" },
111 { 0x2f, "TPCCS_6" },
112 { 0x30, "TPCCS_7" },
113 { 0x31, "PE_8" },
114 { 0x32, "PE_9" },
115 { 0x33, "TPCCS_8" },
116 { 0x34, "TPCCS_9" },
117 { 0x35, "T1_16" },
118 { 0x36, "T1_17" },
119 { 0x37, "T1_18" },
120 { 0x38, "T1_19" },
121 { 0x39, "PE_10" },
122 { 0x3a, "PE_11" },
123 { 0x3b, "TPCCS_10" },
124 { 0x3c, "TPCCS_11" },
125 { 0x3d, "T1_20" },
126 { 0x3e, "T1_21" },
127 { 0x3f, "T1_22" },
128 { 0x40, "T1_23" },
129 { 0x41, "PE_12" },
130 { 0x42, "PE_13" },
131 { 0x43, "TPCCS_12" },
132 { 0x44, "TPCCS_13" },
133 { 0x45, "T1_24" },
134 { 0x46, "T1_25" },
135 { 0x47, "T1_26" },
136 { 0x48, "T1_27" },
137 { 0x49, "PE_14" },
138 { 0x4a, "PE_15" },
139 { 0x4b, "TPCCS_14" },
140 { 0x4c, "TPCCS_15" },
141 { 0x4d, "T1_28" },
142 { 0x4e, "T1_29" },
143 { 0x4f, "T1_30" },
144 { 0x50, "T1_31" },
145 { 0x51, "PE_16" },
146 { 0x52, "PE_17" },
147 { 0x53, "TPCCS_16" },
148 { 0x54, "TPCCS_17" },
149 { 0x55, "T1_32" },
150 { 0x56, "T1_33" },
151 { 0x57, "T1_34" },
152 { 0x58, "T1_35" },
153 { 0x59, "PE_18" },
154 { 0x5a, "PE_19" },
155 { 0x5b, "TPCCS_18" },
156 { 0x5c, "TPCCS_19" },
157 { 0x5d, "T1_36" },
158 { 0x5e, "T1_37" },
159 { 0x5f, "T1_38" },
160 { 0x60, "T1_39" },
161 {}
162};
163
164static const struct nvkm_enum
165gv100_fifo_fault_hubclient[] = {
166 { 0x00, "VIP" },
167 { 0x01, "CE0" },
168 { 0x02, "CE1" },
169 { 0x03, "DNISO" },
170 { 0x04, "FE" },
171 { 0x05, "FECS" },
172 { 0x06, "HOST" },
173 { 0x07, "HOST_CPU" },
174 { 0x08, "HOST_CPU_NB" },
175 { 0x09, "ISO" },
176 { 0x0a, "MMU" },
177 { 0x0b, "NVDEC" },
178 { 0x0d, "NVENC1" },
179 { 0x0e, "NISO" },
180 { 0x0f, "P2P" },
181 { 0x10, "PD" },
182 { 0x11, "PERF" },
183 { 0x12, "PMU" },
184 { 0x13, "RASTERTWOD" },
185 { 0x14, "SCC" },
186 { 0x15, "SCC_NB" },
187 { 0x16, "SEC" },
188 { 0x17, "SSYNC" },
189 { 0x18, "CE2" },
190 { 0x19, "XV" },
191 { 0x1a, "MMU_NB" },
192 { 0x1b, "NVENC0" },
193 { 0x1c, "DFALCON" },
194 { 0x1d, "SKED" },
195 { 0x1e, "AFALCON" },
196 { 0x1f, "DONT_CARE" },
197 { 0x20, "HSCE0" },
198 { 0x21, "HSCE1" },
199 { 0x22, "HSCE2" },
200 { 0x23, "HSCE3" },
201 { 0x24, "HSCE4" },
202 { 0x25, "HSCE5" },
203 { 0x26, "HSCE6" },
204 { 0x27, "HSCE7" },
205 { 0x28, "HSCE8" },
206 { 0x29, "HSCE9" },
207 { 0x2a, "HSHUB" },
208 { 0x2b, "PTP_X0" },
209 { 0x2c, "PTP_X1" },
210 { 0x2d, "PTP_X2" },
211 { 0x2e, "PTP_X3" },
212 { 0x2f, "PTP_X4" },
213 { 0x30, "PTP_X5" },
214 { 0x31, "PTP_X6" },
215 { 0x32, "PTP_X7" },
216 { 0x33, "NVENC2" },
217 { 0x34, "VPR_SCRUBBER0" },
218 { 0x35, "VPR_SCRUBBER1" },
219 { 0x36, "DWBIF" },
220 { 0x37, "FBFALCON" },
221 { 0x38, "CE_SHIM" },
222 { 0x39, "GSP" },
223 {}
224};
225
226static const struct nvkm_enum
227gv100_fifo_fault_reason[] = {
228 { 0x00, "PDE" },
229 { 0x01, "PDE_SIZE" },
230 { 0x02, "PTE" },
231 { 0x03, "VA_LIMIT_VIOLATION" },
232 { 0x04, "UNBOUND_INST_BLOCK" },
233 { 0x05, "PRIV_VIOLATION" },
234 { 0x06, "RO_VIOLATION" },
235 { 0x07, "WO_VIOLATION" },
236 { 0x08, "PITCH_MASK_VIOLATION" },
237 { 0x09, "WORK_CREATION" },
238 { 0x0a, "UNSUPPORTED_APERTURE" },
239 { 0x0b, "COMPRESSION_FAILURE" },
240 { 0x0c, "UNSUPPORTED_KIND" },
241 { 0x0d, "REGION_VIOLATION" },
242 { 0x0e, "POISONED" },
243 { 0x0f, "ATOMIC_VIOLATION" },
244 {}
245};
246
247static const struct nvkm_enum
248gv100_fifo_fault_engine[] = {
249 { 0x01, "DISPLAY" },
250 { 0x03, "PTP" },
251 { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
252 { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
253 { 0x06, "PWR_PMU" },
254 { 0x08, "IFB", NULL, NVKM_ENGINE_IFB },
255 { 0x09, "PERF" },
256 { 0x1f, "PHYSICAL" },
257 { 0x20, "HOST0" },
258 { 0x21, "HOST1" },
259 { 0x22, "HOST2" },
260 { 0x23, "HOST3" },
261 { 0x24, "HOST4" },
262 { 0x25, "HOST5" },
263 { 0x26, "HOST6" },
264 { 0x27, "HOST7" },
265 { 0x28, "HOST8" },
266 { 0x29, "HOST9" },
267 { 0x2a, "HOST10" },
268 { 0x2b, "HOST11" },
269 { 0x2c, "HOST12" },
270 { 0x2d, "HOST13" },
271 {}
272};
273
274static const struct nvkm_enum
275gv100_fifo_fault_access[] = {
276 { 0x0, "VIRT_READ" },
277 { 0x1, "VIRT_WRITE" },
278 { 0x2, "VIRT_ATOMIC" },
279 { 0x3, "VIRT_PREFETCH" },
280 { 0x4, "VIRT_ATOMIC_WEAK" },
281 { 0x8, "PHYS_READ" },
282 { 0x9, "PHYS_WRITE" },
283 { 0xa, "PHYS_ATOMIC" },
284 { 0xb, "PHYS_PREFETCH" },
285 {}
286};
287
288static const struct gk104_fifo_func
289gv100_fifo = {
290 .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
291 .fault.access = gv100_fifo_fault_access,
292 .fault.engine = gv100_fifo_fault_engine,
293 .fault.reason = gv100_fifo_fault_reason,
294 .fault.hubclient = gv100_fifo_fault_hubclient,
295 .fault.gpcclient = gv100_fifo_fault_gpcclient,
296 .runlist = &gv100_fifo_runlist,
297 .user = {{-1,-1,VOLTA_USERMODE_A }, gv100_fifo_user_new },
298 .chan = {{ 0, 0,VOLTA_CHANNEL_GPFIFO_A}, gv100_fifo_gpfifo_new },
299 .cgrp_force = true,
300};
301
302int
303gv100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
304{
305 return gk104_fifo_new_(&gv100_fifo, device, index, 4096, pfifo);
306}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index ae76b1aaccd4..d5acbba293f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -18,16 +18,19 @@ struct nvkm_fifo_chan_oclass;
18struct nvkm_fifo_func { 18struct nvkm_fifo_func {
19 void *(*dtor)(struct nvkm_fifo *); 19 void *(*dtor)(struct nvkm_fifo *);
20 int (*oneinit)(struct nvkm_fifo *); 20 int (*oneinit)(struct nvkm_fifo *);
21 int (*info)(struct nvkm_fifo *, u64 mthd, u64 *data);
21 void (*init)(struct nvkm_fifo *); 22 void (*init)(struct nvkm_fifo *);
22 void (*fini)(struct nvkm_fifo *); 23 void (*fini)(struct nvkm_fifo *);
23 void (*intr)(struct nvkm_fifo *); 24 void (*intr)(struct nvkm_fifo *);
25 void (*fault)(struct nvkm_fifo *, struct nvkm_fault_data *);
24 void (*pause)(struct nvkm_fifo *, unsigned long *); 26 void (*pause)(struct nvkm_fifo *, unsigned long *);
25 void (*start)(struct nvkm_fifo *, unsigned long *); 27 void (*start)(struct nvkm_fifo *, unsigned long *);
26 void (*uevent_init)(struct nvkm_fifo *); 28 void (*uevent_init)(struct nvkm_fifo *);
27 void (*uevent_fini)(struct nvkm_fifo *); 29 void (*uevent_fini)(struct nvkm_fifo *);
28 void (*recover_chan)(struct nvkm_fifo *, int chid); 30 void (*recover_chan)(struct nvkm_fifo *, int chid);
29 int (*class_get)(struct nvkm_fifo *, int index, 31 int (*class_get)(struct nvkm_fifo *, int index, struct nvkm_oclass *);
30 const struct nvkm_fifo_chan_oclass **); 32 int (*class_new)(struct nvkm_fifo *, const struct nvkm_oclass *,
33 void *, u32, struct nvkm_object **);
31 const struct nvkm_fifo_chan_oclass *chan[]; 34 const struct nvkm_fifo_chan_oclass *chan[];
32}; 35};
33 36
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
new file mode 100644
index 000000000000..ed840921ebe8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
@@ -0,0 +1,6 @@
1#ifndef __NVKM_FIFO_USER_H__
2#define __NVKM_FIFO_USER_H__
3#include "priv.h"
4int gv100_fifo_user_new(const struct nvkm_oclass *, void *, u32,
5 struct nvkm_object **);
6#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usergv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usergv100.c
new file mode 100644
index 000000000000..3dc3b8b312de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usergv100.c
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "user.h"
23
24static int
25gv100_fifo_user_map(struct nvkm_object *object, void *argv, u32 argc,
26 enum nvkm_object_map *type, u64 *addr, u64 *size)
27{
28 struct nvkm_device *device = object->engine->subdev.device;
29 *addr = 0x810000 + device->func->resource_addr(device, 0);
30 *size = 0x010000;
31 *type = NVKM_OBJECT_MAP_IO;
32 return 0;
33}
34
35static const struct nvkm_object_func
36gv100_fifo_user = {
37 .map = gv100_fifo_user_map,
38};
39
40int
41gv100_fifo_user_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
42 struct nvkm_object **pobject)
43{
44 return nvkm_object_new_(&gv100_fifo_user, oclass, argv, argc, pobject);
45}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 8a22558b7b52..93e3733f54e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -33,8 +33,10 @@ nvkm-y += nvkm/engine/gr/gm200.o
33nvkm-y += nvkm/engine/gr/gm20b.o 33nvkm-y += nvkm/engine/gr/gm20b.o
34nvkm-y += nvkm/engine/gr/gp100.o 34nvkm-y += nvkm/engine/gr/gp100.o
35nvkm-y += nvkm/engine/gr/gp102.o 35nvkm-y += nvkm/engine/gr/gp102.o
36nvkm-y += nvkm/engine/gr/gp104.o
36nvkm-y += nvkm/engine/gr/gp107.o 37nvkm-y += nvkm/engine/gr/gp107.o
37nvkm-y += nvkm/engine/gr/gp10b.o 38nvkm-y += nvkm/engine/gr/gp10b.o
39nvkm-y += nvkm/engine/gr/gv100.o
38 40
39nvkm-y += nvkm/engine/gr/ctxnv40.o 41nvkm-y += nvkm/engine/gr/ctxnv40.o
40nvkm-y += nvkm/engine/gr/ctxnv50.o 42nvkm-y += nvkm/engine/gr/ctxnv50.o
@@ -54,4 +56,6 @@ nvkm-y += nvkm/engine/gr/ctxgm200.o
54nvkm-y += nvkm/engine/gr/ctxgm20b.o 56nvkm-y += nvkm/engine/gr/ctxgm20b.o
55nvkm-y += nvkm/engine/gr/ctxgp100.o 57nvkm-y += nvkm/engine/gr/ctxgp100.o
56nvkm-y += nvkm/engine/gr/ctxgp102.o 58nvkm-y += nvkm/engine/gr/ctxgp102.o
59nvkm-y += nvkm/engine/gr/ctxgp104.o
57nvkm-y += nvkm/engine/gr/ctxgp107.o 60nvkm-y += nvkm/engine/gr/ctxgp107.o
61nvkm-y += nvkm/engine/gr/ctxgv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 881015080d83..e813a3f8ea93 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -850,12 +850,17 @@ gf100_grctx_init_gcc_0[] = {
850}; 850};
851 851
852const struct gf100_gr_pack 852const struct gf100_gr_pack
853gf100_grctx_pack_gpc[] = { 853gf100_grctx_pack_gpc_0[] = {
854 { gf100_grctx_init_gpc_unk_0 }, 854 { gf100_grctx_init_gpc_unk_0 },
855 { gf100_grctx_init_prop_0 }, 855 { gf100_grctx_init_prop_0 },
856 { gf100_grctx_init_gpc_unk_1 }, 856 { gf100_grctx_init_gpc_unk_1 },
857 { gf100_grctx_init_setup_0 }, 857 { gf100_grctx_init_setup_0 },
858 { gf100_grctx_init_zcull_0 }, 858 { gf100_grctx_init_zcull_0 },
859 {}
860};
861
862const struct gf100_gr_pack
863gf100_grctx_pack_gpc_1[] = {
859 { gf100_grctx_init_crstr_0 }, 864 { gf100_grctx_init_crstr_0 },
860 { gf100_grctx_init_gpm_0 }, 865 { gf100_grctx_init_gpm_0 },
861 { gf100_grctx_init_gcc_0 }, 866 { gf100_grctx_init_gcc_0 },
@@ -1025,6 +1030,13 @@ gf100_grctx_mmio_item(struct gf100_grctx *info, u32 addr, u32 data,
1025} 1030}
1026 1031
1027void 1032void
1033gf100_grctx_generate_r419cb8(struct gf100_gr *gr)
1034{
1035 struct nvkm_device *device = gr->base.engine.subdev.device;
1036 nvkm_mask(device, 0x419cb8, 0x00007c00, 0x00000000);
1037}
1038
1039void
1028gf100_grctx_generate_bundle(struct gf100_grctx *info) 1040gf100_grctx_generate_bundle(struct gf100_grctx *info)
1029{ 1041{
1030 const struct gf100_grctx_func *grctx = info->gr->func->grctx; 1042 const struct gf100_grctx_func *grctx = info->gr->func->grctx;
@@ -1080,89 +1092,38 @@ gf100_grctx_generate_unkn(struct gf100_gr *gr)
1080} 1092}
1081 1093
1082void 1094void
1083gf100_grctx_generate_tpcid(struct gf100_gr *gr)
1084{
1085 struct nvkm_device *device = gr->base.engine.subdev.device;
1086 int gpc, tpc, id;
1087
1088 for (tpc = 0, id = 0; tpc < 4; tpc++) {
1089 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
1090 if (tpc < gr->tpc_nr[gpc]) {
1091 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
1092 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x4e8), id);
1093 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
1094 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
1095 id++;
1096 }
1097
1098 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c08), gr->tpc_nr[gpc]);
1099 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c8c), gr->tpc_nr[gpc]);
1100 }
1101 }
1102}
1103
1104void
1105gf100_grctx_generate_r406028(struct gf100_gr *gr)
1106{
1107 struct nvkm_device *device = gr->base.engine.subdev.device;
1108 u32 tmp[GPC_MAX / 8] = {}, i = 0;
1109 for (i = 0; i < gr->gpc_nr; i++)
1110 tmp[i / 8] |= gr->tpc_nr[i] << ((i % 8) * 4);
1111 for (i = 0; i < 4; i++) {
1112 nvkm_wr32(device, 0x406028 + (i * 4), tmp[i]);
1113 nvkm_wr32(device, 0x405870 + (i * 4), tmp[i]);
1114 }
1115}
1116
1117void
1118gf100_grctx_generate_r4060a8(struct gf100_gr *gr) 1095gf100_grctx_generate_r4060a8(struct gf100_gr *gr)
1119{ 1096{
1120 struct nvkm_device *device = gr->base.engine.subdev.device; 1097 struct nvkm_device *device = gr->base.engine.subdev.device;
1121 u8 tpcnr[GPC_MAX], data[TPC_MAX]; 1098 const u8 gpcmax = nvkm_rd32(device, 0x022430);
1122 int gpc, tpc, i; 1099 const u8 tpcmax = nvkm_rd32(device, 0x022434) * gpcmax;
1123 1100 int i, j, sm = 0;
1124 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr)); 1101 u32 data;
1125 memset(data, 0x1f, sizeof(data)); 1102
1126 1103 for (i = 0; i < DIV_ROUND_UP(tpcmax, 4); i++) {
1127 gpc = -1; 1104 for (data = 0, j = 0; j < 4; j++) {
1128 for (tpc = 0; tpc < gr->tpc_total; tpc++) { 1105 if (sm < gr->sm_nr)
1129 do { 1106 data |= gr->sm[sm++].gpc << (j * 8);
1130 gpc = (gpc + 1) % gr->gpc_nr; 1107 else
1131 } while (!tpcnr[gpc]); 1108 data |= 0x1f << (j * 8);
1132 tpcnr[gpc]--; 1109 }
1133 data[tpc] = gpc; 1110 nvkm_wr32(device, 0x4060a8 + (i * 4), data);
1134 } 1111 }
1135
1136 for (i = 0; i < 4; i++)
1137 nvkm_wr32(device, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
1138} 1112}
1139 1113
1140void 1114void
1141gf100_grctx_generate_r418bb8(struct gf100_gr *gr) 1115gf100_grctx_generate_rop_mapping(struct gf100_gr *gr)
1142{ 1116{
1143 struct nvkm_device *device = gr->base.engine.subdev.device; 1117 struct nvkm_device *device = gr->base.engine.subdev.device;
1144 u32 data[6] = {}, data2[2] = {}; 1118 u32 data[6] = {}, data2[2] = {};
1145 u8 tpcnr[GPC_MAX];
1146 u8 shift, ntpcv; 1119 u8 shift, ntpcv;
1147 int gpc, tpc, i; 1120 int i;
1148
1149 /* calculate first set of magics */
1150 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
1151
1152 gpc = -1;
1153 for (tpc = 0; tpc < gr->tpc_total; tpc++) {
1154 do {
1155 gpc = (gpc + 1) % gr->gpc_nr;
1156 } while (!tpcnr[gpc]);
1157 tpcnr[gpc]--;
1158
1159 data[tpc / 6] |= gpc << ((tpc % 6) * 5);
1160 }
1161 1121
1162 for (; tpc < 32; tpc++) 1122 /* Pack tile map into register format. */
1163 data[tpc / 6] |= 7 << ((tpc % 6) * 5); 1123 for (i = 0; i < 32; i++)
1124 data[i / 6] |= (gr->tile[i] & 0x07) << ((i % 6) * 5);
1164 1125
1165 /* and the second... */ 1126 /* Magic. */
1166 shift = 0; 1127 shift = 0;
1167 ntpcv = gr->tpc_total; 1128 ntpcv = gr->tpc_total;
1168 while (!(ntpcv & (1 << 4))) { 1129 while (!(ntpcv & (1 << 4))) {
@@ -1197,40 +1158,214 @@ gf100_grctx_generate_r418bb8(struct gf100_gr *gr)
1197} 1158}
1198 1159
1199void 1160void
1200gf100_grctx_generate_r406800(struct gf100_gr *gr) 1161gf100_grctx_generate_max_ways_evict(struct gf100_gr *gr)
1201{ 1162{
1202 struct nvkm_device *device = gr->base.engine.subdev.device; 1163 struct nvkm_device *device = gr->base.engine.subdev.device;
1203 u64 tpc_mask = 0, tpc_set = 0; 1164 u32 fbps = nvkm_rd32(device, 0x121c74);
1204 u8 tpcnr[GPC_MAX]; 1165 if (fbps == 1)
1205 int gpc, tpc; 1166 nvkm_mask(device, 0x17e91c, 0x001f0000, 0x00090000);
1206 int i, a, b; 1167}
1207 1168
1208 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr)); 1169static const u32
1209 for (gpc = 0; gpc < gr->gpc_nr; gpc++) 1170gf100_grctx_alpha_beta_map[17][32] = {
1210 tpc_mask |= ((1ULL << gr->tpc_nr[gpc]) - 1) << (gpc * 8); 1171 [1] = {
1211 1172 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1212 for (i = 0, gpc = -1, b = -1; i < 32; i++) { 1173 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1213 a = (i * (gr->tpc_total - 1)) / 32; 1174 },
1214 if (a != b) { 1175 [2] = {
1215 b = a; 1176 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1216 do { 1177 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1217 gpc = (gpc + 1) % gr->gpc_nr; 1178 },
1218 } while (!tpcnr[gpc]); 1179 //XXX: 3
1219 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--; 1180 [4] = {
1220 1181 1, 1, 1, 1, 1, 1, 1, 1,
1221 tpc_set |= 1ULL << ((gpc * 8) + tpc); 1182 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1183 3, 3, 3, 3, 3, 3, 3, 3,
1184 },
1185 //XXX: 5
1186 //XXX: 6
1187 [7] = {
1188 1, 1, 1, 1,
1189 2, 2, 2, 2, 2, 2,
1190 3, 3, 3, 3, 3, 3,
1191 4, 4, 4, 4, 4, 4,
1192 5, 5, 5, 5, 5, 5,
1193 6, 6, 6, 6,
1194 },
1195 [8] = {
1196 1, 1, 1,
1197 2, 2, 2, 2, 2,
1198 3, 3, 3, 3, 3,
1199 4, 4, 4, 4, 4, 4,
1200 5, 5, 5, 5, 5,
1201 6, 6, 6, 6, 6,
1202 7, 7, 7,
1203 },
1204 //XXX: 9
1205 //XXX: 10
1206 [11] = {
1207 1, 1,
1208 2, 2, 2, 2,
1209 3, 3, 3,
1210 4, 4, 4, 4,
1211 5, 5, 5,
1212 6, 6, 6,
1213 7, 7, 7, 7,
1214 8, 8, 8,
1215 9, 9, 9, 9,
1216 10, 10,
1217 },
1218 //XXX: 12
1219 //XXX: 13
1220 [14] = {
1221 1, 1,
1222 2, 2,
1223 3, 3, 3,
1224 4, 4, 4,
1225 5, 5,
1226 6, 6, 6,
1227 7, 7,
1228 8, 8, 8,
1229 9, 9,
1230 10, 10, 10,
1231 11, 11, 11,
1232 12, 12,
1233 13, 13,
1234 },
1235 [15] = {
1236 1, 1,
1237 2, 2,
1238 3, 3,
1239 4, 4, 4,
1240 5, 5,
1241 6, 6, 6,
1242 7, 7,
1243 8, 8,
1244 9, 9, 9,
1245 10, 10,
1246 11, 11, 11,
1247 12, 12,
1248 13, 13,
1249 14, 14,
1250 },
1251 [16] = {
1252 1, 1,
1253 2, 2,
1254 3, 3,
1255 4, 4,
1256 5, 5,
1257 6, 6, 6,
1258 7, 7,
1259 8, 8,
1260 9, 9,
1261 10, 10, 10,
1262 11, 11,
1263 12, 12,
1264 13, 13,
1265 14, 14,
1266 15, 15,
1267 },
1268};
1269
1270void
1271gf100_grctx_generate_alpha_beta_tables(struct gf100_gr *gr)
1272{
1273 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
1274 struct nvkm_device *device = subdev->device;
1275 int i, gpc;
1276
1277 for (i = 0; i < 32; i++) {
1278 u32 atarget = gf100_grctx_alpha_beta_map[gr->tpc_total][i];
1279 u32 abits[GPC_MAX] = {}, amask = 0, bmask = 0;
1280
1281 if (!atarget) {
1282 nvkm_warn(subdev, "missing alpha/beta mapping table\n");
1283 atarget = max_t(u32, gr->tpc_total * i / 32, 1);
1284 }
1285
1286 while (atarget) {
1287 for (gpc = 0; atarget && gpc < gr->gpc_nr; gpc++) {
1288 if (abits[gpc] < gr->tpc_nr[gpc]) {
1289 abits[gpc]++;
1290 atarget--;
1291 }
1292 }
1222 } 1293 }
1223 1294
1224 nvkm_wr32(device, 0x406800 + (i * 0x20), lower_32_bits(tpc_set)); 1295 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
1225 nvkm_wr32(device, 0x406c00 + (i * 0x20), lower_32_bits(tpc_set ^ tpc_mask)); 1296 u32 bbits = gr->tpc_nr[gpc] - abits[gpc];
1226 if (gr->gpc_nr > 4) { 1297 amask |= ((1 << abits[gpc]) - 1) << (gpc * 8);
1227 nvkm_wr32(device, 0x406804 + (i * 0x20), upper_32_bits(tpc_set)); 1298 bmask |= ((1 << bbits) - 1) << abits[gpc] << (gpc * 8);
1228 nvkm_wr32(device, 0x406c04 + (i * 0x20), upper_32_bits(tpc_set ^ tpc_mask));
1229 } 1299 }
1300
1301 nvkm_wr32(device, 0x406800 + (i * 0x20), amask);
1302 nvkm_wr32(device, 0x406c00 + (i * 0x20), bmask);
1230 } 1303 }
1231} 1304}
1232 1305
1233void 1306void
1307gf100_grctx_generate_tpc_nr(struct gf100_gr *gr, int gpc)
1308{
1309 struct nvkm_device *device = gr->base.engine.subdev.device;
1310 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c08), gr->tpc_nr[gpc]);
1311 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c8c), gr->tpc_nr[gpc]);
1312}
1313
1314void
1315gf100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
1316{
1317 struct nvkm_device *device = gr->base.engine.subdev.device;
1318 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), sm);
1319 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x4e8), sm);
1320 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), sm);
1321 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);
1322}
1323
1324void
1325gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
1326{
1327 struct nvkm_device *device = gr->base.engine.subdev.device;
1328 const struct gf100_grctx_func *func = gr->func->grctx;
1329 int gpc, sm, i, j;
1330 u32 data;
1331
1332 for (sm = 0; sm < gr->sm_nr; sm++) {
1333 func->sm_id(gr, gr->sm[sm].gpc, gr->sm[sm].tpc, sm);
1334 if (func->tpc_nr)
1335 func->tpc_nr(gr, gr->sm[sm].gpc);
1336 }
1337
1338 for (gpc = 0, i = 0; i < 4; i++) {
1339 for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++)
1340 data |= gr->tpc_nr[gpc] << (j * 4);
1341 nvkm_wr32(device, 0x406028 + (i * 4), data);
1342 nvkm_wr32(device, 0x405870 + (i * 4), data);
1343 }
1344
1345 if (func->r4060a8)
1346 func->r4060a8(gr);
1347
1348 func->rop_mapping(gr);
1349
1350 if (func->alpha_beta_tables)
1351 func->alpha_beta_tables(gr);
1352 if (func->max_ways_evict)
1353 func->max_ways_evict(gr);
1354 if (func->dist_skip_table)
1355 func->dist_skip_table(gr);
1356 if (func->r406500)
1357 func->r406500(gr);
1358 if (func->gpc_tpc_nr)
1359 func->gpc_tpc_nr(gr);
1360 if (func->r419f78)
1361 func->r419f78(gr);
1362 if (func->tpc_mask)
1363 func->tpc_mask(gr);
1364 if (func->smid_config)
1365 func->smid_config(gr);
1366}
1367
1368void
1234gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) 1369gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
1235{ 1370{
1236 struct nvkm_device *device = gr->base.engine.subdev.device; 1371 struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -1239,29 +1374,63 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
1239 1374
1240 nvkm_mc_unk260(device, 0); 1375 nvkm_mc_unk260(device, 0);
1241 1376
1242 gf100_gr_mmio(gr, grctx->hub); 1377 if (!gr->fuc_sw_ctx) {
1243 gf100_gr_mmio(gr, grctx->gpc); 1378 gf100_gr_mmio(gr, grctx->hub);
1244 gf100_gr_mmio(gr, grctx->zcull); 1379 gf100_gr_mmio(gr, grctx->gpc_0);
1245 gf100_gr_mmio(gr, grctx->tpc); 1380 gf100_gr_mmio(gr, grctx->zcull);
1246 gf100_gr_mmio(gr, grctx->ppc); 1381 gf100_gr_mmio(gr, grctx->gpc_1);
1382 gf100_gr_mmio(gr, grctx->tpc);
1383 gf100_gr_mmio(gr, grctx->ppc);
1384 } else {
1385 gf100_gr_mmio(gr, gr->fuc_sw_ctx);
1386 }
1387
1388 gf100_gr_wait_idle(gr);
1247 1389
1248 idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000); 1390 idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
1249 1391
1250 grctx->bundle(info);
1251 grctx->pagepool(info); 1392 grctx->pagepool(info);
1393 grctx->bundle(info);
1252 grctx->attrib(info); 1394 grctx->attrib(info);
1395 if (grctx->patch_ltc)
1396 grctx->patch_ltc(info);
1253 grctx->unkn(gr); 1397 grctx->unkn(gr);
1254 1398
1255 gf100_grctx_generate_tpcid(gr); 1399 gf100_grctx_generate_floorsweep(gr);
1256 gf100_grctx_generate_r406028(gr); 1400
1257 gf100_grctx_generate_r4060a8(gr); 1401 gf100_gr_wait_idle(gr);
1258 gf100_grctx_generate_r418bb8(gr); 1402
1259 gf100_grctx_generate_r406800(gr); 1403 if (grctx->r400088) grctx->r400088(gr, false);
1404 if (gr->fuc_bundle)
1405 gf100_gr_icmd(gr, gr->fuc_bundle);
1406 else
1407 gf100_gr_icmd(gr, grctx->icmd);
1408 if (grctx->sw_veid_bundle_init)
1409 gf100_gr_icmd(gr, grctx->sw_veid_bundle_init);
1410 if (grctx->r400088) grctx->r400088(gr, true);
1260 1411
1261 gf100_gr_icmd(gr, grctx->icmd);
1262 nvkm_wr32(device, 0x404154, idle_timeout); 1412 nvkm_wr32(device, 0x404154, idle_timeout);
1263 gf100_gr_mthd(gr, grctx->mthd); 1413
1414 if (gr->fuc_method)
1415 gf100_gr_mthd(gr, gr->fuc_method);
1416 else
1417 gf100_gr_mthd(gr, grctx->mthd);
1264 nvkm_mc_unk260(device, 1); 1418 nvkm_mc_unk260(device, 1);
1419
1420 if (grctx->r419cb8)
1421 grctx->r419cb8(gr);
1422 if (grctx->r418800)
1423 grctx->r418800(gr);
1424 if (grctx->r419eb0)
1425 grctx->r419eb0(gr);
1426 if (grctx->r419e00)
1427 grctx->r419e00(gr);
1428 if (grctx->r418e94)
1429 grctx->r418e94(gr);
1430 if (grctx->r419a3c)
1431 grctx->r419a3c(gr);
1432 if (grctx->r408840)
1433 grctx->r408840(gr);
1265} 1434}
1266 1435
1267#define CB_RESERVED 0x80000 1436#define CB_RESERVED 0x80000
@@ -1280,6 +1449,32 @@ gf100_grctx_generate(struct gf100_gr *gr)
1280 int ret, i; 1449 int ret, i;
1281 u64 addr; 1450 u64 addr;
1282 1451
1452 /* NV_PGRAPH_FE_PWR_MODE_FORCE_ON. */
1453 nvkm_wr32(device, 0x404170, 0x00000012);
1454 nvkm_msec(device, 2000,
1455 if (!(nvkm_rd32(device, 0x404170) & 0x00000010))
1456 break;
1457 );
1458
1459 if (grctx->unkn88c)
1460 grctx->unkn88c(gr, true);
1461
1462 /* Reset FECS. */
1463 nvkm_wr32(device, 0x409614, 0x00000070);
1464 nvkm_usec(device, 10, NVKM_DELAY);
1465 nvkm_mask(device, 0x409614, 0x00000700, 0x00000700);
1466 nvkm_usec(device, 10, NVKM_DELAY);
1467 nvkm_rd32(device, 0x409614);
1468
1469 if (grctx->unkn88c)
1470 grctx->unkn88c(gr, false);
1471
1472 /* NV_PGRAPH_FE_PWR_MODE_AUTO. */
1473 nvkm_wr32(device, 0x404170, 0x00000010);
1474
1475 /* Init SCC RAM. */
1476 nvkm_wr32(device, 0x40802c, 0x00000001);
1477
1283 /* Allocate memory to for a "channel", which we'll use to generate 1478 /* Allocate memory to for a "channel", which we'll use to generate
1284 * the default context values. 1479 * the default context values.
1285 */ 1480 */
@@ -1392,7 +1587,8 @@ gf100_grctx = {
1392 .main = gf100_grctx_generate_main, 1587 .main = gf100_grctx_generate_main,
1393 .unkn = gf100_grctx_generate_unkn, 1588 .unkn = gf100_grctx_generate_unkn,
1394 .hub = gf100_grctx_pack_hub, 1589 .hub = gf100_grctx_pack_hub,
1395 .gpc = gf100_grctx_pack_gpc, 1590 .gpc_0 = gf100_grctx_pack_gpc_0,
1591 .gpc_1 = gf100_grctx_pack_gpc_1,
1396 .zcull = gf100_grctx_pack_zcull, 1592 .zcull = gf100_grctx_pack_zcull,
1397 .tpc = gf100_grctx_pack_tpc, 1593 .tpc = gf100_grctx_pack_tpc,
1398 .icmd = gf100_grctx_pack_icmd, 1594 .icmd = gf100_grctx_pack_icmd,
@@ -1404,4 +1600,11 @@ gf100_grctx = {
1404 .attrib = gf100_grctx_generate_attrib, 1600 .attrib = gf100_grctx_generate_attrib,
1405 .attrib_nr_max = 0x324, 1601 .attrib_nr_max = 0x324,
1406 .attrib_nr = 0x218, 1602 .attrib_nr = 0x218,
1603 .sm_id = gf100_grctx_generate_sm_id,
1604 .tpc_nr = gf100_grctx_generate_tpc_nr,
1605 .r4060a8 = gf100_grctx_generate_r4060a8,
1606 .rop_mapping = gf100_grctx_generate_rop_mapping,
1607 .alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
1608 .max_ways_evict = gf100_grctx_generate_max_ways_evict,
1609 .r419cb8 = gf100_grctx_generate_r419cb8,
1407}; 1610};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 5199e5aa0cb7..33e932bd73b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -21,19 +21,22 @@ void gf100_grctx_mmio_item(struct gf100_grctx *, u32 addr, u32 data, int s, int)
21#define mmio_wr32(a,b,c) mmio_refn((a), (b), (c), 0, -1) 21#define mmio_wr32(a,b,c) mmio_refn((a), (b), (c), 0, -1)
22 22
23struct gf100_grctx_func { 23struct gf100_grctx_func {
24 void (*unkn88c)(struct gf100_gr *, bool on);
24 /* main context generation function */ 25 /* main context generation function */
25 void (*main)(struct gf100_gr *, struct gf100_grctx *); 26 void (*main)(struct gf100_gr *, struct gf100_grctx *);
26 /* context-specific modify-on-first-load list generation function */ 27 /* context-specific modify-on-first-load list generation function */
27 void (*unkn)(struct gf100_gr *); 28 void (*unkn)(struct gf100_gr *);
28 /* mmio context data */ 29 /* mmio context data */
29 const struct gf100_gr_pack *hub; 30 const struct gf100_gr_pack *hub;
30 const struct gf100_gr_pack *gpc; 31 const struct gf100_gr_pack *gpc_0;
32 const struct gf100_gr_pack *gpc_1;
31 const struct gf100_gr_pack *zcull; 33 const struct gf100_gr_pack *zcull;
32 const struct gf100_gr_pack *tpc; 34 const struct gf100_gr_pack *tpc;
33 const struct gf100_gr_pack *ppc; 35 const struct gf100_gr_pack *ppc;
34 /* indirect context data, generated with icmds/mthds */ 36 /* indirect context data, generated with icmds/mthds */
35 const struct gf100_gr_pack *icmd; 37 const struct gf100_gr_pack *icmd;
36 const struct gf100_gr_pack *mthd; 38 const struct gf100_gr_pack *mthd;
39 const struct gf100_gr_pack *sw_veid_bundle_init;
37 /* bundle circular buffer */ 40 /* bundle circular buffer */
38 void (*bundle)(struct gf100_grctx *); 41 void (*bundle)(struct gf100_grctx *);
39 u32 bundle_size; 42 u32 bundle_size;
@@ -48,6 +51,31 @@ struct gf100_grctx_func {
48 u32 attrib_nr; 51 u32 attrib_nr;
49 u32 alpha_nr_max; 52 u32 alpha_nr_max;
50 u32 alpha_nr; 53 u32 alpha_nr;
54 u32 gfxp_nr;
55 /* other patch buffer stuff */
56 void (*patch_ltc)(struct gf100_grctx *);
57 /* floorsweeping */
58 void (*sm_id)(struct gf100_gr *, int gpc, int tpc, int sm);
59 void (*tpc_nr)(struct gf100_gr *, int gpc);
60 void (*r4060a8)(struct gf100_gr *);
61 void (*rop_mapping)(struct gf100_gr *);
62 void (*alpha_beta_tables)(struct gf100_gr *);
63 void (*max_ways_evict)(struct gf100_gr *);
64 void (*dist_skip_table)(struct gf100_gr *);
65 void (*r406500)(struct gf100_gr *);
66 void (*gpc_tpc_nr)(struct gf100_gr *);
67 void (*r419f78)(struct gf100_gr *);
68 void (*tpc_mask)(struct gf100_gr *);
69 void (*smid_config)(struct gf100_gr *);
70 /* misc other things */
71 void (*r400088)(struct gf100_gr *, bool);
72 void (*r419cb8)(struct gf100_gr *);
73 void (*r418800)(struct gf100_gr *);
74 void (*r419eb0)(struct gf100_gr *);
75 void (*r419e00)(struct gf100_gr *);
76 void (*r418e94)(struct gf100_gr *);
77 void (*r419a3c)(struct gf100_gr *);
78 void (*r408840)(struct gf100_gr *);
51}; 79};
52 80
53extern const struct gf100_grctx_func gf100_grctx; 81extern const struct gf100_grctx_func gf100_grctx;
@@ -57,11 +85,14 @@ void gf100_grctx_generate_bundle(struct gf100_grctx *);
57void gf100_grctx_generate_pagepool(struct gf100_grctx *); 85void gf100_grctx_generate_pagepool(struct gf100_grctx *);
58void gf100_grctx_generate_attrib(struct gf100_grctx *); 86void gf100_grctx_generate_attrib(struct gf100_grctx *);
59void gf100_grctx_generate_unkn(struct gf100_gr *); 87void gf100_grctx_generate_unkn(struct gf100_gr *);
60void gf100_grctx_generate_tpcid(struct gf100_gr *); 88void gf100_grctx_generate_floorsweep(struct gf100_gr *);
61void gf100_grctx_generate_r406028(struct gf100_gr *); 89void gf100_grctx_generate_sm_id(struct gf100_gr *, int, int, int);
90void gf100_grctx_generate_tpc_nr(struct gf100_gr *, int);
62void gf100_grctx_generate_r4060a8(struct gf100_gr *); 91void gf100_grctx_generate_r4060a8(struct gf100_gr *);
63void gf100_grctx_generate_r418bb8(struct gf100_gr *); 92void gf100_grctx_generate_rop_mapping(struct gf100_gr *);
64void gf100_grctx_generate_r406800(struct gf100_gr *); 93void gf100_grctx_generate_alpha_beta_tables(struct gf100_gr *);
94void gf100_grctx_generate_max_ways_evict(struct gf100_gr *);
95void gf100_grctx_generate_r419cb8(struct gf100_gr *);
65 96
66extern const struct gf100_grctx_func gf108_grctx; 97extern const struct gf100_grctx_func gf108_grctx;
67void gf108_grctx_generate_attrib(struct gf100_grctx *); 98void gf108_grctx_generate_attrib(struct gf100_grctx *);
@@ -72,22 +103,25 @@ extern const struct gf100_grctx_func gf110_grctx;
72 103
73extern const struct gf100_grctx_func gf117_grctx; 104extern const struct gf100_grctx_func gf117_grctx;
74void gf117_grctx_generate_attrib(struct gf100_grctx *); 105void gf117_grctx_generate_attrib(struct gf100_grctx *);
106void gf117_grctx_generate_rop_mapping(struct gf100_gr *);
107void gf117_grctx_generate_dist_skip_table(struct gf100_gr *);
75 108
76extern const struct gf100_grctx_func gf119_grctx; 109extern const struct gf100_grctx_func gf119_grctx;
77 110
78extern const struct gf100_grctx_func gk104_grctx; 111extern const struct gf100_grctx_func gk104_grctx;
112void gk104_grctx_generate_alpha_beta_tables(struct gf100_gr *);
113void gk104_grctx_generate_gpc_tpc_nr(struct gf100_gr *);
114
79extern const struct gf100_grctx_func gk20a_grctx; 115extern const struct gf100_grctx_func gk20a_grctx;
80void gk104_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
81void gk104_grctx_generate_bundle(struct gf100_grctx *); 116void gk104_grctx_generate_bundle(struct gf100_grctx *);
82void gk104_grctx_generate_pagepool(struct gf100_grctx *); 117void gk104_grctx_generate_pagepool(struct gf100_grctx *);
118void gk104_grctx_generate_patch_ltc(struct gf100_grctx *);
83void gk104_grctx_generate_unkn(struct gf100_gr *); 119void gk104_grctx_generate_unkn(struct gf100_gr *);
84void gk104_grctx_generate_r418bb8(struct gf100_gr *); 120void gk104_grctx_generate_r418800(struct gf100_gr *);
85
86void gm107_grctx_generate_bundle(struct gf100_grctx *);
87void gm107_grctx_generate_pagepool(struct gf100_grctx *);
88void gm107_grctx_generate_attrib(struct gf100_grctx *);
89 121
90extern const struct gf100_grctx_func gk110_grctx; 122extern const struct gf100_grctx_func gk110_grctx;
123void gk110_grctx_generate_r419eb0(struct gf100_gr *);
124
91extern const struct gf100_grctx_func gk110b_grctx; 125extern const struct gf100_grctx_func gk110b_grctx;
92extern const struct gf100_grctx_func gk208_grctx; 126extern const struct gf100_grctx_func gk208_grctx;
93 127
@@ -95,22 +129,30 @@ extern const struct gf100_grctx_func gm107_grctx;
95void gm107_grctx_generate_bundle(struct gf100_grctx *); 129void gm107_grctx_generate_bundle(struct gf100_grctx *);
96void gm107_grctx_generate_pagepool(struct gf100_grctx *); 130void gm107_grctx_generate_pagepool(struct gf100_grctx *);
97void gm107_grctx_generate_attrib(struct gf100_grctx *); 131void gm107_grctx_generate_attrib(struct gf100_grctx *);
132void gm107_grctx_generate_sm_id(struct gf100_gr *, int, int, int);
98 133
99extern const struct gf100_grctx_func gm200_grctx; 134extern const struct gf100_grctx_func gm200_grctx;
100void gm200_grctx_generate_tpcid(struct gf100_gr *); 135void gm200_grctx_generate_dist_skip_table(struct gf100_gr *);
101void gm200_grctx_generate_405b60(struct gf100_gr *); 136void gm200_grctx_generate_r406500(struct gf100_gr *);
137void gm200_grctx_generate_tpc_mask(struct gf100_gr *);
138void gm200_grctx_generate_smid_config(struct gf100_gr *);
139void gm200_grctx_generate_r419a3c(struct gf100_gr *);
102 140
103extern const struct gf100_grctx_func gm20b_grctx; 141extern const struct gf100_grctx_func gm20b_grctx;
104 142
105extern const struct gf100_grctx_func gp100_grctx; 143extern const struct gf100_grctx_func gp100_grctx;
106void gp100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
107void gp100_grctx_generate_pagepool(struct gf100_grctx *); 144void gp100_grctx_generate_pagepool(struct gf100_grctx *);
145void gp100_grctx_generate_smid_config(struct gf100_gr *);
108 146
109extern const struct gf100_grctx_func gp102_grctx; 147extern const struct gf100_grctx_func gp102_grctx;
110void gp102_grctx_generate_attrib(struct gf100_grctx *); 148void gp102_grctx_generate_attrib(struct gf100_grctx *);
111 149
150extern const struct gf100_grctx_func gp104_grctx;
151
112extern const struct gf100_grctx_func gp107_grctx; 152extern const struct gf100_grctx_func gp107_grctx;
113 153
154extern const struct gf100_grctx_func gv100_grctx;
155
114/* context init value lists */ 156/* context init value lists */
115 157
116extern const struct gf100_gr_pack gf100_grctx_pack_icmd[]; 158extern const struct gf100_gr_pack gf100_grctx_pack_icmd[];
@@ -128,7 +170,8 @@ extern const struct gf100_gr_init gf100_grctx_init_memfmt_0[];
128extern const struct gf100_gr_init gf100_grctx_init_rstr2d_0[]; 170extern const struct gf100_gr_init gf100_grctx_init_rstr2d_0[];
129extern const struct gf100_gr_init gf100_grctx_init_scc_0[]; 171extern const struct gf100_gr_init gf100_grctx_init_scc_0[];
130 172
131extern const struct gf100_gr_pack gf100_grctx_pack_gpc[]; 173extern const struct gf100_gr_pack gf100_grctx_pack_gpc_0[];
174extern const struct gf100_gr_pack gf100_grctx_pack_gpc_1[];
132extern const struct gf100_gr_init gf100_grctx_init_gpc_unk_0[]; 175extern const struct gf100_gr_init gf100_grctx_init_gpc_unk_0[];
133extern const struct gf100_gr_init gf100_grctx_init_prop_0[]; 176extern const struct gf100_gr_init gf100_grctx_init_prop_0[];
134extern const struct gf100_gr_init gf100_grctx_init_gpc_unk_1[]; 177extern const struct gf100_gr_init gf100_grctx_init_gpc_unk_1[];
@@ -177,6 +220,8 @@ extern const struct gf100_gr_init gf117_grctx_init_pe_0[];
177 220
178extern const struct gf100_gr_init gf117_grctx_init_wwdx_0[]; 221extern const struct gf100_gr_init gf117_grctx_init_wwdx_0[];
179 222
223extern const struct gf100_gr_pack gf117_grctx_pack_gpc_1[];
224
180extern const struct gf100_gr_init gk104_grctx_init_memfmt_0[]; 225extern const struct gf100_gr_init gk104_grctx_init_memfmt_0[];
181extern const struct gf100_gr_init gk104_grctx_init_ds_0[]; 226extern const struct gf100_gr_init gk104_grctx_init_ds_0[];
182extern const struct gf100_gr_init gk104_grctx_init_scc_0[]; 227extern const struct gf100_gr_init gk104_grctx_init_scc_0[];
@@ -186,7 +231,6 @@ extern const struct gf100_gr_init gk104_grctx_init_gpm_0[];
186extern const struct gf100_gr_init gk104_grctx_init_pes_0[]; 231extern const struct gf100_gr_init gk104_grctx_init_pes_0[];
187 232
188extern const struct gf100_gr_pack gk104_grctx_pack_hub[]; 233extern const struct gf100_gr_pack gk104_grctx_pack_hub[];
189extern const struct gf100_gr_pack gk104_grctx_pack_gpc[];
190extern const struct gf100_gr_pack gk104_grctx_pack_tpc[]; 234extern const struct gf100_gr_pack gk104_grctx_pack_tpc[];
191extern const struct gf100_gr_pack gk104_grctx_pack_ppc[]; 235extern const struct gf100_gr_pack gk104_grctx_pack_ppc[];
192extern const struct gf100_gr_pack gk104_grctx_pack_icmd[]; 236extern const struct gf100_gr_pack gk104_grctx_pack_icmd[];
@@ -200,7 +244,8 @@ extern const struct gf100_gr_pack gk110_grctx_pack_hub[];
200extern const struct gf100_gr_init gk110_grctx_init_pri_0[]; 244extern const struct gf100_gr_init gk110_grctx_init_pri_0[];
201extern const struct gf100_gr_init gk110_grctx_init_cwd_0[]; 245extern const struct gf100_gr_init gk110_grctx_init_cwd_0[];
202 246
203extern const struct gf100_gr_pack gk110_grctx_pack_gpc[]; 247extern const struct gf100_gr_pack gk110_grctx_pack_gpc_0[];
248extern const struct gf100_gr_pack gk110_grctx_pack_gpc_1[];
204extern const struct gf100_gr_init gk110_grctx_init_gpc_unk_2[]; 249extern const struct gf100_gr_init gk110_grctx_init_gpc_unk_2[];
205 250
206extern const struct gf100_gr_init gk110_grctx_init_tex_0[]; 251extern const struct gf100_gr_init gk110_grctx_init_tex_0[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
index 54fd74e9cca0..7a0564b6e3c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
@@ -84,7 +84,8 @@ gf104_grctx = {
84 .main = gf100_grctx_generate_main, 84 .main = gf100_grctx_generate_main,
85 .unkn = gf100_grctx_generate_unkn, 85 .unkn = gf100_grctx_generate_unkn,
86 .hub = gf100_grctx_pack_hub, 86 .hub = gf100_grctx_pack_hub,
87 .gpc = gf100_grctx_pack_gpc, 87 .gpc_0 = gf100_grctx_pack_gpc_0,
88 .gpc_1 = gf100_grctx_pack_gpc_1,
88 .zcull = gf100_grctx_pack_zcull, 89 .zcull = gf100_grctx_pack_zcull,
89 .tpc = gf104_grctx_pack_tpc, 90 .tpc = gf104_grctx_pack_tpc,
90 .icmd = gf100_grctx_pack_icmd, 91 .icmd = gf100_grctx_pack_icmd,
@@ -96,4 +97,11 @@ gf104_grctx = {
96 .attrib = gf100_grctx_generate_attrib, 97 .attrib = gf100_grctx_generate_attrib,
97 .attrib_nr_max = 0x324, 98 .attrib_nr_max = 0x324,
98 .attrib_nr = 0x218, 99 .attrib_nr = 0x218,
100 .sm_id = gf100_grctx_generate_sm_id,
101 .tpc_nr = gf100_grctx_generate_tpc_nr,
102 .r4060a8 = gf100_grctx_generate_r4060a8,
103 .rop_mapping = gf100_grctx_generate_rop_mapping,
104 .alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
105 .max_ways_evict = gf100_grctx_generate_max_ways_evict,
106 .r419cb8 = gf100_grctx_generate_r419cb8,
99}; 107};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index 82f71b10c06e..dda2c32e6232 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -667,12 +667,17 @@ gf108_grctx_init_gpm_0[] = {
667}; 667};
668 668
669static const struct gf100_gr_pack 669static const struct gf100_gr_pack
670gf108_grctx_pack_gpc[] = { 670gf108_grctx_pack_gpc_0[] = {
671 { gf100_grctx_init_gpc_unk_0 }, 671 { gf100_grctx_init_gpc_unk_0 },
672 { gf100_grctx_init_prop_0 }, 672 { gf100_grctx_init_prop_0 },
673 { gf100_grctx_init_gpc_unk_1 }, 673 { gf100_grctx_init_gpc_unk_1 },
674 { gf108_grctx_init_setup_0 }, 674 { gf108_grctx_init_setup_0 },
675 { gf100_grctx_init_zcull_0 }, 675 { gf100_grctx_init_zcull_0 },
676 {}
677};
678
679static const struct gf100_gr_pack
680gf108_grctx_pack_gpc_1[] = {
676 { gf100_grctx_init_crstr_0 }, 681 { gf100_grctx_init_crstr_0 },
677 { gf108_grctx_init_gpm_0 }, 682 { gf108_grctx_init_gpm_0 },
678 { gf100_grctx_init_gcc_0 }, 683 { gf100_grctx_init_gcc_0 },
@@ -780,7 +785,8 @@ gf108_grctx = {
780 .main = gf100_grctx_generate_main, 785 .main = gf100_grctx_generate_main,
781 .unkn = gf108_grctx_generate_unkn, 786 .unkn = gf108_grctx_generate_unkn,
782 .hub = gf108_grctx_pack_hub, 787 .hub = gf108_grctx_pack_hub,
783 .gpc = gf108_grctx_pack_gpc, 788 .gpc_0 = gf108_grctx_pack_gpc_0,
789 .gpc_1 = gf108_grctx_pack_gpc_1,
784 .zcull = gf100_grctx_pack_zcull, 790 .zcull = gf100_grctx_pack_zcull,
785 .tpc = gf108_grctx_pack_tpc, 791 .tpc = gf108_grctx_pack_tpc,
786 .icmd = gf108_grctx_pack_icmd, 792 .icmd = gf108_grctx_pack_icmd,
@@ -794,4 +800,11 @@ gf108_grctx = {
794 .attrib_nr = 0x218, 800 .attrib_nr = 0x218,
795 .alpha_nr_max = 0x324, 801 .alpha_nr_max = 0x324,
796 .alpha_nr = 0x218, 802 .alpha_nr = 0x218,
803 .sm_id = gf100_grctx_generate_sm_id,
804 .tpc_nr = gf100_grctx_generate_tpc_nr,
805 .r4060a8 = gf100_grctx_generate_r4060a8,
806 .rop_mapping = gf100_grctx_generate_rop_mapping,
807 .alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
808 .max_ways_evict = gf100_grctx_generate_max_ways_evict,
809 .r419cb8 = gf100_grctx_generate_r419cb8,
797}; 810};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
index 7df398b53f8f..f5cca5e6a4f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
@@ -314,15 +314,12 @@ gf110_grctx_init_setup_0[] = {
314}; 314};
315 315
316static const struct gf100_gr_pack 316static const struct gf100_gr_pack
317gf110_grctx_pack_gpc[] = { 317gf110_grctx_pack_gpc_0[] = {
318 { gf100_grctx_init_gpc_unk_0 }, 318 { gf100_grctx_init_gpc_unk_0 },
319 { gf100_grctx_init_prop_0 }, 319 { gf100_grctx_init_prop_0 },
320 { gf100_grctx_init_gpc_unk_1 }, 320 { gf100_grctx_init_gpc_unk_1 },
321 { gf110_grctx_init_setup_0 }, 321 { gf110_grctx_init_setup_0 },
322 { gf100_grctx_init_zcull_0 }, 322 { gf100_grctx_init_zcull_0 },
323 { gf100_grctx_init_crstr_0 },
324 { gf100_grctx_init_gpm_0 },
325 { gf100_grctx_init_gcc_0 },
326 {} 323 {}
327}; 324};
328 325
@@ -335,7 +332,8 @@ gf110_grctx = {
335 .main = gf100_grctx_generate_main, 332 .main = gf100_grctx_generate_main,
336 .unkn = gf100_grctx_generate_unkn, 333 .unkn = gf100_grctx_generate_unkn,
337 .hub = gf100_grctx_pack_hub, 334 .hub = gf100_grctx_pack_hub,
338 .gpc = gf110_grctx_pack_gpc, 335 .gpc_0 = gf110_grctx_pack_gpc_0,
336 .gpc_1 = gf100_grctx_pack_gpc_1,
339 .zcull = gf100_grctx_pack_zcull, 337 .zcull = gf100_grctx_pack_zcull,
340 .tpc = gf100_grctx_pack_tpc, 338 .tpc = gf100_grctx_pack_tpc,
341 .icmd = gf110_grctx_pack_icmd, 339 .icmd = gf110_grctx_pack_icmd,
@@ -347,4 +345,11 @@ gf110_grctx = {
347 .attrib = gf100_grctx_generate_attrib, 345 .attrib = gf100_grctx_generate_attrib,
348 .attrib_nr_max = 0x324, 346 .attrib_nr_max = 0x324,
349 .attrib_nr = 0x218, 347 .attrib_nr = 0x218,
348 .sm_id = gf100_grctx_generate_sm_id,
349 .tpc_nr = gf100_grctx_generate_tpc_nr,
350 .r4060a8 = gf100_grctx_generate_r4060a8,
351 .rop_mapping = gf100_grctx_generate_rop_mapping,
352 .alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
353 .max_ways_evict = gf100_grctx_generate_max_ways_evict,
354 .r419cb8 = gf100_grctx_generate_r419cb8,
350}; 355};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 19301d88577d..276c282d19aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -84,12 +84,17 @@ gf117_grctx_init_setup_0[] = {
84}; 84};
85 85
86static const struct gf100_gr_pack 86static const struct gf100_gr_pack
87gf117_grctx_pack_gpc[] = { 87gf117_grctx_pack_gpc_0[] = {
88 { gf100_grctx_init_gpc_unk_0 }, 88 { gf100_grctx_init_gpc_unk_0 },
89 { gf119_grctx_init_prop_0 }, 89 { gf119_grctx_init_prop_0 },
90 { gf119_grctx_init_gpc_unk_1 }, 90 { gf119_grctx_init_gpc_unk_1 },
91 { gf117_grctx_init_setup_0 }, 91 { gf117_grctx_init_setup_0 },
92 { gf100_grctx_init_zcull_0 }, 92 { gf100_grctx_init_zcull_0 },
93 {}
94};
95
96const struct gf100_gr_pack
97gf117_grctx_pack_gpc_1[] = {
93 { gf119_grctx_init_crstr_0 }, 98 { gf119_grctx_init_crstr_0 },
94 { gf108_grctx_init_gpm_0 }, 99 { gf108_grctx_init_gpm_0 },
95 { gf100_grctx_init_gcc_0 }, 100 { gf100_grctx_init_gcc_0 },
@@ -180,6 +185,62 @@ gf117_grctx_pack_ppc[] = {
180 ******************************************************************************/ 185 ******************************************************************************/
181 186
182void 187void
188gf117_grctx_generate_dist_skip_table(struct gf100_gr *gr)
189{
190 struct nvkm_device *device = gr->base.engine.subdev.device;
191 int i;
192
193 for (i = 0; i < 8; i++)
194 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
195}
196
197void
198gf117_grctx_generate_rop_mapping(struct gf100_gr *gr)
199{
200 struct nvkm_device *device = gr->base.engine.subdev.device;
201 u32 data[6] = {}, data2[2] = {};
202 u8 shift, ntpcv;
203 int i;
204
205 /* Pack tile map into register format. */
206 for (i = 0; i < 32; i++)
207 data[i / 6] |= (gr->tile[i] & 0x07) << ((i % 6) * 5);
208
209 /* Magic. */
210 shift = 0;
211 ntpcv = gr->tpc_total;
212 while (!(ntpcv & (1 << 4))) {
213 ntpcv <<= 1;
214 shift++;
215 }
216
217 data2[0] = (ntpcv << 16);
218 data2[0] |= (shift << 21);
219 data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
220 for (i = 1; i < 7; i++)
221 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
222
223 /* GPC_BROADCAST */
224 nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
225 gr->screen_tile_row_offset);
226 for (i = 0; i < 6; i++)
227 nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
228
229 /* GPC_BROADCAST.TP_BROADCAST */
230 nvkm_wr32(device, 0x41bfd0, (gr->tpc_total << 8) |
231 gr->screen_tile_row_offset | data2[0]);
232 nvkm_wr32(device, 0x41bfe4, data2[1]);
233 for (i = 0; i < 6; i++)
234 nvkm_wr32(device, 0x41bf00 + (i * 4), data[i]);
235
236 /* UNK78xx */
237 nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
238 gr->screen_tile_row_offset);
239 for (i = 0; i < 6; i++)
240 nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
241}
242
243void
183gf117_grctx_generate_attrib(struct gf100_grctx *info) 244gf117_grctx_generate_attrib(struct gf100_grctx *info)
184{ 245{
185 struct gf100_gr *gr = info->gr; 246 struct gf100_gr *gr = info->gr;
@@ -217,50 +278,13 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
217 } 278 }
218} 279}
219 280
220static void
221gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
222{
223 struct nvkm_device *device = gr->base.engine.subdev.device;
224 const struct gf100_grctx_func *grctx = gr->func->grctx;
225 u32 idle_timeout;
226 int i;
227
228 nvkm_mc_unk260(device, 0);
229
230 gf100_gr_mmio(gr, grctx->hub);
231 gf100_gr_mmio(gr, grctx->gpc);
232 gf100_gr_mmio(gr, grctx->zcull);
233 gf100_gr_mmio(gr, grctx->tpc);
234 gf100_gr_mmio(gr, grctx->ppc);
235
236 idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
237
238 grctx->bundle(info);
239 grctx->pagepool(info);
240 grctx->attrib(info);
241 grctx->unkn(gr);
242
243 gf100_grctx_generate_tpcid(gr);
244 gf100_grctx_generate_r406028(gr);
245 gf100_grctx_generate_r4060a8(gr);
246 gk104_grctx_generate_r418bb8(gr);
247 gf100_grctx_generate_r406800(gr);
248
249 for (i = 0; i < 8; i++)
250 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
251
252 gf100_gr_icmd(gr, grctx->icmd);
253 nvkm_wr32(device, 0x404154, idle_timeout);
254 gf100_gr_mthd(gr, grctx->mthd);
255 nvkm_mc_unk260(device, 1);
256}
257
258const struct gf100_grctx_func 281const struct gf100_grctx_func
259gf117_grctx = { 282gf117_grctx = {
260 .main = gf117_grctx_generate_main, 283 .main = gf100_grctx_generate_main,
261 .unkn = gk104_grctx_generate_unkn, 284 .unkn = gk104_grctx_generate_unkn,
262 .hub = gf117_grctx_pack_hub, 285 .hub = gf117_grctx_pack_hub,
263 .gpc = gf117_grctx_pack_gpc, 286 .gpc_0 = gf117_grctx_pack_gpc_0,
287 .gpc_1 = gf117_grctx_pack_gpc_1,
264 .zcull = gf100_grctx_pack_zcull, 288 .zcull = gf100_grctx_pack_zcull,
265 .tpc = gf117_grctx_pack_tpc, 289 .tpc = gf117_grctx_pack_tpc,
266 .ppc = gf117_grctx_pack_ppc, 290 .ppc = gf117_grctx_pack_ppc,
@@ -275,4 +299,12 @@ gf117_grctx = {
275 .attrib_nr = 0x218, 299 .attrib_nr = 0x218,
276 .alpha_nr_max = 0x7ff, 300 .alpha_nr_max = 0x7ff,
277 .alpha_nr = 0x324, 301 .alpha_nr = 0x324,
302 .sm_id = gf100_grctx_generate_sm_id,
303 .tpc_nr = gf100_grctx_generate_tpc_nr,
304 .r4060a8 = gf100_grctx_generate_r4060a8,
305 .rop_mapping = gf117_grctx_generate_rop_mapping,
306 .alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
307 .max_ways_evict = gf100_grctx_generate_max_ways_evict,
308 .dist_skip_table = gf117_grctx_generate_dist_skip_table,
309 .r419cb8 = gf100_grctx_generate_r419cb8,
278}; 310};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
index 605185b078be..0cfe46366af6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
@@ -431,15 +431,12 @@ gf119_grctx_init_crstr_0[] = {
431}; 431};
432 432
433static const struct gf100_gr_pack 433static const struct gf100_gr_pack
434gf119_grctx_pack_gpc[] = { 434gf119_grctx_pack_gpc_0[] = {
435 { gf100_grctx_init_gpc_unk_0 }, 435 { gf100_grctx_init_gpc_unk_0 },
436 { gf119_grctx_init_prop_0 }, 436 { gf119_grctx_init_prop_0 },
437 { gf119_grctx_init_gpc_unk_1 }, 437 { gf119_grctx_init_gpc_unk_1 },
438 { gf119_grctx_init_setup_0 }, 438 { gf119_grctx_init_setup_0 },
439 { gf100_grctx_init_zcull_0 }, 439 { gf100_grctx_init_zcull_0 },
440 { gf119_grctx_init_crstr_0 },
441 { gf108_grctx_init_gpm_0 },
442 { gf100_grctx_init_gcc_0 },
443 {} 440 {}
444}; 441};
445 442
@@ -503,7 +500,8 @@ gf119_grctx = {
503 .main = gf100_grctx_generate_main, 500 .main = gf100_grctx_generate_main,
504 .unkn = gf108_grctx_generate_unkn, 501 .unkn = gf108_grctx_generate_unkn,
505 .hub = gf119_grctx_pack_hub, 502 .hub = gf119_grctx_pack_hub,
506 .gpc = gf119_grctx_pack_gpc, 503 .gpc_0 = gf119_grctx_pack_gpc_0,
504 .gpc_1 = gf117_grctx_pack_gpc_1,
507 .zcull = gf100_grctx_pack_zcull, 505 .zcull = gf100_grctx_pack_zcull,
508 .tpc = gf119_grctx_pack_tpc, 506 .tpc = gf119_grctx_pack_tpc,
509 .icmd = gf119_grctx_pack_icmd, 507 .icmd = gf119_grctx_pack_icmd,
@@ -517,4 +515,11 @@ gf119_grctx = {
517 .attrib_nr = 0x218, 515 .attrib_nr = 0x218,
518 .alpha_nr_max = 0x324, 516 .alpha_nr_max = 0x324,
519 .alpha_nr = 0x218, 517 .alpha_nr = 0x218,
518 .sm_id = gf100_grctx_generate_sm_id,
519 .tpc_nr = gf100_grctx_generate_tpc_nr,
520 .r4060a8 = gf100_grctx_generate_r4060a8,
521 .rop_mapping = gf100_grctx_generate_rop_mapping,
522 .alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
523 .max_ways_evict = gf100_grctx_generate_max_ways_evict,
524 .r419cb8 = gf100_grctx_generate_r419cb8,
520}; 525};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 825c8fd500bc..304e9d268bad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -739,13 +739,18 @@ gk104_grctx_init_gpm_0[] = {
739 {} 739 {}
740}; 740};
741 741
742const struct gf100_gr_pack 742static const struct gf100_gr_pack
743gk104_grctx_pack_gpc[] = { 743gk104_grctx_pack_gpc_0[] = {
744 { gf100_grctx_init_gpc_unk_0 }, 744 { gf100_grctx_init_gpc_unk_0 },
745 { gf119_grctx_init_prop_0 }, 745 { gf119_grctx_init_prop_0 },
746 { gf119_grctx_init_gpc_unk_1 }, 746 { gf119_grctx_init_gpc_unk_1 },
747 { gk104_grctx_init_setup_0 }, 747 { gk104_grctx_init_setup_0 },
748 { gf100_grctx_init_zcull_0 }, 748 { gf100_grctx_init_zcull_0 },
749 {}
750};
751
752static const struct gf100_gr_pack
753gk104_grctx_pack_gpc_1[] = {
749 { gf119_grctx_init_crstr_0 }, 754 { gf119_grctx_init_crstr_0 },
750 { gk104_grctx_init_gpm_0 }, 755 { gk104_grctx_init_gpm_0 },
751 { gf100_grctx_init_gcc_0 }, 756 { gf100_grctx_init_gcc_0 },
@@ -841,6 +846,32 @@ gk104_grctx_pack_ppc[] = {
841 ******************************************************************************/ 846 ******************************************************************************/
842 847
843void 848void
849gk104_grctx_generate_r418800(struct gf100_gr *gr)
850{
851 struct nvkm_device *device = gr->base.engine.subdev.device;
852 /*XXX: Not real sure where to apply these, there doesn't seem
853 * to be any pattern to which chipsets it's done on.
854 *
855 * Perhaps a VBIOS tweak?
856 */
857 if (0) {
858 nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
859 nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
860 }
861}
862
863void
864gk104_grctx_generate_patch_ltc(struct gf100_grctx *info)
865{
866 struct nvkm_device *device = info->gr->base.engine.subdev.device;
867 u32 data0 = nvkm_rd32(device, 0x17e91c);
868 u32 data1 = nvkm_rd32(device, 0x17e920);
869 /*XXX: Figure out how to modify this correctly! */
870 mmio_wr32(info, 0x17e91c, data0);
871 mmio_wr32(info, 0x17e920, data1);
872}
873
874void
844gk104_grctx_generate_bundle(struct gf100_grctx *info) 875gk104_grctx_generate_bundle(struct gf100_grctx *info)
845{ 876{
846 const struct gf100_grctx_func *grctx = info->gr->func->grctx; 877 const struct gf100_grctx_func *grctx = info->gr->func->grctx;
@@ -881,114 +912,74 @@ gk104_grctx_generate_unkn(struct gf100_gr *gr)
881 nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008); 912 nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
882} 913}
883 914
884void 915static void
885gk104_grctx_generate_r418bb8(struct gf100_gr *gr) 916gk104_grctx_generate_r419f78(struct gf100_gr *gr)
886{ 917{
887 struct nvkm_device *device = gr->base.engine.subdev.device; 918 struct nvkm_device *device = gr->base.engine.subdev.device;
888 u32 data[6] = {}, data2[2] = {}; 919 nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
889 u8 tpcnr[GPC_MAX];
890 u8 shift, ntpcv;
891 int gpc, tpc, i;
892
893 /* calculate first set of magics */
894 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
895
896 gpc = -1;
897 for (tpc = 0; tpc < gr->tpc_total; tpc++) {
898 do {
899 gpc = (gpc + 1) % gr->gpc_nr;
900 } while (!tpcnr[gpc]);
901 tpcnr[gpc]--;
902
903 data[tpc / 6] |= gpc << ((tpc % 6) * 5);
904 }
905
906 for (; tpc < 32; tpc++)
907 data[tpc / 6] |= 7 << ((tpc % 6) * 5);
908
909 /* and the second... */
910 shift = 0;
911 ntpcv = gr->tpc_total;
912 while (!(ntpcv & (1 << 4))) {
913 ntpcv <<= 1;
914 shift++;
915 }
916
917 data2[0] = (ntpcv << 16);
918 data2[0] |= (shift << 21);
919 data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
920 for (i = 1; i < 7; i++)
921 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
922
923 /* GPC_BROADCAST */
924 nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
925 gr->screen_tile_row_offset);
926 for (i = 0; i < 6; i++)
927 nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
928
929 /* GPC_BROADCAST.TP_BROADCAST */
930 nvkm_wr32(device, 0x41bfd0, (gr->tpc_total << 8) |
931 gr->screen_tile_row_offset | data2[0]);
932 nvkm_wr32(device, 0x41bfe4, data2[1]);
933 for (i = 0; i < 6; i++)
934 nvkm_wr32(device, 0x41bf00 + (i * 4), data[i]);
935
936 /* UNK78xx */
937 nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
938 gr->screen_tile_row_offset);
939 for (i = 0; i < 6; i++)
940 nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
941} 920}
942 921
943void 922void
944gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) 923gk104_grctx_generate_gpc_tpc_nr(struct gf100_gr *gr)
945{ 924{
946 struct nvkm_device *device = gr->base.engine.subdev.device; 925 struct nvkm_device *device = gr->base.engine.subdev.device;
947 const struct gf100_grctx_func *grctx = gr->func->grctx;
948 u32 idle_timeout;
949 int i;
950
951 nvkm_mc_unk260(device, 0);
952
953 gf100_gr_mmio(gr, grctx->hub);
954 gf100_gr_mmio(gr, grctx->gpc);
955 gf100_gr_mmio(gr, grctx->zcull);
956 gf100_gr_mmio(gr, grctx->tpc);
957 gf100_gr_mmio(gr, grctx->ppc);
958
959 idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
960
961 grctx->bundle(info);
962 grctx->pagepool(info);
963 grctx->attrib(info);
964 grctx->unkn(gr);
965
966 gf100_grctx_generate_tpcid(gr);
967 gf100_grctx_generate_r406028(gr);
968 gk104_grctx_generate_r418bb8(gr);
969 gf100_grctx_generate_r406800(gr);
970
971 for (i = 0; i < 8; i++)
972 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
973
974 nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr); 926 nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
975 nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000); 927}
976
977 gf100_gr_icmd(gr, grctx->icmd);
978 nvkm_wr32(device, 0x404154, idle_timeout);
979 gf100_gr_mthd(gr, grctx->mthd);
980 nvkm_mc_unk260(device, 1);
981 928
982 nvkm_mask(device, 0x418800, 0x00200000, 0x00200000); 929void
983 nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000); 930gk104_grctx_generate_alpha_beta_tables(struct gf100_gr *gr)
931{
932 struct nvkm_device *device = gr->base.engine.subdev.device;
933 int i, j, gpc, ppc;
934
935 for (i = 0; i < 32; i++) {
936 u32 atarget = max_t(u32, gr->tpc_total * i / 32, 1);
937 u32 btarget = gr->tpc_total - atarget;
938 bool alpha = atarget < btarget;
939 u64 amask = 0, bmask = 0;
940
941 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
942 for (ppc = 0; ppc < gr->func->ppc_nr; ppc++) {
943 u32 ppc_tpcs = gr->ppc_tpc_nr[gpc][ppc];
944 u32 abits, bbits, pmask;
945
946 if (alpha) {
947 abits = atarget ? ppc_tpcs : 0;
948 bbits = ppc_tpcs - abits;
949 } else {
950 bbits = btarget ? ppc_tpcs : 0;
951 abits = ppc_tpcs - bbits;
952 }
953
954 pmask = gr->ppc_tpc_mask[gpc][ppc];
955 while (ppc_tpcs-- > abits)
956 pmask &= pmask - 1;
957 amask |= (u64)pmask << (gpc * 8);
958
959 pmask ^= gr->ppc_tpc_mask[gpc][ppc];
960 bmask |= (u64)pmask << (gpc * 8);
961
962 atarget -= min(abits, atarget);
963 btarget -= min(bbits, btarget);
964 if ((abits > 0) || (bbits > 0))
965 alpha = !alpha;
966 }
967 }
968
969 for (j = 0; j < gr->gpc_nr; j += 4, amask >>= 32, bmask >>= 32) {
970 nvkm_wr32(device, 0x406800 + (i * 0x20) + j, amask);
971 nvkm_wr32(device, 0x406c00 + (i * 0x20) + j, bmask);
972 }
973 }
984} 974}
985 975
986const struct gf100_grctx_func 976const struct gf100_grctx_func
987gk104_grctx = { 977gk104_grctx = {
988 .main = gk104_grctx_generate_main, 978 .main = gf100_grctx_generate_main,
989 .unkn = gk104_grctx_generate_unkn, 979 .unkn = gk104_grctx_generate_unkn,
990 .hub = gk104_grctx_pack_hub, 980 .hub = gk104_grctx_pack_hub,
991 .gpc = gk104_grctx_pack_gpc, 981 .gpc_0 = gk104_grctx_pack_gpc_0,
982 .gpc_1 = gk104_grctx_pack_gpc_1,
992 .zcull = gf100_grctx_pack_zcull, 983 .zcull = gf100_grctx_pack_zcull,
993 .tpc = gk104_grctx_pack_tpc, 984 .tpc = gk104_grctx_pack_tpc,
994 .ppc = gk104_grctx_pack_ppc, 985 .ppc = gk104_grctx_pack_ppc,
@@ -1005,4 +996,13 @@ gk104_grctx = {
1005 .attrib_nr = 0x218, 996 .attrib_nr = 0x218,
1006 .alpha_nr_max = 0x7ff, 997 .alpha_nr_max = 0x7ff,
1007 .alpha_nr = 0x648, 998 .alpha_nr = 0x648,
999 .patch_ltc = gk104_grctx_generate_patch_ltc,
1000 .sm_id = gf100_grctx_generate_sm_id,
1001 .tpc_nr = gf100_grctx_generate_tpc_nr,
1002 .rop_mapping = gf117_grctx_generate_rop_mapping,
1003 .alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
1004 .dist_skip_table = gf117_grctx_generate_dist_skip_table,
1005 .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
1006 .r419f78 = gk104_grctx_generate_r419f78,
1007 .r418800 = gk104_grctx_generate_r418800,
1008}; 1008};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index 7b95ec2fe453..86547cfc38dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -704,12 +704,17 @@ gk110_grctx_init_gpc_unk_2[] = {
704}; 704};
705 705
706const struct gf100_gr_pack 706const struct gf100_gr_pack
707gk110_grctx_pack_gpc[] = { 707gk110_grctx_pack_gpc_0[] = {
708 { gf100_grctx_init_gpc_unk_0 }, 708 { gf100_grctx_init_gpc_unk_0 },
709 { gf119_grctx_init_prop_0 }, 709 { gf119_grctx_init_prop_0 },
710 { gf119_grctx_init_gpc_unk_1 }, 710 { gf119_grctx_init_gpc_unk_1 },
711 { gk110_grctx_init_setup_0 }, 711 { gk110_grctx_init_setup_0 },
712 { gf100_grctx_init_zcull_0 }, 712 { gf100_grctx_init_zcull_0 },
713 {}
714};
715
716const struct gf100_gr_pack
717gk110_grctx_pack_gpc_1[] = {
713 { gf119_grctx_init_crstr_0 }, 718 { gf119_grctx_init_crstr_0 },
714 { gk104_grctx_init_gpm_0 }, 719 { gk104_grctx_init_gpm_0 },
715 { gk110_grctx_init_gpc_unk_2 }, 720 { gk110_grctx_init_gpc_unk_2 },
@@ -808,12 +813,20 @@ gk110_grctx_pack_ppc[] = {
808 * PGRAPH context implementation 813 * PGRAPH context implementation
809 ******************************************************************************/ 814 ******************************************************************************/
810 815
816void
817gk110_grctx_generate_r419eb0(struct gf100_gr *gr)
818{
819 struct nvkm_device *device = gr->base.engine.subdev.device;
820 nvkm_mask(device, 0x419eb0, 0x00001000, 0x00001000);
821}
822
811const struct gf100_grctx_func 823const struct gf100_grctx_func
812gk110_grctx = { 824gk110_grctx = {
813 .main = gk104_grctx_generate_main, 825 .main = gf100_grctx_generate_main,
814 .unkn = gk104_grctx_generate_unkn, 826 .unkn = gk104_grctx_generate_unkn,
815 .hub = gk110_grctx_pack_hub, 827 .hub = gk110_grctx_pack_hub,
816 .gpc = gk110_grctx_pack_gpc, 828 .gpc_0 = gk110_grctx_pack_gpc_0,
829 .gpc_1 = gk110_grctx_pack_gpc_1,
817 .zcull = gf100_grctx_pack_zcull, 830 .zcull = gf100_grctx_pack_zcull,
818 .tpc = gk110_grctx_pack_tpc, 831 .tpc = gk110_grctx_pack_tpc,
819 .ppc = gk110_grctx_pack_ppc, 832 .ppc = gk110_grctx_pack_ppc,
@@ -830,4 +843,13 @@ gk110_grctx = {
830 .attrib_nr = 0x218, 843 .attrib_nr = 0x218,
831 .alpha_nr_max = 0x7ff, 844 .alpha_nr_max = 0x7ff,
832 .alpha_nr = 0x648, 845 .alpha_nr = 0x648,
846 .patch_ltc = gk104_grctx_generate_patch_ltc,
847 .sm_id = gf100_grctx_generate_sm_id,
848 .tpc_nr = gf100_grctx_generate_tpc_nr,
849 .rop_mapping = gf117_grctx_generate_rop_mapping,
850 .alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
851 .dist_skip_table = gf117_grctx_generate_dist_skip_table,
852 .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
853 .r418800 = gk104_grctx_generate_r418800,
854 .r419eb0 = gk110_grctx_generate_r419eb0,
833}; 855};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index 048b1152da44..ebb947bd1446 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -71,10 +71,11 @@ gk110b_grctx_pack_tpc[] = {
71 71
72const struct gf100_grctx_func 72const struct gf100_grctx_func
73gk110b_grctx = { 73gk110b_grctx = {
74 .main = gk104_grctx_generate_main, 74 .main = gf100_grctx_generate_main,
75 .unkn = gk104_grctx_generate_unkn, 75 .unkn = gk104_grctx_generate_unkn,
76 .hub = gk110_grctx_pack_hub, 76 .hub = gk110_grctx_pack_hub,
77 .gpc = gk110_grctx_pack_gpc, 77 .gpc_0 = gk110_grctx_pack_gpc_0,
78 .gpc_1 = gk110_grctx_pack_gpc_1,
78 .zcull = gf100_grctx_pack_zcull, 79 .zcull = gf100_grctx_pack_zcull,
79 .tpc = gk110b_grctx_pack_tpc, 80 .tpc = gk110b_grctx_pack_tpc,
80 .ppc = gk110_grctx_pack_ppc, 81 .ppc = gk110_grctx_pack_ppc,
@@ -91,4 +92,13 @@ gk110b_grctx = {
91 .attrib_nr = 0x218, 92 .attrib_nr = 0x218,
92 .alpha_nr_max = 0x7ff, 93 .alpha_nr_max = 0x7ff,
93 .alpha_nr = 0x648, 94 .alpha_nr = 0x648,
95 .patch_ltc = gk104_grctx_generate_patch_ltc,
96 .sm_id = gf100_grctx_generate_sm_id,
97 .tpc_nr = gf100_grctx_generate_tpc_nr,
98 .rop_mapping = gf117_grctx_generate_rop_mapping,
99 .alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
100 .dist_skip_table = gf117_grctx_generate_dist_skip_table,
101 .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
102 .r418800 = gk104_grctx_generate_r418800,
103 .r419eb0 = gk110_grctx_generate_r419eb0,
94}; 104};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index 67b7a1b43617..4d40512b5c99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -443,12 +443,17 @@ gk208_grctx_init_gpm_0[] = {
443}; 443};
444 444
445static const struct gf100_gr_pack 445static const struct gf100_gr_pack
446gk208_grctx_pack_gpc[] = { 446gk208_grctx_pack_gpc_0[] = {
447 { gf100_grctx_init_gpc_unk_0 }, 447 { gf100_grctx_init_gpc_unk_0 },
448 { gk208_grctx_init_prop_0 }, 448 { gk208_grctx_init_prop_0 },
449 { gk208_grctx_init_gpc_unk_1 }, 449 { gk208_grctx_init_gpc_unk_1 },
450 { gk208_grctx_init_setup_0 }, 450 { gk208_grctx_init_setup_0 },
451 { gf100_grctx_init_zcull_0 }, 451 { gf100_grctx_init_zcull_0 },
452 {}
453};
454
455static const struct gf100_gr_pack
456gk208_grctx_pack_gpc_1[] = {
452 { gk208_grctx_init_crstr_0 }, 457 { gk208_grctx_init_crstr_0 },
453 { gk208_grctx_init_gpm_0 }, 458 { gk208_grctx_init_gpm_0 },
454 { gk110_grctx_init_gpc_unk_2 }, 459 { gk110_grctx_init_gpc_unk_2 },
@@ -532,10 +537,11 @@ gk208_grctx_pack_ppc[] = {
532 537
533const struct gf100_grctx_func 538const struct gf100_grctx_func
534gk208_grctx = { 539gk208_grctx = {
535 .main = gk104_grctx_generate_main, 540 .main = gf100_grctx_generate_main,
536 .unkn = gk104_grctx_generate_unkn, 541 .unkn = gk104_grctx_generate_unkn,
537 .hub = gk208_grctx_pack_hub, 542 .hub = gk208_grctx_pack_hub,
538 .gpc = gk208_grctx_pack_gpc, 543 .gpc_0 = gk208_grctx_pack_gpc_0,
544 .gpc_1 = gk208_grctx_pack_gpc_1,
539 .zcull = gf100_grctx_pack_zcull, 545 .zcull = gf100_grctx_pack_zcull,
540 .tpc = gk208_grctx_pack_tpc, 546 .tpc = gk208_grctx_pack_tpc,
541 .ppc = gk208_grctx_pack_ppc, 547 .ppc = gk208_grctx_pack_ppc,
@@ -552,4 +558,12 @@ gk208_grctx = {
552 .attrib_nr = 0x218, 558 .attrib_nr = 0x218,
553 .alpha_nr_max = 0x7ff, 559 .alpha_nr_max = 0x7ff,
554 .alpha_nr = 0x648, 560 .alpha_nr = 0x648,
561 .patch_ltc = gk104_grctx_generate_patch_ltc,
562 .sm_id = gf100_grctx_generate_sm_id,
563 .tpc_nr = gf100_grctx_generate_tpc_nr,
564 .rop_mapping = gf117_grctx_generate_rop_mapping,
565 .alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
566 .dist_skip_table = gf117_grctx_generate_dist_skip_table,
567 .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
568 .r418800 = gk104_grctx_generate_r418800,
555}; 569};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index da7c35a6a3d2..896d473dcc0f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -42,10 +42,7 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
42 42
43 grctx->unkn(gr); 43 grctx->unkn(gr);
44 44
45 gf100_grctx_generate_tpcid(gr); 45 gf100_grctx_generate_floorsweep(gr);
46 gf100_grctx_generate_r406028(gr);
47 gk104_grctx_generate_r418bb8(gr);
48 gf100_grctx_generate_r406800(gr);
49 46
50 for (i = 0; i < 8; i++) 47 for (i = 0; i < 8; i++)
51 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000); 48 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
@@ -82,4 +79,8 @@ gk20a_grctx = {
82 .attrib_nr = 0x240, 79 .attrib_nr = 0x240,
83 .alpha_nr_max = 0x648 + (0x648 / 2), 80 .alpha_nr_max = 0x648 + (0x648 / 2),
84 .alpha_nr = 0x648, 81 .alpha_nr = 0x648,
82 .sm_id = gf100_grctx_generate_sm_id,
83 .tpc_nr = gf100_grctx_generate_tpc_nr,
84 .rop_mapping = gf117_grctx_generate_rop_mapping,
85 .alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
85}; 86};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 9b43d4ce3eaa..0b3964e6b36e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -744,12 +744,17 @@ gm107_grctx_init_gpc_unk_2[] = {
744}; 744};
745 745
746static const struct gf100_gr_pack 746static const struct gf100_gr_pack
747gm107_grctx_pack_gpc[] = { 747gm107_grctx_pack_gpc_0[] = {
748 { gm107_grctx_init_gpc_unk_0 }, 748 { gm107_grctx_init_gpc_unk_0 },
749 { gk208_grctx_init_prop_0 }, 749 { gk208_grctx_init_prop_0 },
750 { gm107_grctx_init_gpc_unk_1 }, 750 { gm107_grctx_init_gpc_unk_1 },
751 { gm107_grctx_init_setup_0 }, 751 { gm107_grctx_init_setup_0 },
752 { gf100_grctx_init_zcull_0 }, 752 { gf100_grctx_init_zcull_0 },
753 {}
754};
755
756static const struct gf100_gr_pack
757gm107_grctx_pack_gpc_1[] = {
753 { gk208_grctx_init_crstr_0 }, 758 { gk208_grctx_init_crstr_0 },
754 { gk104_grctx_init_gpm_0 }, 759 { gk104_grctx_init_gpm_0 },
755 { gm107_grctx_init_gpc_unk_2 }, 760 { gm107_grctx_init_gpc_unk_2 },
@@ -860,6 +865,16 @@ gm107_grctx_pack_ppc[] = {
860 * PGRAPH context implementation 865 * PGRAPH context implementation
861 ******************************************************************************/ 866 ******************************************************************************/
862 867
868static void
869gm107_grctx_generate_r419e00(struct gf100_gr *gr)
870{
871 struct nvkm_device *device = gr->base.engine.subdev.device;
872 nvkm_mask(device, 0x419e00, 0x00808080, 0x00808080);
873 nvkm_mask(device, 0x419ccc, 0x80000000, 0x80000000);
874 nvkm_mask(device, 0x419f80, 0x80000000, 0x80000000);
875 nvkm_mask(device, 0x419f88, 0x80000000, 0x80000000);
876}
877
863void 878void
864gm107_grctx_generate_bundle(struct gf100_grctx *info) 879gm107_grctx_generate_bundle(struct gf100_grctx *info)
865{ 880{
@@ -931,75 +946,27 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
931} 946}
932 947
933static void 948static void
934gm107_grctx_generate_tpcid(struct gf100_gr *gr) 949gm107_grctx_generate_r406500(struct gf100_gr *gr)
935{ 950{
936 struct nvkm_device *device = gr->base.engine.subdev.device; 951 nvkm_wr32(gr->base.engine.subdev.device, 0x406500, 0x00000001);
937 int gpc, tpc, id;
938
939 for (tpc = 0, id = 0; tpc < 4; tpc++) {
940 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
941 if (tpc < gr->tpc_nr[gpc]) {
942 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
943 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
944 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
945 id++;
946 }
947
948 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c08), gr->tpc_nr[gpc]);
949 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c8c), gr->tpc_nr[gpc]);
950 }
951 }
952} 952}
953 953
954static void 954void
955gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) 955gm107_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
956{ 956{
957 struct nvkm_device *device = gr->base.engine.subdev.device; 957 struct nvkm_device *device = gr->base.engine.subdev.device;
958 const struct gf100_grctx_func *grctx = gr->func->grctx; 958 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), sm);
959 u32 idle_timeout; 959 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), sm);
960 int i; 960 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);
961
962 gf100_gr_mmio(gr, grctx->hub);
963 gf100_gr_mmio(gr, grctx->gpc);
964 gf100_gr_mmio(gr, grctx->zcull);
965 gf100_gr_mmio(gr, grctx->tpc);
966 gf100_gr_mmio(gr, grctx->ppc);
967
968 idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
969
970 grctx->bundle(info);
971 grctx->pagepool(info);
972 grctx->attrib(info);
973 grctx->unkn(gr);
974
975 gm107_grctx_generate_tpcid(gr);
976 gf100_grctx_generate_r406028(gr);
977 gk104_grctx_generate_r418bb8(gr);
978 gf100_grctx_generate_r406800(gr);
979
980 nvkm_wr32(device, 0x4064d0, 0x00000001);
981 for (i = 1; i < 8; i++)
982 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
983 nvkm_wr32(device, 0x406500, 0x00000001);
984
985 nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
986
987 gf100_gr_icmd(gr, grctx->icmd);
988 nvkm_wr32(device, 0x404154, idle_timeout);
989 gf100_gr_mthd(gr, grctx->mthd);
990
991 nvkm_mask(device, 0x419e00, 0x00808080, 0x00808080);
992 nvkm_mask(device, 0x419ccc, 0x80000000, 0x80000000);
993 nvkm_mask(device, 0x419f80, 0x80000000, 0x80000000);
994 nvkm_mask(device, 0x419f88, 0x80000000, 0x80000000);
995} 961}
996 962
997const struct gf100_grctx_func 963const struct gf100_grctx_func
998gm107_grctx = { 964gm107_grctx = {
999 .main = gm107_grctx_generate_main, 965 .main = gf100_grctx_generate_main,
1000 .unkn = gk104_grctx_generate_unkn, 966 .unkn = gk104_grctx_generate_unkn,
1001 .hub = gm107_grctx_pack_hub, 967 .hub = gm107_grctx_pack_hub,
1002 .gpc = gm107_grctx_pack_gpc, 968 .gpc_0 = gm107_grctx_pack_gpc_0,
969 .gpc_1 = gm107_grctx_pack_gpc_1,
1003 .zcull = gf100_grctx_pack_zcull, 970 .zcull = gf100_grctx_pack_zcull,
1004 .tpc = gm107_grctx_pack_tpc, 971 .tpc = gm107_grctx_pack_tpc,
1005 .ppc = gm107_grctx_pack_ppc, 972 .ppc = gm107_grctx_pack_ppc,
@@ -1016,4 +983,12 @@ gm107_grctx = {
1016 .attrib_nr = 0xaa0, 983 .attrib_nr = 0xaa0,
1017 .alpha_nr_max = 0x1800, 984 .alpha_nr_max = 0x1800,
1018 .alpha_nr = 0x1000, 985 .alpha_nr = 0x1000,
986 .sm_id = gm107_grctx_generate_sm_id,
987 .tpc_nr = gf100_grctx_generate_tpc_nr,
988 .rop_mapping = gf117_grctx_generate_rop_mapping,
989 .alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
990 .dist_skip_table = gf117_grctx_generate_dist_skip_table,
991 .r406500 = gm107_grctx_generate_r406500,
992 .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
993 .r419e00 = gm107_grctx_generate_r419e00,
1019}; 994};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
index db209d33f486..013d05a0f0f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
@@ -28,47 +28,34 @@
28 ******************************************************************************/ 28 ******************************************************************************/
29 29
30void 30void
31gm200_grctx_generate_tpcid(struct gf100_gr *gr) 31gm200_grctx_generate_r419a3c(struct gf100_gr *gr)
32{ 32{
33 struct nvkm_device *device = gr->base.engine.subdev.device; 33 struct nvkm_device *device = gr->base.engine.subdev.device;
34 int gpc, tpc, id; 34 nvkm_mask(device, 0x419a3c, 0x00000014, 0x00000000);
35}
35 36
36 for (tpc = 0, id = 0; tpc < TPC_MAX_PER_GPC; tpc++) { 37static void
37 for (gpc = 0; gpc < gr->gpc_nr; gpc++) { 38gm200_grctx_generate_r418e94(struct gf100_gr *gr)
38 if (tpc < gr->tpc_nr[gpc]) { 39{
39 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id); 40 struct nvkm_device *device = gr->base.engine.subdev.device;
40 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id); 41 nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
41 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id); 42 nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
42 id++;
43 }
44 }
45 }
46} 43}
47 44
48void 45void
49gm200_grctx_generate_405b60(struct gf100_gr *gr) 46gm200_grctx_generate_smid_config(struct gf100_gr *gr)
50{ 47{
51 struct nvkm_device *device = gr->base.engine.subdev.device; 48 struct nvkm_device *device = gr->base.engine.subdev.device;
52 const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4); 49 const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
53 u32 dist[TPC_MAX / 4] = {}; 50 u32 dist[TPC_MAX / 4] = {};
54 u32 gpcs[GPC_MAX] = {}; 51 u32 gpcs[GPC_MAX] = {};
55 u8 tpcnr[GPC_MAX]; 52 u8 sm, i;
56 int tpc, gpc, i;
57 53
58 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr)); 54 for (sm = 0; sm < gr->sm_nr; sm++) {
59 55 const u8 gpc = gr->sm[sm].gpc;
60 /* won't result in the same distribution as the binary driver where 56 const u8 tpc = gr->sm[sm].tpc;
61 * some of the gpcs have more tpcs than others, but this shall do 57 dist[sm / 4] |= ((gpc << 4) | tpc) << ((sm % 4) * 8);
62 * for the moment. the code for earlier gpus has this issue too. 58 gpcs[gpc] |= sm << (tpc * 8);
63 */
64 for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
65 do {
66 gpc = (gpc + 1) % gr->gpc_nr;
67 } while(!tpcnr[gpc]);
68 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
69
70 dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
71 gpcs[gpc] |= i << (tpc * 8);
72 } 59 }
73 60
74 for (i = 0; i < dist_nr; i++) 61 for (i = 0; i < dist_nr; i++)
@@ -77,50 +64,46 @@ gm200_grctx_generate_405b60(struct gf100_gr *gr)
77 nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]); 64 nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
78} 65}
79 66
80static void 67void
81gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) 68gm200_grctx_generate_tpc_mask(struct gf100_gr *gr)
82{ 69{
83 struct nvkm_device *device = gr->base.engine.subdev.device; 70 u32 tmp, i;
84 const struct gf100_grctx_func *grctx = gr->func->grctx;
85 u32 idle_timeout, tmp;
86 int i;
87
88 gf100_gr_mmio(gr, gr->fuc_sw_ctx);
89
90 idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
91
92 grctx->bundle(info);
93 grctx->pagepool(info);
94 grctx->attrib(info);
95 grctx->unkn(gr);
96
97 gm200_grctx_generate_tpcid(gr);
98 gf100_grctx_generate_r406028(gr);
99 gk104_grctx_generate_r418bb8(gr);
100
101 for (i = 0; i < 8; i++)
102 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
103 nvkm_wr32(device, 0x406500, 0x00000000);
104
105 nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
106
107 for (tmp = 0, i = 0; i < gr->gpc_nr; i++) 71 for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
108 tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4); 72 tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * gr->func->tpc_nr);
109 nvkm_wr32(device, 0x4041c4, tmp); 73 nvkm_wr32(gr->base.engine.subdev.device, 0x4041c4, tmp);
74}
110 75
111 gm200_grctx_generate_405b60(gr); 76void
77gm200_grctx_generate_r406500(struct gf100_gr *gr)
78{
79 nvkm_wr32(gr->base.engine.subdev.device, 0x406500, 0x00000000);
80}
112 81
113 gf100_gr_icmd(gr, gr->fuc_bundle); 82void
114 nvkm_wr32(device, 0x404154, idle_timeout); 83gm200_grctx_generate_dist_skip_table(struct gf100_gr *gr)
115 gf100_gr_mthd(gr, gr->fuc_method); 84{
85 struct nvkm_device *device = gr->base.engine.subdev.device;
86 u32 data[8] = {};
87 int gpc, ppc, i;
88
89 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
90 for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++) {
91 u8 ppc_tpcs = gr->ppc_tpc_nr[gpc][ppc];
92 u8 ppc_tpcm = gr->ppc_tpc_mask[gpc][ppc];
93 while (ppc_tpcs-- > gr->ppc_tpc_min)
94 ppc_tpcm &= ppc_tpcm - 1;
95 ppc_tpcm ^= gr->ppc_tpc_mask[gpc][ppc];
96 ((u8 *)data)[gpc] |= ppc_tpcm;
97 }
98 }
116 99
117 nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000); 100 for (i = 0; i < ARRAY_SIZE(data); i++)
118 nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000); 101 nvkm_wr32(device, 0x4064d0 + (i * 0x04), data[i]);
119} 102}
120 103
121const struct gf100_grctx_func 104const struct gf100_grctx_func
122gm200_grctx = { 105gm200_grctx = {
123 .main = gm200_grctx_generate_main, 106 .main = gf100_grctx_generate_main,
124 .unkn = gk104_grctx_generate_unkn, 107 .unkn = gk104_grctx_generate_unkn,
125 .bundle = gm107_grctx_generate_bundle, 108 .bundle = gm107_grctx_generate_bundle,
126 .bundle_size = 0x3000, 109 .bundle_size = 0x3000,
@@ -133,4 +116,13 @@ gm200_grctx = {
133 .attrib_nr = 0x400, 116 .attrib_nr = 0x400,
134 .alpha_nr_max = 0x1800, 117 .alpha_nr_max = 0x1800,
135 .alpha_nr = 0x1000, 118 .alpha_nr = 0x1000,
119 .sm_id = gm107_grctx_generate_sm_id,
120 .rop_mapping = gf117_grctx_generate_rop_mapping,
121 .dist_skip_table = gm200_grctx_generate_dist_skip_table,
122 .r406500 = gm200_grctx_generate_r406500,
123 .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
124 .tpc_mask = gm200_grctx_generate_tpc_mask,
125 .smid_config = gm200_grctx_generate_smid_config,
126 .r418e94 = gm200_grctx_generate_r418e94,
127 .r419a3c = gm200_grctx_generate_r419a3c,
136}; 128};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
index e5702e3e0a5a..a1d9e114ebeb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -22,20 +22,6 @@
22#include "ctxgf100.h" 22#include "ctxgf100.h"
23 23
24static void 24static void
25gm20b_grctx_generate_r406028(struct gf100_gr *gr)
26{
27 struct nvkm_device *device = gr->base.engine.subdev.device;
28 u32 tpc_per_gpc = 0;
29 int i;
30
31 for (i = 0; i < gr->gpc_nr; i++)
32 tpc_per_gpc |= gr->tpc_nr[i] << (4 * i);
33
34 nvkm_wr32(device, 0x406028, tpc_per_gpc);
35 nvkm_wr32(device, 0x405870, tpc_per_gpc);
36}
37
38static void
39gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) 25gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
40{ 26{
41 struct nvkm_device *device = gr->base.engine.subdev.device; 27 struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -53,9 +39,7 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
53 39
54 grctx->unkn(gr); 40 grctx->unkn(gr);
55 41
56 gm200_grctx_generate_tpcid(gr); 42 gf100_grctx_generate_floorsweep(gr);
57 gm20b_grctx_generate_r406028(gr);
58 gk104_grctx_generate_r418bb8(gr);
59 43
60 for (i = 0; i < 8; i++) 44 for (i = 0; i < 8; i++)
61 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000); 45 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
@@ -68,7 +52,7 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
68 tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4); 52 tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
69 nvkm_wr32(device, 0x4041c4, tmp); 53 nvkm_wr32(device, 0x4041c4, tmp);
70 54
71 gm200_grctx_generate_405b60(gr); 55 gm200_grctx_generate_smid_config(gr);
72 56
73 gf100_gr_wait_idle(gr); 57 gf100_gr_wait_idle(gr);
74 58
@@ -98,4 +82,6 @@ gm20b_grctx = {
98 .attrib_nr = 0x400, 82 .attrib_nr = 0x400,
99 .alpha_nr_max = 0xc00, 83 .alpha_nr_max = 0xc00,
100 .alpha_nr = 0x800, 84 .alpha_nr = 0x800,
85 .sm_id = gm107_grctx_generate_sm_id,
86 .rop_mapping = gf117_grctx_generate_rop_mapping,
101}; 87};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index 88ea322d956c..0b3326262e12 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -36,7 +36,7 @@ gp100_grctx_generate_pagepool(struct gf100_grctx *info)
36 const int s = 8; 36 const int s = 8;
37 const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true); 37 const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
38 mmio_refn(info, 0x40800c, 0x00000000, s, b); 38 mmio_refn(info, 0x40800c, 0x00000000, s, b);
39 mmio_wr32(info, 0x408010, 0x80000000); 39 mmio_wr32(info, 0x408010, 0x8007d800);
40 mmio_refn(info, 0x419004, 0x00000000, s, b); 40 mmio_refn(info, 0x419004, 0x00000000, s, b);
41 mmio_wr32(info, 0x419008, 0x00000000); 41 mmio_wr32(info, 0x419008, 0x00000000);
42} 42}
@@ -48,14 +48,17 @@ gp100_grctx_generate_attrib(struct gf100_grctx *info)
48 const struct gf100_grctx_func *grctx = gr->func->grctx; 48 const struct gf100_grctx_func *grctx = gr->func->grctx;
49 const u32 alpha = grctx->alpha_nr; 49 const u32 alpha = grctx->alpha_nr;
50 const u32 attrib = grctx->attrib_nr; 50 const u32 attrib = grctx->attrib_nr;
51 const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
52 const u32 size = roundup(gr->tpc_total * pertpc, 0x80);
53 const int s = 12; 51 const int s = 12;
54 const int b = mmio_vram(info, size, (1 << s), false);
55 const int max_batches = 0xffff; 52 const int max_batches = 0xffff;
53 u32 size = grctx->alpha_nr_max * gr->tpc_total;
56 u32 ao = 0; 54 u32 ao = 0;
57 u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total; 55 u32 bo = ao + size;
58 int gpc, ppc, n = 0; 56 int gpc, ppc, b, n = 0;
57
58 for (gpc = 0; gpc < gr->gpc_nr; gpc++)
59 size += grctx->attrib_nr_max * gr->ppc_nr[gpc] * gr->ppc_tpc_max;
60 size = ((size * 0x20) + 128) & ~127;
61 b = mmio_vram(info, size, (1 << s), false);
59 62
60 mmio_refn(info, 0x418810, 0x80000000, s, b); 63 mmio_refn(info, 0x418810, 0x80000000, s, b);
61 mmio_refn(info, 0x419848, 0x10000000, s, b); 64 mmio_refn(info, 0x419848, 0x10000000, s, b);
@@ -69,7 +72,7 @@ gp100_grctx_generate_attrib(struct gf100_grctx *info)
69 for (gpc = 0; gpc < gr->gpc_nr; gpc++) { 72 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
70 for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) { 73 for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
71 const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc]; 74 const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
72 const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc]; 75 const u32 bs = attrib * gr->ppc_tpc_max;
73 const u32 u = 0x418ea0 + (n * 0x04); 76 const u32 u = 0x418ea0 + (n * 0x04);
74 const u32 o = PPC_UNIT(gpc, ppc, 0); 77 const u32 o = PPC_UNIT(gpc, ppc, 0);
75 if (!(gr->ppc_mask[gpc] & (1 << ppc))) 78 if (!(gr->ppc_mask[gpc] & (1 << ppc)))
@@ -77,7 +80,7 @@ gp100_grctx_generate_attrib(struct gf100_grctx *info)
77 mmio_wr32(info, o + 0xc0, bs); 80 mmio_wr32(info, o + 0xc0, bs);
78 mmio_wr32(info, o + 0xf4, bo); 81 mmio_wr32(info, o + 0xf4, bo);
79 mmio_wr32(info, o + 0xf0, bs); 82 mmio_wr32(info, o + 0xf0, bs);
80 bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc]; 83 bo += grctx->attrib_nr_max * gr->ppc_tpc_max;
81 mmio_wr32(info, o + 0xe4, as); 84 mmio_wr32(info, o + 0xe4, as);
82 mmio_wr32(info, o + 0xf8, ao); 85 mmio_wr32(info, o + 0xf8, ao);
83 ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc]; 86 ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
@@ -89,79 +92,30 @@ gp100_grctx_generate_attrib(struct gf100_grctx *info)
89 mmio_wr32(info, 0x41befc, 0x00000000); 92 mmio_wr32(info, 0x41befc, 0x00000000);
90} 93}
91 94
92static void 95void
93gp100_grctx_generate_405b60(struct gf100_gr *gr) 96gp100_grctx_generate_smid_config(struct gf100_gr *gr)
94{ 97{
95 struct nvkm_device *device = gr->base.engine.subdev.device; 98 struct nvkm_device *device = gr->base.engine.subdev.device;
96 const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4); 99 const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
97 u32 dist[TPC_MAX / 4] = {}; 100 u32 dist[TPC_MAX / 4] = {}, gpcs[16] = {};
98 u32 gpcs[GPC_MAX * 2] = {}; 101 u8 sm, i;
99 u8 tpcnr[GPC_MAX]; 102
100 int tpc, gpc, i; 103 for (sm = 0; sm < gr->sm_nr; sm++) {
101 104 const u8 gpc = gr->sm[sm].gpc;
102 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr)); 105 const u8 tpc = gr->sm[sm].tpc;
103 106 dist[sm / 4] |= ((gpc << 4) | tpc) << ((sm % 4) * 8);
104 /* won't result in the same distribution as the binary driver where 107 gpcs[gpc + (gr->func->gpc_nr * (tpc / 4))] |= sm << ((tpc % 4) * 8);
105 * some of the gpcs have more tpcs than others, but this shall do
106 * for the moment. the code for earlier gpus has this issue too.
107 */
108 for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
109 do {
110 gpc = (gpc + 1) % gr->gpc_nr;
111 } while(!tpcnr[gpc]);
112 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
113
114 dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
115 gpcs[gpc + (gr->gpc_nr * (tpc / 4))] |= i << (tpc * 8);
116 } 108 }
117 109
118 for (i = 0; i < dist_nr; i++) 110 for (i = 0; i < dist_nr; i++)
119 nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]); 111 nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
120 for (i = 0; i < gr->gpc_nr * 2; i++) 112 for (i = 0; i < ARRAY_SIZE(gpcs); i++)
121 nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]); 113 nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
122} 114}
123 115
124void
125gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
126{
127 struct nvkm_device *device = gr->base.engine.subdev.device;
128 const struct gf100_grctx_func *grctx = gr->func->grctx;
129 u32 idle_timeout, tmp;
130 int i;
131
132 gf100_gr_mmio(gr, gr->fuc_sw_ctx);
133
134 idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
135
136 grctx->pagepool(info);
137 grctx->bundle(info);
138 grctx->attrib(info);
139 grctx->unkn(gr);
140
141 gm200_grctx_generate_tpcid(gr);
142 gf100_grctx_generate_r406028(gr);
143 gk104_grctx_generate_r418bb8(gr);
144
145 for (i = 0; i < 8; i++)
146 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
147 nvkm_wr32(device, 0x406500, 0x00000000);
148
149 nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
150
151 for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
152 tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 5);
153 nvkm_wr32(device, 0x4041c4, tmp);
154
155 gp100_grctx_generate_405b60(gr);
156
157 gf100_gr_icmd(gr, gr->fuc_bundle);
158 nvkm_wr32(device, 0x404154, idle_timeout);
159 gf100_gr_mthd(gr, gr->fuc_method);
160}
161
162const struct gf100_grctx_func 116const struct gf100_grctx_func
163gp100_grctx = { 117gp100_grctx = {
164 .main = gp100_grctx_generate_main, 118 .main = gf100_grctx_generate_main,
165 .unkn = gk104_grctx_generate_unkn, 119 .unkn = gk104_grctx_generate_unkn,
166 .bundle = gm107_grctx_generate_bundle, 120 .bundle = gm107_grctx_generate_bundle,
167 .bundle_size = 0x3000, 121 .bundle_size = 0x3000,
@@ -174,4 +128,12 @@ gp100_grctx = {
174 .attrib_nr = 0x440, 128 .attrib_nr = 0x440,
175 .alpha_nr_max = 0xc00, 129 .alpha_nr_max = 0xc00,
176 .alpha_nr = 0x800, 130 .alpha_nr = 0x800,
131 .sm_id = gm107_grctx_generate_sm_id,
132 .rop_mapping = gf117_grctx_generate_rop_mapping,
133 .dist_skip_table = gm200_grctx_generate_dist_skip_table,
134 .r406500 = gm200_grctx_generate_r406500,
135 .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
136 .tpc_mask = gm200_grctx_generate_tpc_mask,
137 .smid_config = gp100_grctx_generate_smid_config,
138 .r419a3c = gm200_grctx_generate_r419a3c,
177}; 139};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
index 7a66b4c2eb18..daee17bf7d0d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -29,6 +29,13 @@
29 * PGRAPH context implementation 29 * PGRAPH context implementation
30 ******************************************************************************/ 30 ******************************************************************************/
31 31
32static void
33gp102_grctx_generate_r408840(struct gf100_gr *gr)
34{
35 struct nvkm_device *device = gr->base.engine.subdev.device;
36 nvkm_mask(device, 0x408840, 0x00000003, 0x00000000);
37}
38
32void 39void
33gp102_grctx_generate_attrib(struct gf100_grctx *info) 40gp102_grctx_generate_attrib(struct gf100_grctx *info)
34{ 41{
@@ -36,14 +43,18 @@ gp102_grctx_generate_attrib(struct gf100_grctx *info)
36 const struct gf100_grctx_func *grctx = gr->func->grctx; 43 const struct gf100_grctx_func *grctx = gr->func->grctx;
37 const u32 alpha = grctx->alpha_nr; 44 const u32 alpha = grctx->alpha_nr;
38 const u32 attrib = grctx->attrib_nr; 45 const u32 attrib = grctx->attrib_nr;
39 const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max); 46 const u32 gfxp = grctx->gfxp_nr;
40 const u32 size = roundup(gr->tpc_total * pertpc, 0x80);
41 const int s = 12; 47 const int s = 12;
42 const int b = mmio_vram(info, size, (1 << s), false);
43 const int max_batches = 0xffff; 48 const int max_batches = 0xffff;
49 u32 size = grctx->alpha_nr_max * gr->tpc_total;
44 u32 ao = 0; 50 u32 ao = 0;
45 u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total; 51 u32 bo = ao + size;
46 int gpc, ppc, n = 0; 52 int gpc, ppc, b, n = 0;
53
54 for (gpc = 0; gpc < gr->gpc_nr; gpc++)
55 size += grctx->gfxp_nr * gr->ppc_nr[gpc] * gr->ppc_tpc_max;
56 size = ((size * 0x20) + 128) & ~127;
57 b = mmio_vram(info, size, (1 << s), false);
47 58
48 mmio_refn(info, 0x418810, 0x80000000, s, b); 59 mmio_refn(info, 0x418810, 0x80000000, s, b);
49 mmio_refn(info, 0x419848, 0x10000000, s, b); 60 mmio_refn(info, 0x419848, 0x10000000, s, b);
@@ -57,17 +68,18 @@ gp102_grctx_generate_attrib(struct gf100_grctx *info)
57 for (gpc = 0; gpc < gr->gpc_nr; gpc++) { 68 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
58 for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) { 69 for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
59 const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc]; 70 const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
60 const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc]; 71 const u32 bs = attrib * gr->ppc_tpc_max;
72 const u32 gs = gfxp * gr->ppc_tpc_max;
61 const u32 u = 0x418ea0 + (n * 0x04); 73 const u32 u = 0x418ea0 + (n * 0x04);
62 const u32 o = PPC_UNIT(gpc, ppc, 0); 74 const u32 o = PPC_UNIT(gpc, ppc, 0);
63 const u32 p = GPC_UNIT(gpc, 0xc44 + (ppc * 4)); 75 const u32 p = GPC_UNIT(gpc, 0xc44 + (ppc * 4));
64 if (!(gr->ppc_mask[gpc] & (1 << ppc))) 76 if (!(gr->ppc_mask[gpc] & (1 << ppc)))
65 continue; 77 continue;
66 mmio_wr32(info, o + 0xc0, bs); 78 mmio_wr32(info, o + 0xc0, gs);
67 mmio_wr32(info, p, bs); 79 mmio_wr32(info, p, bs);
68 mmio_wr32(info, o + 0xf4, bo); 80 mmio_wr32(info, o + 0xf4, bo);
69 mmio_wr32(info, o + 0xf0, bs); 81 mmio_wr32(info, o + 0xf0, bs);
70 bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc]; 82 bo += gs;
71 mmio_wr32(info, o + 0xe4, as); 83 mmio_wr32(info, o + 0xe4, as);
72 mmio_wr32(info, o + 0xf8, ao); 84 mmio_wr32(info, o + 0xf8, ao);
73 ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc]; 85 ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
@@ -81,7 +93,7 @@ gp102_grctx_generate_attrib(struct gf100_grctx *info)
81 93
82const struct gf100_grctx_func 94const struct gf100_grctx_func
83gp102_grctx = { 95gp102_grctx = {
84 .main = gp100_grctx_generate_main, 96 .main = gf100_grctx_generate_main,
85 .unkn = gk104_grctx_generate_unkn, 97 .unkn = gk104_grctx_generate_unkn,
86 .bundle = gm107_grctx_generate_bundle, 98 .bundle = gm107_grctx_generate_bundle,
87 .bundle_size = 0x3000, 99 .bundle_size = 0x3000,
@@ -90,8 +102,18 @@ gp102_grctx = {
90 .pagepool = gp100_grctx_generate_pagepool, 102 .pagepool = gp100_grctx_generate_pagepool,
91 .pagepool_size = 0x20000, 103 .pagepool_size = 0x20000,
92 .attrib = gp102_grctx_generate_attrib, 104 .attrib = gp102_grctx_generate_attrib,
93 .attrib_nr_max = 0x5d4, 105 .attrib_nr_max = 0x4b0,
94 .attrib_nr = 0x320, 106 .attrib_nr = 0x320,
95 .alpha_nr_max = 0xc00, 107 .alpha_nr_max = 0xc00,
96 .alpha_nr = 0x800, 108 .alpha_nr = 0x800,
109 .gfxp_nr = 0xba8,
110 .sm_id = gm107_grctx_generate_sm_id,
111 .rop_mapping = gf117_grctx_generate_rop_mapping,
112 .dist_skip_table = gm200_grctx_generate_dist_skip_table,
113 .r406500 = gm200_grctx_generate_r406500,
114 .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
115 .tpc_mask = gm200_grctx_generate_tpc_mask,
116 .smid_config = gp100_grctx_generate_smid_config,
117 .r419a3c = gm200_grctx_generate_r419a3c,
118 .r408840 = gp102_grctx_generate_r408840,
97}; 119};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c
new file mode 100644
index 000000000000..3b85e3d326b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c
@@ -0,0 +1,48 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "ctxgf100.h"
23
24const struct gf100_grctx_func
25gp104_grctx = {
26 .main = gf100_grctx_generate_main,
27 .unkn = gk104_grctx_generate_unkn,
28 .bundle = gm107_grctx_generate_bundle,
29 .bundle_size = 0x3000,
30 .bundle_min_gpm_fifo_depth = 0x180,
31 .bundle_token_limit = 0x900,
32 .pagepool = gp100_grctx_generate_pagepool,
33 .pagepool_size = 0x20000,
34 .attrib = gp102_grctx_generate_attrib,
35 .attrib_nr_max = 0x4b0,
36 .attrib_nr = 0x320,
37 .alpha_nr_max = 0xc00,
38 .alpha_nr = 0x800,
39 .gfxp_nr = 0xba8,
40 .sm_id = gm107_grctx_generate_sm_id,
41 .rop_mapping = gf117_grctx_generate_rop_mapping,
42 .dist_skip_table = gm200_grctx_generate_dist_skip_table,
43 .r406500 = gm200_grctx_generate_r406500,
44 .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
45 .tpc_mask = gm200_grctx_generate_tpc_mask,
46 .smid_config = gp100_grctx_generate_smid_config,
47 .r419a3c = gm200_grctx_generate_r419a3c,
48};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
index 8da91a0b3bd2..5060c5ee5ce0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
@@ -31,7 +31,7 @@
31 31
32const struct gf100_grctx_func 32const struct gf100_grctx_func
33gp107_grctx = { 33gp107_grctx = {
34 .main = gp100_grctx_generate_main, 34 .main = gf100_grctx_generate_main,
35 .unkn = gk104_grctx_generate_unkn, 35 .unkn = gk104_grctx_generate_unkn,
36 .bundle = gm107_grctx_generate_bundle, 36 .bundle = gm107_grctx_generate_bundle,
37 .bundle_size = 0x3000, 37 .bundle_size = 0x3000,
@@ -44,4 +44,13 @@ gp107_grctx = {
44 .attrib_nr = 0x540, 44 .attrib_nr = 0x540,
45 .alpha_nr_max = 0xc00, 45 .alpha_nr_max = 0xc00,
46 .alpha_nr = 0x800, 46 .alpha_nr = 0x800,
47 .gfxp_nr = 0xe94,
48 .sm_id = gm107_grctx_generate_sm_id,
49 .rop_mapping = gf117_grctx_generate_rop_mapping,
50 .dist_skip_table = gm200_grctx_generate_dist_skip_table,
51 .r406500 = gm200_grctx_generate_r406500,
52 .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
53 .tpc_mask = gm200_grctx_generate_tpc_mask,
54 .smid_config = gp100_grctx_generate_smid_config,
55 .r419a3c = gm200_grctx_generate_r419a3c,
47}; 56};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
new file mode 100644
index 000000000000..0990765ef191
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
@@ -0,0 +1,215 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "ctxgf100.h"
23
24/*******************************************************************************
25 * PGRAPH context implementation
26 ******************************************************************************/
27
28static const struct gf100_gr_init
29gv100_grctx_init_sw_veid_bundle_init_0[] = {
30 { 0x00001000, 64, 0x00100000, 0x00000008 },
31 { 0x00000941, 64, 0x00100000, 0x00000000 },
32 { 0x0000097e, 64, 0x00100000, 0x00000000 },
33 { 0x0000097f, 64, 0x00100000, 0x00000100 },
34 { 0x0000035c, 64, 0x00100000, 0x00000000 },
35 { 0x0000035d, 64, 0x00100000, 0x00000000 },
36 { 0x00000a08, 64, 0x00100000, 0x00000000 },
37 { 0x00000a09, 64, 0x00100000, 0x00000000 },
38 { 0x00000a0a, 64, 0x00100000, 0x00000000 },
39 { 0x00000352, 64, 0x00100000, 0x00000000 },
40 { 0x00000353, 64, 0x00100000, 0x00000000 },
41 { 0x00000358, 64, 0x00100000, 0x00000000 },
42 { 0x00000359, 64, 0x00100000, 0x00000000 },
43 { 0x00000370, 64, 0x00100000, 0x00000000 },
44 { 0x00000371, 64, 0x00100000, 0x00000000 },
45 { 0x00000372, 64, 0x00100000, 0x000fffff },
46 { 0x00000366, 64, 0x00100000, 0x00000000 },
47 { 0x00000367, 64, 0x00100000, 0x00000000 },
48 { 0x00000368, 64, 0x00100000, 0x00000fff },
49 { 0x00000623, 64, 0x00100000, 0x00000000 },
50 { 0x00000624, 64, 0x00100000, 0x00000000 },
51 { 0x0001e100, 1, 0x00000001, 0x02000001 },
52 {}
53};
54
55static const struct gf100_gr_pack
56gv100_grctx_pack_sw_veid_bundle_init[] = {
57 { gv100_grctx_init_sw_veid_bundle_init_0 },
58 {}
59};
60
61static void
62gv100_grctx_generate_attrib(struct gf100_grctx *info)
63{
64 struct gf100_gr *gr = info->gr;
65 const struct gf100_grctx_func *grctx = gr->func->grctx;
66 const u32 alpha = grctx->alpha_nr;
67 const u32 attrib = grctx->attrib_nr;
68 const u32 gfxp = grctx->gfxp_nr;
69 const int s = 12;
70 const int max_batches = 0xffff;
71 u32 size = grctx->alpha_nr_max * gr->tpc_total;
72 u32 ao = 0;
73 u32 bo = ao + size;
74 int gpc, ppc, b, n = 0;
75
76 size += grctx->gfxp_nr * gr->tpc_total;
77 size = ((size * 0x20) + 128) & ~127;
78 b = mmio_vram(info, size, (1 << s), false);
79
80 mmio_refn(info, 0x418810, 0x80000000, s, b);
81 mmio_refn(info, 0x419848, 0x10000000, s, b);
82 mmio_refn(info, 0x419c2c, 0x10000000, s, b);
83 mmio_refn(info, 0x419e00, 0x00000000, s, b);
84 mmio_wr32(info, 0x419e04, 0x80000000 | size >> 7);
85 mmio_wr32(info, 0x405830, attrib);
86 mmio_wr32(info, 0x40585c, alpha);
87 mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
88
89 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
90 for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
91 const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
92 const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
93 const u32 gs = gfxp * gr->ppc_tpc_nr[gpc][ppc];
94 const u32 u = 0x418ea0 + (n * 0x04);
95 const u32 o = PPC_UNIT(gpc, ppc, 0);
96 if (!(gr->ppc_mask[gpc] & (1 << ppc)))
97 continue;
98 mmio_wr32(info, o + 0xc0, gs);
99 mmio_wr32(info, o + 0xf4, bo);
100 mmio_wr32(info, o + 0xf0, bs);
101 bo += gs;
102 mmio_wr32(info, o + 0xe4, as);
103 mmio_wr32(info, o + 0xf8, ao);
104 ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
105 mmio_wr32(info, u, bs);
106 }
107 }
108
109 mmio_wr32(info, 0x4181e4, 0x00000100);
110 mmio_wr32(info, 0x41befc, 0x00000100);
111}
112
113static void
114gv100_grctx_generate_rop_mapping(struct gf100_gr *gr)
115{
116 struct nvkm_device *device = gr->base.engine.subdev.device;
117 u32 data;
118 int i, j;
119
120 /* Pack tile map into register format. */
121 nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
122 gr->screen_tile_row_offset);
123 for (i = 0; i < 11; i++) {
124 for (data = 0, j = 0; j < 6; j++)
125 data |= (gr->tile[i * 6 + j] & 0x1f) << (j * 5);
126 nvkm_wr32(device, 0x418b08 + (i * 4), data);
127 nvkm_wr32(device, 0x41bf00 + (i * 4), data);
128 nvkm_wr32(device, 0x40780c + (i * 4), data);
129 }
130
131 /* GPC_BROADCAST.TP_BROADCAST */
132 nvkm_wr32(device, 0x41bfd0, (gr->tpc_total << 8) |
133 gr->screen_tile_row_offset);
134 for (i = 0, j = 1; i < 5; i++, j += 4) {
135 u8 v19 = (1 << (j + 0)) % gr->tpc_total;
136 u8 v20 = (1 << (j + 1)) % gr->tpc_total;
137 u8 v21 = (1 << (j + 2)) % gr->tpc_total;
138 u8 v22 = (1 << (j + 3)) % gr->tpc_total;
139 nvkm_wr32(device, 0x41bfb0 + (i * 4), (v22 << 24) |
140 (v21 << 16) |
141 (v20 << 8) |
142 v19);
143 }
144
145 /* UNK78xx */
146 nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
147 gr->screen_tile_row_offset);
148}
149
150static void
151gv100_grctx_generate_r400088(struct gf100_gr *gr, bool on)
152{
153 struct nvkm_device *device = gr->base.engine.subdev.device;
154 nvkm_mask(device, 0x400088, 0x00060000, on ? 0x00060000 : 0x00000000);
155}
156
157static void
158gv100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
159{
160 struct nvkm_device *device = gr->base.engine.subdev.device;
161 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x608), sm);
162 nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), sm);
163 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);
164}
165
166static void
167gv100_grctx_generate_unkn(struct gf100_gr *gr)
168{
169 struct nvkm_device *device = gr->base.engine.subdev.device;
170 nvkm_mask(device, 0x41980c, 0x00000010, 0x00000010);
171 nvkm_mask(device, 0x41be08, 0x00000004, 0x00000004);
172 nvkm_mask(device, 0x4064c0, 0x80000000, 0x80000000);
173 nvkm_mask(device, 0x405800, 0x08000000, 0x08000000);
174 nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
175}
176
177static void
178gv100_grctx_unkn88c(struct gf100_gr *gr, bool on)
179{
180 struct nvkm_device *device = gr->base.engine.subdev.device;
181 const u32 mask = 0x00000010, data = on ? mask : 0x00000000;
182 nvkm_mask(device, 0x40988c, mask, data);
183 nvkm_rd32(device, 0x40988c);
184 nvkm_mask(device, 0x41a88c, mask, data);
185 nvkm_rd32(device, 0x41a88c);
186 nvkm_mask(device, 0x408a14, mask, data);
187 nvkm_rd32(device, 0x408a14);
188}
189
190const struct gf100_grctx_func
191gv100_grctx = {
192 .unkn88c = gv100_grctx_unkn88c,
193 .main = gf100_grctx_generate_main,
194 .unkn = gv100_grctx_generate_unkn,
195 .sw_veid_bundle_init = gv100_grctx_pack_sw_veid_bundle_init,
196 .bundle = gm107_grctx_generate_bundle,
197 .bundle_size = 0x3000,
198 .bundle_min_gpm_fifo_depth = 0x180,
199 .bundle_token_limit = 0x1680,
200 .pagepool = gp100_grctx_generate_pagepool,
201 .pagepool_size = 0x20000,
202 .attrib = gv100_grctx_generate_attrib,
203 .attrib_nr_max = 0x6c0,
204 .attrib_nr = 0x480,
205 .alpha_nr_max = 0xc00,
206 .alpha_nr = 0x800,
207 .gfxp_nr = 0xd10,
208 .sm_id = gv100_grctx_generate_sm_id,
209 .rop_mapping = gv100_grctx_generate_rop_mapping,
210 .dist_skip_table = gm200_grctx_generate_dist_skip_table,
211 .r406500 = gm200_grctx_generate_r406500,
212 .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
213 .smid_config = gp100_grctx_generate_smid_config,
214 .r400088 = gv100_grctx_generate_r400088,
215};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 2f8dc107047d..70d3d41e616c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -32,6 +32,7 @@
32#include <subdev/fb.h> 32#include <subdev/fb.h>
33#include <subdev/mc.h> 33#include <subdev/mc.h>
34#include <subdev/pmu.h> 34#include <subdev/pmu.h>
35#include <subdev/therm.h>
35#include <subdev/timer.h> 36#include <subdev/timer.h>
36#include <engine/fifo.h> 37#include <engine/fifo.h>
37 38
@@ -91,7 +92,7 @@ gf100_gr_zbc_color_get(struct gf100_gr *gr, int format,
91 memcpy(gr->zbc_color[zbc].l2, l2, sizeof(gr->zbc_color[zbc].l2)); 92 memcpy(gr->zbc_color[zbc].l2, l2, sizeof(gr->zbc_color[zbc].l2));
92 gr->zbc_color[zbc].format = format; 93 gr->zbc_color[zbc].format = format;
93 nvkm_ltc_zbc_color_get(ltc, zbc, l2); 94 nvkm_ltc_zbc_color_get(ltc, zbc, l2);
94 gf100_gr_zbc_clear_color(gr, zbc); 95 gr->func->zbc->clear_color(gr, zbc);
95 return zbc; 96 return zbc;
96} 97}
97 98
@@ -136,10 +137,16 @@ gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
136 gr->zbc_depth[zbc].ds = ds; 137 gr->zbc_depth[zbc].ds = ds;
137 gr->zbc_depth[zbc].l2 = l2; 138 gr->zbc_depth[zbc].l2 = l2;
138 nvkm_ltc_zbc_depth_get(ltc, zbc, l2); 139 nvkm_ltc_zbc_depth_get(ltc, zbc, l2);
139 gf100_gr_zbc_clear_depth(gr, zbc); 140 gr->func->zbc->clear_depth(gr, zbc);
140 return zbc; 141 return zbc;
141} 142}
142 143
144const struct gf100_gr_func_zbc
145gf100_gr_zbc = {
146 .clear_color = gf100_gr_zbc_clear_color,
147 .clear_depth = gf100_gr_zbc_clear_depth,
148};
149
143/******************************************************************************* 150/*******************************************************************************
144 * Graphics object classes 151 * Graphics object classes
145 ******************************************************************************/ 152 ******************************************************************************/
@@ -743,21 +750,31 @@ gf100_gr_zbc_init(struct gf100_gr *gr)
743 const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 750 const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
744 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 }; 751 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };
745 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc; 752 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
746 int index; 753 int index, c = ltc->zbc_min, d = ltc->zbc_min, s = ltc->zbc_min;
747 754
748 if (!gr->zbc_color[0].format) { 755 if (!gr->zbc_color[0].format) {
749 gf100_gr_zbc_color_get(gr, 1, & zero[0], &zero[4]); 756 gf100_gr_zbc_color_get(gr, 1, & zero[0], &zero[4]); c++;
750 gf100_gr_zbc_color_get(gr, 2, & one[0], &one[4]); 757 gf100_gr_zbc_color_get(gr, 2, & one[0], &one[4]); c++;
751 gf100_gr_zbc_color_get(gr, 4, &f32_0[0], &f32_0[4]); 758 gf100_gr_zbc_color_get(gr, 4, &f32_0[0], &f32_0[4]); c++;
752 gf100_gr_zbc_color_get(gr, 4, &f32_1[0], &f32_1[4]); 759 gf100_gr_zbc_color_get(gr, 4, &f32_1[0], &f32_1[4]); c++;
753 gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000); 760 gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000); d++;
754 gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000); 761 gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000); d++;
755 } 762 if (gr->func->zbc->stencil_get) {
756 763 gr->func->zbc->stencil_get(gr, 1, 0x00, 0x00); s++;
757 for (index = ltc->zbc_min; index <= ltc->zbc_max; index++) 764 gr->func->zbc->stencil_get(gr, 1, 0x01, 0x01); s++;
758 gf100_gr_zbc_clear_color(gr, index); 765 gr->func->zbc->stencil_get(gr, 1, 0xff, 0xff); s++;
759 for (index = ltc->zbc_min; index <= ltc->zbc_max; index++) 766 }
760 gf100_gr_zbc_clear_depth(gr, index); 767 }
768
769 for (index = c; index <= ltc->zbc_max; index++)
770 gr->func->zbc->clear_color(gr, index);
771 for (index = d; index <= ltc->zbc_max; index++)
772 gr->func->zbc->clear_depth(gr, index);
773
774 if (gr->func->zbc->clear_stencil) {
775 for (index = s; index <= ltc->zbc_max; index++)
776 gr->func->zbc->clear_stencil(gr, index);
777 }
761} 778}
762 779
763/** 780/**
@@ -970,7 +987,7 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
970 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000); 987 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
971} 988}
972 989
973static const struct nvkm_enum gf100_mp_warp_error[] = { 990const struct nvkm_enum gf100_mp_warp_error[] = {
974 { 0x01, "STACK_ERROR" }, 991 { 0x01, "STACK_ERROR" },
975 { 0x02, "API_STACK_ERROR" }, 992 { 0x02, "API_STACK_ERROR" },
976 { 0x03, "RET_EMPTY_STACK_ERROR" }, 993 { 0x03, "RET_EMPTY_STACK_ERROR" },
@@ -995,7 +1012,7 @@ static const struct nvkm_enum gf100_mp_warp_error[] = {
995 {} 1012 {}
996}; 1013};
997 1014
998static const struct nvkm_bitfield gf100_mp_global_error[] = { 1015const struct nvkm_bitfield gf100_mp_global_error[] = {
999 { 0x00000001, "SM_TO_SM_FAULT" }, 1016 { 0x00000001, "SM_TO_SM_FAULT" },
1000 { 0x00000002, "L1_ERROR" }, 1017 { 0x00000002, "L1_ERROR" },
1001 { 0x00000004, "MULTIPLE_WARP_ERRORS" }, 1018 { 0x00000004, "MULTIPLE_WARP_ERRORS" },
@@ -1009,7 +1026,7 @@ static const struct nvkm_bitfield gf100_mp_global_error[] = {
1009 {} 1026 {}
1010}; 1027};
1011 1028
1012static void 1029void
1013gf100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc) 1030gf100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
1014{ 1031{
1015 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1032 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
@@ -1045,7 +1062,7 @@ gf100_gr_trap_tpc(struct gf100_gr *gr, int gpc, int tpc)
1045 } 1062 }
1046 1063
1047 if (stat & 0x00000002) { 1064 if (stat & 0x00000002) {
1048 gf100_gr_trap_mp(gr, gpc, tpc); 1065 gr->func->trap_mp(gr, gpc, tpc);
1049 stat &= ~0x00000002; 1066 stat &= ~0x00000002;
1050 } 1067 }
1051 1068
@@ -1611,7 +1628,8 @@ gf100_gr_init_ctxctl_int(struct gf100_gr *gr)
1611 1628
1612 /* load register lists */ 1629 /* load register lists */
1613 gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000); 1630 gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
1614 gf100_gr_init_csdata(gr, grctx->gpc, 0x41a000, 0x000, 0x418000); 1631 gf100_gr_init_csdata(gr, grctx->gpc_0, 0x41a000, 0x000, 0x418000);
1632 gf100_gr_init_csdata(gr, grctx->gpc_1, 0x41a000, 0x000, 0x418000);
1615 gf100_gr_init_csdata(gr, grctx->tpc, 0x41a000, 0x004, 0x419800); 1633 gf100_gr_init_csdata(gr, grctx->tpc, 0x41a000, 0x004, 0x419800);
1616 gf100_gr_init_csdata(gr, grctx->ppc, 0x41a000, 0x008, 0x41be00); 1634 gf100_gr_init_csdata(gr, grctx->ppc, 0x41a000, 0x008, 0x41be00);
1617 1635
@@ -1651,6 +1669,97 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
1651 return ret; 1669 return ret;
1652} 1670}
1653 1671
1672void
1673gf100_gr_oneinit_sm_id(struct gf100_gr *gr)
1674{
1675 int tpc, gpc;
1676 for (tpc = 0; tpc < gr->tpc_max; tpc++) {
1677 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
1678 if (tpc < gr->tpc_nr[gpc]) {
1679 gr->sm[gr->sm_nr].gpc = gpc;
1680 gr->sm[gr->sm_nr].tpc = tpc;
1681 gr->sm_nr++;
1682 }
1683 }
1684 }
1685}
1686
1687void
1688gf100_gr_oneinit_tiles(struct gf100_gr *gr)
1689{
1690 static const u8 primes[] = {
1691 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61
1692 };
1693 int init_frac[GPC_MAX], init_err[GPC_MAX], run_err[GPC_MAX], i, j;
1694 u32 mul_factor, comm_denom;
1695 u8 gpc_map[GPC_MAX];
1696 bool sorted;
1697
1698 switch (gr->tpc_total) {
1699 case 15: gr->screen_tile_row_offset = 0x06; break;
1700 case 14: gr->screen_tile_row_offset = 0x05; break;
1701 case 13: gr->screen_tile_row_offset = 0x02; break;
1702 case 11: gr->screen_tile_row_offset = 0x07; break;
1703 case 10: gr->screen_tile_row_offset = 0x06; break;
1704 case 7:
1705 case 5: gr->screen_tile_row_offset = 0x01; break;
1706 case 3: gr->screen_tile_row_offset = 0x02; break;
1707 case 2:
1708 case 1: gr->screen_tile_row_offset = 0x01; break;
1709 default: gr->screen_tile_row_offset = 0x03;
1710 for (i = 0; i < ARRAY_SIZE(primes); i++) {
1711 if (gr->tpc_total % primes[i]) {
1712 gr->screen_tile_row_offset = primes[i];
1713 break;
1714 }
1715 }
1716 break;
1717 }
1718
1719 /* Sort GPCs by TPC count, highest-to-lowest. */
1720 for (i = 0; i < gr->gpc_nr; i++)
1721 gpc_map[i] = i;
1722 sorted = false;
1723
1724 while (!sorted) {
1725 for (sorted = true, i = 0; i < gr->gpc_nr - 1; i++) {
1726 if (gr->tpc_nr[gpc_map[i + 1]] >
1727 gr->tpc_nr[gpc_map[i + 0]]) {
1728 u8 swap = gpc_map[i];
1729 gpc_map[i + 0] = gpc_map[i + 1];
1730 gpc_map[i + 1] = swap;
1731 sorted = false;
1732 }
1733 }
1734 }
1735
1736 /* Determine tile->GPC mapping */
1737 mul_factor = gr->gpc_nr * gr->tpc_max;
1738 if (mul_factor & 1)
1739 mul_factor = 2;
1740 else
1741 mul_factor = 1;
1742
1743 comm_denom = gr->gpc_nr * gr->tpc_max * mul_factor;
1744
1745 for (i = 0; i < gr->gpc_nr; i++) {
1746 init_frac[i] = gr->tpc_nr[gpc_map[i]] * gr->gpc_nr * mul_factor;
1747 init_err[i] = i * gr->tpc_max * mul_factor - comm_denom/2;
1748 run_err[i] = init_frac[i] + init_err[i];
1749 }
1750
1751 for (i = 0; i < gr->tpc_total;) {
1752 for (j = 0; j < gr->gpc_nr; j++) {
1753 if ((run_err[j] * 2) >= comm_denom) {
1754 gr->tile[i++] = gpc_map[j];
1755 run_err[j] += init_frac[j] - comm_denom;
1756 } else {
1757 run_err[j] += init_frac[j];
1758 }
1759 }
1760 }
1761}
1762
1654static int 1763static int
1655gf100_gr_oneinit(struct nvkm_gr *base) 1764gf100_gr_oneinit(struct nvkm_gr *base)
1656{ 1765{
@@ -1674,55 +1783,27 @@ gf100_gr_oneinit(struct nvkm_gr *base)
1674 gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f; 1783 gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f;
1675 for (i = 0; i < gr->gpc_nr; i++) { 1784 for (i = 0; i < gr->gpc_nr; i++) {
1676 gr->tpc_nr[i] = nvkm_rd32(device, GPC_UNIT(i, 0x2608)); 1785 gr->tpc_nr[i] = nvkm_rd32(device, GPC_UNIT(i, 0x2608));
1786 gr->tpc_max = max(gr->tpc_max, gr->tpc_nr[i]);
1677 gr->tpc_total += gr->tpc_nr[i]; 1787 gr->tpc_total += gr->tpc_nr[i];
1678 gr->ppc_nr[i] = gr->func->ppc_nr; 1788 gr->ppc_nr[i] = gr->func->ppc_nr;
1679 for (j = 0; j < gr->ppc_nr[i]; j++) { 1789 for (j = 0; j < gr->ppc_nr[i]; j++) {
1680 u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4))); 1790 gr->ppc_tpc_mask[i][j] =
1681 if (mask) 1791 nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
1682 gr->ppc_mask[i] |= (1 << j); 1792 if (gr->ppc_tpc_mask[i][j] == 0)
1683 gr->ppc_tpc_nr[i][j] = hweight8(mask); 1793 continue;
1684 } 1794 gr->ppc_mask[i] |= (1 << j);
1685 } 1795 gr->ppc_tpc_nr[i][j] = hweight8(gr->ppc_tpc_mask[i][j]);
1686 1796 if (gr->ppc_tpc_min == 0 ||
1687 /*XXX: these need figuring out... though it might not even matter */ 1797 gr->ppc_tpc_min > gr->ppc_tpc_nr[i][j])
1688 switch (device->chipset) { 1798 gr->ppc_tpc_min = gr->ppc_tpc_nr[i][j];
1689 case 0xc0: 1799 if (gr->ppc_tpc_max < gr->ppc_tpc_nr[i][j])
1690 if (gr->tpc_total == 11) { /* 465, 3/4/4/0, 4 */ 1800 gr->ppc_tpc_max = gr->ppc_tpc_nr[i][j];
1691 gr->screen_tile_row_offset = 0x07;
1692 } else
1693 if (gr->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
1694 gr->screen_tile_row_offset = 0x05;
1695 } else
1696 if (gr->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
1697 gr->screen_tile_row_offset = 0x06;
1698 } 1801 }
1699 break;
1700 case 0xc3: /* 450, 4/0/0/0, 2 */
1701 gr->screen_tile_row_offset = 0x03;
1702 break;
1703 case 0xc4: /* 460, 3/4/0/0, 4 */
1704 gr->screen_tile_row_offset = 0x01;
1705 break;
1706 case 0xc1: /* 2/0/0/0, 1 */
1707 gr->screen_tile_row_offset = 0x01;
1708 break;
1709 case 0xc8: /* 4/4/3/4, 5 */
1710 gr->screen_tile_row_offset = 0x06;
1711 break;
1712 case 0xce: /* 4/4/0/0, 4 */
1713 gr->screen_tile_row_offset = 0x03;
1714 break;
1715 case 0xcf: /* 4/0/0/0, 3 */
1716 gr->screen_tile_row_offset = 0x03;
1717 break;
1718 case 0xd7:
1719 case 0xd9: /* 1/0/0/0, 1 */
1720 case 0xea: /* gk20a */
1721 case 0x12b: /* gm20b */
1722 gr->screen_tile_row_offset = 0x01;
1723 break;
1724 } 1802 }
1725 1803
1804 memset(gr->tile, 0xff, sizeof(gr->tile));
1805 gr->func->oneinit_tiles(gr);
1806 gr->func->oneinit_sm_id(gr);
1726 return 0; 1807 return 0;
1727} 1808}
1728 1809
@@ -1914,13 +1995,68 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
1914} 1995}
1915 1996
1916void 1997void
1998gf100_gr_init_400054(struct gf100_gr *gr)
1999{
2000 nvkm_wr32(gr->base.engine.subdev.device, 0x400054, 0x34ce3464);
2001}
2002
2003void
2004gf100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
2005{
2006 struct nvkm_device *device = gr->base.engine.subdev.device;
2007 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
2008 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
2009}
2010
2011void
2012gf100_gr_init_tex_hww_esr(struct gf100_gr *gr, int gpc, int tpc)
2013{
2014 struct nvkm_device *device = gr->base.engine.subdev.device;
2015 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
2016}
2017
2018void
2019gf100_gr_init_419eb4(struct gf100_gr *gr)
2020{
2021 struct nvkm_device *device = gr->base.engine.subdev.device;
2022 nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
2023}
2024
2025void
2026gf100_gr_init_419cc0(struct gf100_gr *gr)
2027{
2028 struct nvkm_device *device = gr->base.engine.subdev.device;
2029 int gpc, tpc;
2030
2031 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
2032
2033 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
2034 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++)
2035 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
2036 }
2037}
2038
2039void
2040gf100_gr_init_40601c(struct gf100_gr *gr)
2041{
2042 nvkm_wr32(gr->base.engine.subdev.device, 0x40601c, 0xc0000000);
2043}
2044
2045void
2046gf100_gr_init_fecs_exceptions(struct gf100_gr *gr)
2047{
2048 const u32 data = gr->firmware ? 0x000e0000 : 0x000e0001;
2049 nvkm_wr32(gr->base.engine.subdev.device, 0x409c24, data);
2050}
2051
2052void
1917gf100_gr_init_gpc_mmu(struct gf100_gr *gr) 2053gf100_gr_init_gpc_mmu(struct gf100_gr *gr)
1918{ 2054{
1919 struct nvkm_device *device = gr->base.engine.subdev.device; 2055 struct nvkm_device *device = gr->base.engine.subdev.device;
1920 struct nvkm_fb *fb = device->fb; 2056 struct nvkm_fb *fb = device->fb;
1921 2057
1922 nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0x00000001); 2058 nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0x00000001);
1923 nvkm_wr32(device, 0x4188a4, 0x00000000); 2059 nvkm_wr32(device, 0x4188a4, 0x03000000);
1924 nvkm_wr32(device, 0x418888, 0x00000000); 2060 nvkm_wr32(device, 0x418888, 0x00000000);
1925 nvkm_wr32(device, 0x41888c, 0x00000000); 2061 nvkm_wr32(device, 0x41888c, 0x00000000);
1926 nvkm_wr32(device, 0x418890, 0x00000000); 2062 nvkm_wr32(device, 0x418890, 0x00000000);
@@ -1929,37 +2065,30 @@ gf100_gr_init_gpc_mmu(struct gf100_gr *gr)
1929 nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(fb->mmu_rd) >> 8); 2065 nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(fb->mmu_rd) >> 8);
1930} 2066}
1931 2067
1932int 2068void
1933gf100_gr_init(struct gf100_gr *gr) 2069gf100_gr_init_num_active_ltcs(struct gf100_gr *gr)
1934{ 2070{
1935 struct nvkm_device *device = gr->base.engine.subdev.device; 2071 struct nvkm_device *device = gr->base.engine.subdev.device;
1936 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total); 2072 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
1937 u32 data[TPC_MAX / 8] = {}; 2073}
1938 u8 tpcnr[GPC_MAX];
1939 int gpc, tpc, rop;
1940 int i;
1941
1942 gr->func->init_gpc_mmu(gr);
1943
1944 gf100_gr_mmio(gr, gr->func->mmio);
1945
1946 nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
1947
1948 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
1949 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
1950 do {
1951 gpc = (gpc + 1) % gr->gpc_nr;
1952 } while (!tpcnr[gpc]);
1953 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
1954 2074
1955 data[i / 8] |= tpc << ((i % 8) * 4); 2075void
2076gf100_gr_init_zcull(struct gf100_gr *gr)
2077{
2078 struct nvkm_device *device = gr->base.engine.subdev.device;
2079 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
2080 const u8 tile_nr = ALIGN(gr->tpc_total, 32);
2081 u8 bank[GPC_MAX] = {}, gpc, i, j;
2082 u32 data;
2083
2084 for (i = 0; i < tile_nr; i += 8) {
2085 for (data = 0, j = 0; j < 8 && i + j < gr->tpc_total; j++) {
2086 data |= bank[gr->tile[i + j]] << (j * 4);
2087 bank[gr->tile[i + j]]++;
2088 }
2089 nvkm_wr32(device, GPC_BCAST(0x0980 + ((i / 8) * 4)), data);
1956 } 2090 }
1957 2091
1958 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
1959 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
1960 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
1961 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
1962
1963 for (gpc = 0; gpc < gr->gpc_nr; gpc++) { 2092 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
1964 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914), 2093 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
1965 gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]); 2094 gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
@@ -1968,29 +2097,88 @@ gf100_gr_init(struct gf100_gr *gr)
1968 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918); 2097 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
1969 } 2098 }
1970 2099
1971 if (device->chipset != 0xd7) 2100 nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918);
1972 nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918); 2101}
2102
2103void
2104gf100_gr_init_vsc_stream_master(struct gf100_gr *gr)
2105{
2106 struct nvkm_device *device = gr->base.engine.subdev.device;
2107 nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
2108}
2109
2110int
2111gf100_gr_init(struct gf100_gr *gr)
2112{
2113 struct nvkm_device *device = gr->base.engine.subdev.device;
2114 int gpc, tpc, rop;
2115
2116 if (gr->func->init_419bd8)
2117 gr->func->init_419bd8(gr);
2118
2119 gr->func->init_gpc_mmu(gr);
2120
2121 if (gr->fuc_sw_nonctx)
2122 gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
1973 else 2123 else
1974 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918); 2124 gf100_gr_mmio(gr, gr->func->mmio);
1975 2125
1976 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800)); 2126 gf100_gr_wait_idle(gr);
2127
2128 if (gr->func->init_r405a14)
2129 gr->func->init_r405a14(gr);
2130
2131 if (gr->func->clkgate_pack)
2132 nvkm_therm_clkgate_init(device->therm, gr->func->clkgate_pack);
2133
2134 if (gr->func->init_bios)
2135 gr->func->init_bios(gr);
2136
2137 gr->func->init_vsc_stream_master(gr);
2138 gr->func->init_zcull(gr);
2139 gr->func->init_num_active_ltcs(gr);
2140 if (gr->func->init_rop_active_fbps)
2141 gr->func->init_rop_active_fbps(gr);
2142 if (gr->func->init_bios_2)
2143 gr->func->init_bios_2(gr);
2144 if (gr->func->init_swdx_pes_mask)
2145 gr->func->init_swdx_pes_mask(gr);
1977 2146
1978 nvkm_wr32(device, 0x400500, 0x00010001); 2147 nvkm_wr32(device, 0x400500, 0x00010001);
1979 2148
1980 nvkm_wr32(device, 0x400100, 0xffffffff); 2149 nvkm_wr32(device, 0x400100, 0xffffffff);
1981 nvkm_wr32(device, 0x40013c, 0xffffffff); 2150 nvkm_wr32(device, 0x40013c, 0xffffffff);
2151 nvkm_wr32(device, 0x400124, 0x00000002);
2152
2153 gr->func->init_fecs_exceptions(gr);
2154 if (gr->func->init_ds_hww_esr_2)
2155 gr->func->init_ds_hww_esr_2(gr);
1982 2156
1983 nvkm_wr32(device, 0x409c24, 0x000f0000);
1984 nvkm_wr32(device, 0x404000, 0xc0000000); 2157 nvkm_wr32(device, 0x404000, 0xc0000000);
1985 nvkm_wr32(device, 0x404600, 0xc0000000); 2158 nvkm_wr32(device, 0x404600, 0xc0000000);
1986 nvkm_wr32(device, 0x408030, 0xc0000000); 2159 nvkm_wr32(device, 0x408030, 0xc0000000);
1987 nvkm_wr32(device, 0x40601c, 0xc0000000); 2160
2161 if (gr->func->init_40601c)
2162 gr->func->init_40601c(gr);
2163
1988 nvkm_wr32(device, 0x404490, 0xc0000000); 2164 nvkm_wr32(device, 0x404490, 0xc0000000);
1989 nvkm_wr32(device, 0x406018, 0xc0000000); 2165 nvkm_wr32(device, 0x406018, 0xc0000000);
2166
2167 if (gr->func->init_sked_hww_esr)
2168 gr->func->init_sked_hww_esr(gr);
2169
1990 nvkm_wr32(device, 0x405840, 0xc0000000); 2170 nvkm_wr32(device, 0x405840, 0xc0000000);
1991 nvkm_wr32(device, 0x405844, 0x00ffffff); 2171 nvkm_wr32(device, 0x405844, 0x00ffffff);
1992 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008); 2172
1993 nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000); 2173 if (gr->func->init_419cc0)
2174 gr->func->init_419cc0(gr);
2175 if (gr->func->init_419eb4)
2176 gr->func->init_419eb4(gr);
2177 if (gr->func->init_419c9c)
2178 gr->func->init_419c9c(gr);
2179
2180 if (gr->func->init_ppc_exceptions)
2181 gr->func->init_ppc_exceptions(gr);
1994 2182
1995 for (gpc = 0; gpc < gr->gpc_nr; gpc++) { 2183 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
1996 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000); 2184 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
@@ -2000,19 +2188,20 @@ gf100_gr_init(struct gf100_gr *gr)
2000 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) { 2188 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
2001 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff); 2189 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
2002 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff); 2190 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
2003 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000); 2191 if (gr->func->init_tex_hww_esr)
2004 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000); 2192 gr->func->init_tex_hww_esr(gr, gpc, tpc);
2005 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000); 2193 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
2006 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe); 2194 if (gr->func->init_504430)
2007 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f); 2195 gr->func->init_504430(gr, gpc, tpc);
2196 gr->func->init_shader_exceptions(gr, gpc, tpc);
2008 } 2197 }
2009 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff); 2198 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
2010 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff); 2199 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
2011 } 2200 }
2012 2201
2013 for (rop = 0; rop < gr->rop_nr; rop++) { 2202 for (rop = 0; rop < gr->rop_nr; rop++) {
2014 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000); 2203 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
2015 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000); 2204 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
2016 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff); 2205 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
2017 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff); 2206 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
2018 } 2207 }
@@ -2024,10 +2213,14 @@ gf100_gr_init(struct gf100_gr *gr)
2024 nvkm_wr32(device, 0x40011c, 0xffffffff); 2213 nvkm_wr32(device, 0x40011c, 0xffffffff);
2025 nvkm_wr32(device, 0x400134, 0xffffffff); 2214 nvkm_wr32(device, 0x400134, 0xffffffff);
2026 2215
2027 nvkm_wr32(device, 0x400054, 0x34ce3464); 2216 if (gr->func->init_400054)
2217 gr->func->init_400054(gr);
2028 2218
2029 gf100_gr_zbc_init(gr); 2219 gf100_gr_zbc_init(gr);
2030 2220
2221 if (gr->func->init_4188a4)
2222 gr->func->init_4188a4(gr);
2223
2031 return gf100_gr_init_ctxctl(gr); 2224 return gf100_gr_init_ctxctl(gr);
2032} 2225}
2033 2226
@@ -2053,13 +2246,27 @@ gf100_gr_gpccs_ucode = {
2053 2246
2054static const struct gf100_gr_func 2247static const struct gf100_gr_func
2055gf100_gr = { 2248gf100_gr = {
2249 .oneinit_tiles = gf100_gr_oneinit_tiles,
2250 .oneinit_sm_id = gf100_gr_oneinit_sm_id,
2056 .init = gf100_gr_init, 2251 .init = gf100_gr_init,
2057 .init_gpc_mmu = gf100_gr_init_gpc_mmu, 2252 .init_gpc_mmu = gf100_gr_init_gpc_mmu,
2253 .init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
2254 .init_zcull = gf100_gr_init_zcull,
2255 .init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
2256 .init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
2257 .init_40601c = gf100_gr_init_40601c,
2258 .init_419cc0 = gf100_gr_init_419cc0,
2259 .init_419eb4 = gf100_gr_init_419eb4,
2260 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
2261 .init_shader_exceptions = gf100_gr_init_shader_exceptions,
2262 .init_400054 = gf100_gr_init_400054,
2263 .trap_mp = gf100_gr_trap_mp,
2058 .mmio = gf100_gr_pack_mmio, 2264 .mmio = gf100_gr_pack_mmio,
2059 .fecs.ucode = &gf100_gr_fecs_ucode, 2265 .fecs.ucode = &gf100_gr_fecs_ucode,
2060 .gpccs.ucode = &gf100_gr_gpccs_ucode, 2266 .gpccs.ucode = &gf100_gr_gpccs_ucode,
2061 .rops = gf100_gr_rops, 2267 .rops = gf100_gr_rops,
2062 .grctx = &gf100_grctx, 2268 .grctx = &gf100_grctx,
2269 .zbc = &gf100_gr_zbc,
2063 .sclass = { 2270 .sclass = {
2064 { -1, -1, FERMI_TWOD_A }, 2271 { -1, -1, FERMI_TWOD_A },
2065 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A }, 2272 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index c8ec3fd97155..dc46cf0131db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -72,6 +72,12 @@ struct gf100_gr_zbc_depth {
72 u32 l2; 72 u32 l2;
73}; 73};
74 74
75struct gf100_gr_zbc_stencil {
76 u32 format;
77 u32 ds;
78 u32 l2;
79};
80
75struct gf100_gr { 81struct gf100_gr {
76 const struct gf100_gr_func *func; 82 const struct gf100_gr_func *func;
77 struct nvkm_gr base; 83 struct nvkm_gr base;
@@ -95,21 +101,33 @@ struct gf100_gr {
95 101
96 struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT]; 102 struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT];
97 struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT]; 103 struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
104 struct gf100_gr_zbc_stencil zbc_stencil[NVKM_LTC_MAX_ZBC_CNT];
98 105
99 u8 rop_nr; 106 u8 rop_nr;
100 u8 gpc_nr; 107 u8 gpc_nr;
101 u8 tpc_nr[GPC_MAX]; 108 u8 tpc_nr[GPC_MAX];
109 u8 tpc_max;
102 u8 tpc_total; 110 u8 tpc_total;
103 u8 ppc_nr[GPC_MAX]; 111 u8 ppc_nr[GPC_MAX];
104 u8 ppc_mask[GPC_MAX]; 112 u8 ppc_mask[GPC_MAX];
113 u8 ppc_tpc_mask[GPC_MAX][4];
105 u8 ppc_tpc_nr[GPC_MAX][4]; 114 u8 ppc_tpc_nr[GPC_MAX][4];
115 u8 ppc_tpc_min;
116 u8 ppc_tpc_max;
117
118 u8 screen_tile_row_offset;
119 u8 tile[TPC_MAX];
120
121 struct {
122 u8 gpc;
123 u8 tpc;
124 } sm[TPC_MAX];
125 u8 sm_nr;
106 126
107 struct gf100_gr_data mmio_data[4]; 127 struct gf100_gr_data mmio_data[4];
108 struct gf100_gr_mmio mmio_list[4096/8]; 128 struct gf100_gr_mmio mmio_list[4096/8];
109 u32 size; 129 u32 size;
110 u32 *data; 130 u32 *data;
111
112 u8 screen_tile_row_offset;
113}; 131};
114 132
115int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *, 133int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *,
@@ -118,14 +136,43 @@ int gf100_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
118 int, struct nvkm_gr **); 136 int, struct nvkm_gr **);
119void *gf100_gr_dtor(struct nvkm_gr *); 137void *gf100_gr_dtor(struct nvkm_gr *);
120 138
139struct gf100_gr_func_zbc {
140 void (*clear_color)(struct gf100_gr *, int zbc);
141 void (*clear_depth)(struct gf100_gr *, int zbc);
142 int (*stencil_get)(struct gf100_gr *, int format,
143 const u32 ds, const u32 l2);
144 void (*clear_stencil)(struct gf100_gr *, int zbc);
145};
146
121struct gf100_gr_func { 147struct gf100_gr_func {
122 void (*dtor)(struct gf100_gr *); 148 void (*dtor)(struct gf100_gr *);
149 void (*oneinit_tiles)(struct gf100_gr *);
150 void (*oneinit_sm_id)(struct gf100_gr *);
123 int (*init)(struct gf100_gr *); 151 int (*init)(struct gf100_gr *);
152 void (*init_419bd8)(struct gf100_gr *);
124 void (*init_gpc_mmu)(struct gf100_gr *); 153 void (*init_gpc_mmu)(struct gf100_gr *);
154 void (*init_r405a14)(struct gf100_gr *);
155 void (*init_bios)(struct gf100_gr *);
156 void (*init_vsc_stream_master)(struct gf100_gr *);
157 void (*init_zcull)(struct gf100_gr *);
158 void (*init_num_active_ltcs)(struct gf100_gr *);
125 void (*init_rop_active_fbps)(struct gf100_gr *); 159 void (*init_rop_active_fbps)(struct gf100_gr *);
126 void (*init_ppc_exceptions)(struct gf100_gr *); 160 void (*init_bios_2)(struct gf100_gr *);
127 void (*init_swdx_pes_mask)(struct gf100_gr *); 161 void (*init_swdx_pes_mask)(struct gf100_gr *);
128 void (*init_num_active_ltcs)(struct gf100_gr *); 162 void (*init_fecs_exceptions)(struct gf100_gr *);
163 void (*init_ds_hww_esr_2)(struct gf100_gr *);
164 void (*init_40601c)(struct gf100_gr *);
165 void (*init_sked_hww_esr)(struct gf100_gr *);
166 void (*init_419cc0)(struct gf100_gr *);
167 void (*init_419eb4)(struct gf100_gr *);
168 void (*init_419c9c)(struct gf100_gr *);
169 void (*init_ppc_exceptions)(struct gf100_gr *);
170 void (*init_tex_hww_esr)(struct gf100_gr *, int gpc, int tpc);
171 void (*init_504430)(struct gf100_gr *, int gpc, int tpc);
172 void (*init_shader_exceptions)(struct gf100_gr *, int gpc, int tpc);
173 void (*init_400054)(struct gf100_gr *);
174 void (*init_4188a4)(struct gf100_gr *);
175 void (*trap_mp)(struct gf100_gr *, int gpc, int tpc);
129 void (*set_hww_esr_report_mask)(struct gf100_gr *); 176 void (*set_hww_esr_report_mask)(struct gf100_gr *);
130 const struct gf100_gr_pack *mmio; 177 const struct gf100_gr_pack *mmio;
131 struct { 178 struct {
@@ -135,26 +182,60 @@ struct gf100_gr_func {
135 struct gf100_gr_ucode *ucode; 182 struct gf100_gr_ucode *ucode;
136 } gpccs; 183 } gpccs;
137 int (*rops)(struct gf100_gr *); 184 int (*rops)(struct gf100_gr *);
185 int gpc_nr;
186 int tpc_nr;
138 int ppc_nr; 187 int ppc_nr;
139 const struct gf100_grctx_func *grctx; 188 const struct gf100_grctx_func *grctx;
140 const struct nvkm_therm_clkgate_pack *clkgate_pack; 189 const struct nvkm_therm_clkgate_pack *clkgate_pack;
190 const struct gf100_gr_func_zbc *zbc;
141 struct nvkm_sclass sclass[]; 191 struct nvkm_sclass sclass[];
142}; 192};
143 193
144int gf100_gr_init(struct gf100_gr *);
145int gf100_gr_rops(struct gf100_gr *); 194int gf100_gr_rops(struct gf100_gr *);
146 195void gf100_gr_oneinit_tiles(struct gf100_gr *);
147int gk104_gr_init(struct gf100_gr *); 196void gf100_gr_oneinit_sm_id(struct gf100_gr *);
197int gf100_gr_init(struct gf100_gr *);
198void gf100_gr_init_vsc_stream_master(struct gf100_gr *);
199void gf100_gr_init_zcull(struct gf100_gr *);
200void gf100_gr_init_num_active_ltcs(struct gf100_gr *);
201void gf100_gr_init_fecs_exceptions(struct gf100_gr *);
202void gf100_gr_init_40601c(struct gf100_gr *);
203void gf100_gr_init_419cc0(struct gf100_gr *);
204void gf100_gr_init_419eb4(struct gf100_gr *);
205void gf100_gr_init_tex_hww_esr(struct gf100_gr *, int, int);
206void gf100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
207void gf100_gr_init_400054(struct gf100_gr *);
208extern const struct gf100_gr_func_zbc gf100_gr_zbc;
209
210void gf117_gr_init_zcull(struct gf100_gr *);
211
212void gk104_gr_init_vsc_stream_master(struct gf100_gr *);
148void gk104_gr_init_rop_active_fbps(struct gf100_gr *); 213void gk104_gr_init_rop_active_fbps(struct gf100_gr *);
149void gk104_gr_init_ppc_exceptions(struct gf100_gr *); 214void gk104_gr_init_ppc_exceptions(struct gf100_gr *);
215void gk104_gr_init_sked_hww_esr(struct gf100_gr *);
216
217void gk110_gr_init_419eb4(struct gf100_gr *);
218
219void gm107_gr_init_504430(struct gf100_gr *, int, int);
220void gm107_gr_init_shader_exceptions(struct gf100_gr *, int, int);
221void gm107_gr_init_400054(struct gf100_gr *);
150 222
151int gk20a_gr_init(struct gf100_gr *); 223int gk20a_gr_init(struct gf100_gr *);
152 224
153int gm200_gr_init(struct gf100_gr *); 225void gm200_gr_oneinit_tiles(struct gf100_gr *);
226void gm200_gr_oneinit_sm_id(struct gf100_gr *);
154int gm200_gr_rops(struct gf100_gr *); 227int gm200_gr_rops(struct gf100_gr *);
228void gm200_gr_init_num_active_ltcs(struct gf100_gr *);
229void gm200_gr_init_ds_hww_esr_2(struct gf100_gr *);
155 230
156int gp100_gr_init(struct gf100_gr *);
157void gp100_gr_init_rop_active_fbps(struct gf100_gr *); 231void gp100_gr_init_rop_active_fbps(struct gf100_gr *);
232void gp100_gr_init_fecs_exceptions(struct gf100_gr *);
233void gp100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
234void gp100_gr_zbc_clear_color(struct gf100_gr *, int);
235void gp100_gr_zbc_clear_depth(struct gf100_gr *, int);
236
237void gp102_gr_init_swdx_pes_mask(struct gf100_gr *);
238extern const struct gf100_gr_func_zbc gp102_gr_zbc;
158 239
159#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object) 240#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
160#include <core/object.h> 241#include <core/object.h>
@@ -187,7 +268,7 @@ extern const struct nvkm_object_func gf100_fermi;
187struct gf100_gr_init { 268struct gf100_gr_init {
188 u32 addr; 269 u32 addr;
189 u8 count; 270 u8 count;
190 u8 pitch; 271 u32 pitch;
191 u32 data; 272 u32 data;
192}; 273};
193 274
@@ -257,6 +338,9 @@ extern const struct gf100_gr_init gf100_gr_init_be_0[];
257extern const struct gf100_gr_init gf100_gr_init_fe_1[]; 338extern const struct gf100_gr_init gf100_gr_init_fe_1[];
258extern const struct gf100_gr_init gf100_gr_init_pe_1[]; 339extern const struct gf100_gr_init gf100_gr_init_pe_1[];
259void gf100_gr_init_gpc_mmu(struct gf100_gr *); 340void gf100_gr_init_gpc_mmu(struct gf100_gr *);
341void gf100_gr_trap_mp(struct gf100_gr *, int, int);
342extern const struct nvkm_bitfield gf100_mp_global_error[];
343extern const struct nvkm_enum gf100_mp_warp_error[];
260 344
261extern const struct gf100_gr_init gf104_gr_init_ds_0[]; 345extern const struct gf100_gr_init gf104_gr_init_ds_0[];
262extern const struct gf100_gr_init gf104_gr_init_tex_0[]; 346extern const struct gf100_gr_init gf104_gr_init_tex_0[];
@@ -279,6 +363,7 @@ extern const struct gf100_gr_init gf117_gr_init_wwdx_0[];
279extern const struct gf100_gr_init gf117_gr_init_cbm_0[]; 363extern const struct gf100_gr_init gf117_gr_init_cbm_0[];
280 364
281extern const struct gf100_gr_init gk104_gr_init_main_0[]; 365extern const struct gf100_gr_init gk104_gr_init_main_0[];
366extern const struct gf100_gr_init gk104_gr_init_gpc_unk_2[];
282extern const struct gf100_gr_init gk104_gr_init_tpccs_0[]; 367extern const struct gf100_gr_init gk104_gr_init_tpccs_0[];
283extern const struct gf100_gr_init gk104_gr_init_pe_0[]; 368extern const struct gf100_gr_init gk104_gr_init_pe_0[];
284extern const struct gf100_gr_init gk104_gr_init_be_0[]; 369extern const struct gf100_gr_init gk104_gr_init_be_0[];
@@ -306,8 +391,4 @@ extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
306void gm107_gr_init_bios(struct gf100_gr *); 391void gm107_gr_init_bios(struct gf100_gr *);
307 392
308void gm200_gr_init_gpc_mmu(struct gf100_gr *); 393void gm200_gr_init_gpc_mmu(struct gf100_gr *);
309
310void gp100_gr_init_num_active_ltcs(struct gf100_gr *gr);
311
312void gp102_gr_init_swdx_pes_mask(struct gf100_gr *);
313#endif 394#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index ec0f11983b23..42c2fd9fc04e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -114,13 +114,27 @@ gf104_gr_pack_mmio[] = {
114 114
115static const struct gf100_gr_func 115static const struct gf100_gr_func
116gf104_gr = { 116gf104_gr = {
117 .oneinit_tiles = gf100_gr_oneinit_tiles,
118 .oneinit_sm_id = gf100_gr_oneinit_sm_id,
117 .init = gf100_gr_init, 119 .init = gf100_gr_init,
118 .init_gpc_mmu = gf100_gr_init_gpc_mmu, 120 .init_gpc_mmu = gf100_gr_init_gpc_mmu,
121 .init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
122 .init_zcull = gf100_gr_init_zcull,
123 .init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
124 .init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
125 .init_40601c = gf100_gr_init_40601c,
126 .init_419cc0 = gf100_gr_init_419cc0,
127 .init_419eb4 = gf100_gr_init_419eb4,
128 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
129 .init_shader_exceptions = gf100_gr_init_shader_exceptions,
130 .init_400054 = gf100_gr_init_400054,
131 .trap_mp = gf100_gr_trap_mp,
119 .mmio = gf104_gr_pack_mmio, 132 .mmio = gf104_gr_pack_mmio,
120 .fecs.ucode = &gf100_gr_fecs_ucode, 133 .fecs.ucode = &gf100_gr_fecs_ucode,
121 .gpccs.ucode = &gf100_gr_gpccs_ucode, 134 .gpccs.ucode = &gf100_gr_gpccs_ucode,
122 .rops = gf100_gr_rops, 135 .rops = gf100_gr_rops,
123 .grctx = &gf104_grctx, 136 .grctx = &gf104_grctx,
137 .zbc = &gf100_gr_zbc,
124 .sclass = { 138 .sclass = {
125 { -1, -1, FERMI_TWOD_A }, 139 { -1, -1, FERMI_TWOD_A },
126 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A }, 140 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index cc152eb74123..4731a460adc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -103,15 +103,36 @@ gf108_gr_pack_mmio[] = {
103 * PGRAPH engine/subdev functions 103 * PGRAPH engine/subdev functions
104 ******************************************************************************/ 104 ******************************************************************************/
105 105
106static void
107gf108_gr_init_r405a14(struct gf100_gr *gr)
108{
109 nvkm_wr32(gr->base.engine.subdev.device, 0x405a14, 0x80000000);
110}
111
106static const struct gf100_gr_func 112static const struct gf100_gr_func
107gf108_gr = { 113gf108_gr = {
114 .oneinit_tiles = gf100_gr_oneinit_tiles,
115 .oneinit_sm_id = gf100_gr_oneinit_sm_id,
108 .init = gf100_gr_init, 116 .init = gf100_gr_init,
109 .init_gpc_mmu = gf100_gr_init_gpc_mmu, 117 .init_gpc_mmu = gf100_gr_init_gpc_mmu,
118 .init_r405a14 = gf108_gr_init_r405a14,
119 .init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
120 .init_zcull = gf100_gr_init_zcull,
121 .init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
122 .init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
123 .init_40601c = gf100_gr_init_40601c,
124 .init_419cc0 = gf100_gr_init_419cc0,
125 .init_419eb4 = gf100_gr_init_419eb4,
126 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
127 .init_shader_exceptions = gf100_gr_init_shader_exceptions,
128 .init_400054 = gf100_gr_init_400054,
129 .trap_mp = gf100_gr_trap_mp,
110 .mmio = gf108_gr_pack_mmio, 130 .mmio = gf108_gr_pack_mmio,
111 .fecs.ucode = &gf100_gr_fecs_ucode, 131 .fecs.ucode = &gf100_gr_fecs_ucode,
112 .gpccs.ucode = &gf100_gr_gpccs_ucode, 132 .gpccs.ucode = &gf100_gr_gpccs_ucode,
113 .rops = gf100_gr_rops, 133 .rops = gf100_gr_rops,
114 .grctx = &gf108_grctx, 134 .grctx = &gf108_grctx,
135 .zbc = &gf100_gr_zbc,
115 .sclass = { 136 .sclass = {
116 { -1, -1, FERMI_TWOD_A }, 137 { -1, -1, FERMI_TWOD_A },
117 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A }, 138 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 10d2d73ca8c3..cdf759c8cd7f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -86,13 +86,27 @@ gf110_gr_pack_mmio[] = {
86 86
87static const struct gf100_gr_func 87static const struct gf100_gr_func
88gf110_gr = { 88gf110_gr = {
89 .oneinit_tiles = gf100_gr_oneinit_tiles,
90 .oneinit_sm_id = gf100_gr_oneinit_sm_id,
89 .init = gf100_gr_init, 91 .init = gf100_gr_init,
90 .init_gpc_mmu = gf100_gr_init_gpc_mmu, 92 .init_gpc_mmu = gf100_gr_init_gpc_mmu,
93 .init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
94 .init_zcull = gf100_gr_init_zcull,
95 .init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
96 .init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
97 .init_40601c = gf100_gr_init_40601c,
98 .init_419cc0 = gf100_gr_init_419cc0,
99 .init_419eb4 = gf100_gr_init_419eb4,
100 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
101 .init_shader_exceptions = gf100_gr_init_shader_exceptions,
102 .init_400054 = gf100_gr_init_400054,
103 .trap_mp = gf100_gr_trap_mp,
91 .mmio = gf110_gr_pack_mmio, 104 .mmio = gf110_gr_pack_mmio,
92 .fecs.ucode = &gf100_gr_fecs_ucode, 105 .fecs.ucode = &gf100_gr_fecs_ucode,
93 .gpccs.ucode = &gf100_gr_gpccs_ucode, 106 .gpccs.ucode = &gf100_gr_gpccs_ucode,
94 .rops = gf100_gr_rops, 107 .rops = gf100_gr_rops,
95 .grctx = &gf110_grctx, 108 .grctx = &gf110_grctx,
109 .zbc = &gf100_gr_zbc,
96 .sclass = { 110 .sclass = {
97 { -1, -1, FERMI_TWOD_A }, 111 { -1, -1, FERMI_TWOD_A },
98 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A }, 112 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index ac09a07c4150..a4158f84c649 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -120,16 +120,58 @@ gf117_gr_gpccs_ucode = {
120 .data.size = sizeof(gf117_grgpc_data), 120 .data.size = sizeof(gf117_grgpc_data),
121}; 121};
122 122
123void
124gf117_gr_init_zcull(struct gf100_gr *gr)
125{
126 struct nvkm_device *device = gr->base.engine.subdev.device;
127 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
128 const u8 tile_nr = ALIGN(gr->tpc_total, 32);
129 u8 bank[GPC_MAX] = {}, gpc, i, j;
130 u32 data;
131
132 for (i = 0; i < tile_nr; i += 8) {
133 for (data = 0, j = 0; j < 8 && i + j < gr->tpc_total; j++) {
134 data |= bank[gr->tile[i + j]] << (j * 4);
135 bank[gr->tile[i + j]]++;
136 }
137 nvkm_wr32(device, GPC_BCAST(0x0980 + ((i / 8) * 4)), data);
138 }
139
140 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
141 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
142 gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
143 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
144 gr->tpc_total);
145 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
146 }
147
148 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
149}
150
123static const struct gf100_gr_func 151static const struct gf100_gr_func
124gf117_gr = { 152gf117_gr = {
153 .oneinit_tiles = gf100_gr_oneinit_tiles,
154 .oneinit_sm_id = gf100_gr_oneinit_sm_id,
125 .init = gf100_gr_init, 155 .init = gf100_gr_init,
126 .init_gpc_mmu = gf100_gr_init_gpc_mmu, 156 .init_gpc_mmu = gf100_gr_init_gpc_mmu,
157 .init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
158 .init_zcull = gf117_gr_init_zcull,
159 .init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
160 .init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
161 .init_40601c = gf100_gr_init_40601c,
162 .init_419cc0 = gf100_gr_init_419cc0,
163 .init_419eb4 = gf100_gr_init_419eb4,
164 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
165 .init_shader_exceptions = gf100_gr_init_shader_exceptions,
166 .init_400054 = gf100_gr_init_400054,
167 .trap_mp = gf100_gr_trap_mp,
127 .mmio = gf117_gr_pack_mmio, 168 .mmio = gf117_gr_pack_mmio,
128 .fecs.ucode = &gf117_gr_fecs_ucode, 169 .fecs.ucode = &gf117_gr_fecs_ucode,
129 .gpccs.ucode = &gf117_gr_gpccs_ucode, 170 .gpccs.ucode = &gf117_gr_gpccs_ucode,
130 .rops = gf100_gr_rops, 171 .rops = gf100_gr_rops,
131 .ppc_nr = 1, 172 .ppc_nr = 1,
132 .grctx = &gf117_grctx, 173 .grctx = &gf117_grctx,
174 .zbc = &gf100_gr_zbc,
133 .sclass = { 175 .sclass = {
134 { -1, -1, FERMI_TWOD_A }, 176 { -1, -1, FERMI_TWOD_A },
135 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A }, 177 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 7f449ec6f760..4197844870b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -177,13 +177,27 @@ gf119_gr_pack_mmio[] = {
177 177
178static const struct gf100_gr_func 178static const struct gf100_gr_func
179gf119_gr = { 179gf119_gr = {
180 .oneinit_tiles = gf100_gr_oneinit_tiles,
181 .oneinit_sm_id = gf100_gr_oneinit_sm_id,
180 .init = gf100_gr_init, 182 .init = gf100_gr_init,
181 .init_gpc_mmu = gf100_gr_init_gpc_mmu, 183 .init_gpc_mmu = gf100_gr_init_gpc_mmu,
184 .init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
185 .init_zcull = gf100_gr_init_zcull,
186 .init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
187 .init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
188 .init_40601c = gf100_gr_init_40601c,
189 .init_419cc0 = gf100_gr_init_419cc0,
190 .init_419eb4 = gf100_gr_init_419eb4,
191 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
192 .init_shader_exceptions = gf100_gr_init_shader_exceptions,
193 .init_400054 = gf100_gr_init_400054,
194 .trap_mp = gf100_gr_trap_mp,
182 .mmio = gf119_gr_pack_mmio, 195 .mmio = gf119_gr_pack_mmio,
183 .fecs.ucode = &gf100_gr_fecs_ucode, 196 .fecs.ucode = &gf100_gr_fecs_ucode,
184 .gpccs.ucode = &gf100_gr_gpccs_ucode, 197 .gpccs.ucode = &gf100_gr_gpccs_ucode,
185 .rops = gf100_gr_rops, 198 .rops = gf100_gr_rops,
186 .grctx = &gf119_grctx, 199 .grctx = &gf119_grctx,
200 .zbc = &gf100_gr_zbc,
187 .sclass = { 201 .sclass = {
188 { -1, -1, FERMI_TWOD_A }, 202 { -1, -1, FERMI_TWOD_A },
189 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A }, 203 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 1b52fcb2c49a..477fee3e3715 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -83,6 +83,12 @@ gk104_gr_init_gpc_unk_1[] = {
83}; 83};
84 84
85const struct gf100_gr_init 85const struct gf100_gr_init
86gk104_gr_init_gpc_unk_2[] = {
87 { 0x418884, 1, 0x04, 0x00000000 },
88 {}
89};
90
91const struct gf100_gr_init
86gk104_gr_init_tpccs_0[] = { 92gk104_gr_init_tpccs_0[] = {
87 { 0x419d0c, 1, 0x04, 0x00000000 }, 93 { 0x419d0c, 1, 0x04, 0x00000000 },
88 { 0x419d10, 1, 0x04, 0x00000014 }, 94 { 0x419d10, 1, 0x04, 0x00000014 },
@@ -160,6 +166,7 @@ gk104_gr_pack_mmio[] = {
160 { gf119_gr_init_gpm_0 }, 166 { gf119_gr_init_gpm_0 },
161 { gk104_gr_init_gpc_unk_1 }, 167 { gk104_gr_init_gpc_unk_1 },
162 { gf100_gr_init_gcc_0 }, 168 { gf100_gr_init_gcc_0 },
169 { gk104_gr_init_gpc_unk_2 },
163 { gk104_gr_init_tpccs_0 }, 170 { gk104_gr_init_tpccs_0 },
164 { gf119_gr_init_tex_0 }, 171 { gf119_gr_init_tex_0 },
165 { gk104_gr_init_pe_0 }, 172 { gk104_gr_init_pe_0 },
@@ -381,6 +388,21 @@ gk104_clkgate_pack[] = {
381 ******************************************************************************/ 388 ******************************************************************************/
382 389
383void 390void
391gk104_gr_init_sked_hww_esr(struct gf100_gr *gr)
392{
393 nvkm_wr32(gr->base.engine.subdev.device, 0x407020, 0x40000000);
394}
395
396static void
397gk104_gr_init_fecs_exceptions(struct gf100_gr *gr)
398{
399 struct nvkm_device *device = gr->base.engine.subdev.device;
400 nvkm_wr32(device, 0x409ffc, 0x00000000);
401 nvkm_wr32(device, 0x409c14, 0x00003e3e);
402 nvkm_wr32(device, 0x409c24, 0x000f0001);
403}
404
405void
384gk104_gr_init_rop_active_fbps(struct gf100_gr *gr) 406gk104_gr_init_rop_active_fbps(struct gf100_gr *gr)
385{ 407{
386 struct nvkm_device *device = gr->base.engine.subdev.device; 408 struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -404,112 +426,11 @@ gk104_gr_init_ppc_exceptions(struct gf100_gr *gr)
404 } 426 }
405} 427}
406 428
407int 429void
408gk104_gr_init(struct gf100_gr *gr) 430gk104_gr_init_vsc_stream_master(struct gf100_gr *gr)
409{ 431{
410 struct nvkm_device *device = gr->base.engine.subdev.device; 432 struct nvkm_device *device = gr->base.engine.subdev.device;
411 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
412 u32 data[TPC_MAX / 8] = {};
413 u8 tpcnr[GPC_MAX];
414 int gpc, tpc, rop;
415 int i;
416
417 gr->func->init_gpc_mmu(gr);
418
419 gf100_gr_mmio(gr, gr->func->mmio);
420 if (gr->func->clkgate_pack)
421 nvkm_therm_clkgate_init(gr->base.engine.subdev.device->therm,
422 gr->func->clkgate_pack);
423
424 nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001); 433 nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
425
426 memset(data, 0x00, sizeof(data));
427 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
428 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
429 do {
430 gpc = (gpc + 1) % gr->gpc_nr;
431 } while (!tpcnr[gpc]);
432 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
433
434 data[i / 8] |= tpc << ((i % 8) * 4);
435 }
436
437 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
438 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
439 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
440 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
441
442 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
443 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
444 gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
445 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
446 gr->tpc_total);
447 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
448 }
449
450 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
451 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
452
453 gr->func->init_rop_active_fbps(gr);
454
455 nvkm_wr32(device, 0x400500, 0x00010001);
456
457 nvkm_wr32(device, 0x400100, 0xffffffff);
458 nvkm_wr32(device, 0x40013c, 0xffffffff);
459
460 nvkm_wr32(device, 0x409ffc, 0x00000000);
461 nvkm_wr32(device, 0x409c14, 0x00003e3e);
462 nvkm_wr32(device, 0x409c24, 0x000f0001);
463 nvkm_wr32(device, 0x404000, 0xc0000000);
464 nvkm_wr32(device, 0x404600, 0xc0000000);
465 nvkm_wr32(device, 0x408030, 0xc0000000);
466 nvkm_wr32(device, 0x404490, 0xc0000000);
467 nvkm_wr32(device, 0x406018, 0xc0000000);
468 nvkm_wr32(device, 0x407020, 0x40000000);
469 nvkm_wr32(device, 0x405840, 0xc0000000);
470 nvkm_wr32(device, 0x405844, 0x00ffffff);
471 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
472 nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
473
474 gr->func->init_ppc_exceptions(gr);
475
476 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
477 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
478 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
479 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
480 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
481 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
482 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
483 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
484 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
485 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
486 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
487 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
488 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
489 }
490 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
491 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
492 }
493
494 for (rop = 0; rop < gr->rop_nr; rop++) {
495 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
496 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
497 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
498 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
499 }
500
501 nvkm_wr32(device, 0x400108, 0xffffffff);
502 nvkm_wr32(device, 0x400138, 0xffffffff);
503 nvkm_wr32(device, 0x400118, 0xffffffff);
504 nvkm_wr32(device, 0x400130, 0xffffffff);
505 nvkm_wr32(device, 0x40011c, 0xffffffff);
506 nvkm_wr32(device, 0x400134, 0xffffffff);
507
508 nvkm_wr32(device, 0x400054, 0x34ce3464);
509
510 gf100_gr_zbc_init(gr);
511
512 return gf100_gr_init_ctxctl(gr);
513} 434}
514 435
515#include "fuc/hubgk104.fuc3.h" 436#include "fuc/hubgk104.fuc3.h"
@@ -534,10 +455,23 @@ gk104_gr_gpccs_ucode = {
534 455
535static const struct gf100_gr_func 456static const struct gf100_gr_func
536gk104_gr = { 457gk104_gr = {
537 .init = gk104_gr_init, 458 .oneinit_tiles = gf100_gr_oneinit_tiles,
459 .oneinit_sm_id = gf100_gr_oneinit_sm_id,
460 .init = gf100_gr_init,
538 .init_gpc_mmu = gf100_gr_init_gpc_mmu, 461 .init_gpc_mmu = gf100_gr_init_gpc_mmu,
462 .init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
463 .init_zcull = gf117_gr_init_zcull,
464 .init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
539 .init_rop_active_fbps = gk104_gr_init_rop_active_fbps, 465 .init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
466 .init_fecs_exceptions = gk104_gr_init_fecs_exceptions,
467 .init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
468 .init_419cc0 = gf100_gr_init_419cc0,
469 .init_419eb4 = gf100_gr_init_419eb4,
540 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, 470 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
471 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
472 .init_shader_exceptions = gf100_gr_init_shader_exceptions,
473 .init_400054 = gf100_gr_init_400054,
474 .trap_mp = gf100_gr_trap_mp,
541 .mmio = gk104_gr_pack_mmio, 475 .mmio = gk104_gr_pack_mmio,
542 .fecs.ucode = &gk104_gr_fecs_ucode, 476 .fecs.ucode = &gk104_gr_fecs_ucode,
543 .gpccs.ucode = &gk104_gr_gpccs_ucode, 477 .gpccs.ucode = &gk104_gr_gpccs_ucode,
@@ -545,6 +479,7 @@ gk104_gr = {
545 .ppc_nr = 1, 479 .ppc_nr = 1,
546 .grctx = &gk104_grctx, 480 .grctx = &gk104_grctx,
547 .clkgate_pack = gk104_clkgate_pack, 481 .clkgate_pack = gk104_clkgate_pack,
482 .zbc = &gf100_gr_zbc,
548 .sclass = { 483 .sclass = {
549 { -1, -1, FERMI_TWOD_A }, 484 { -1, -1, FERMI_TWOD_A },
550 { -1, -1, KEPLER_INLINE_TO_MEMORY_A }, 485 { -1, -1, KEPLER_INLINE_TO_MEMORY_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 4da916a9fc73..7cd628c84e07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -143,6 +143,7 @@ gk110_gr_pack_mmio[] = {
143 { gf119_gr_init_gpm_0 }, 143 { gf119_gr_init_gpm_0 },
144 { gk110_gr_init_gpc_unk_1 }, 144 { gk110_gr_init_gpc_unk_1 },
145 { gf100_gr_init_gcc_0 }, 145 { gf100_gr_init_gcc_0 },
146 { gk104_gr_init_gpc_unk_2 },
146 { gk104_gr_init_tpccs_0 }, 147 { gk104_gr_init_tpccs_0 },
147 { gk110_gr_init_tex_0 }, 148 { gk110_gr_init_tex_0 },
148 { gk104_gr_init_pe_0 }, 149 { gk104_gr_init_pe_0 },
@@ -334,12 +335,39 @@ gk110_gr_gpccs_ucode = {
334 .data.size = sizeof(gk110_grgpc_data), 335 .data.size = sizeof(gk110_grgpc_data),
335}; 336};
336 337
338void
339gk110_gr_init_419eb4(struct gf100_gr *gr)
340{
341 struct nvkm_device *device = gr->base.engine.subdev.device;
342 nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
343 nvkm_mask(device, 0x419eb4, 0x00002000, 0x00002000);
344 nvkm_mask(device, 0x419eb4, 0x00004000, 0x00004000);
345 nvkm_mask(device, 0x419eb4, 0x00008000, 0x00008000);
346 nvkm_mask(device, 0x419eb4, 0x00001000, 0x00000000);
347 nvkm_mask(device, 0x419eb4, 0x00002000, 0x00000000);
348 nvkm_mask(device, 0x419eb4, 0x00004000, 0x00000000);
349 nvkm_mask(device, 0x419eb4, 0x00008000, 0x00000000);
350}
351
337static const struct gf100_gr_func 352static const struct gf100_gr_func
338gk110_gr = { 353gk110_gr = {
339 .init = gk104_gr_init, 354 .oneinit_tiles = gf100_gr_oneinit_tiles,
355 .oneinit_sm_id = gf100_gr_oneinit_sm_id,
356 .init = gf100_gr_init,
340 .init_gpc_mmu = gf100_gr_init_gpc_mmu, 357 .init_gpc_mmu = gf100_gr_init_gpc_mmu,
358 .init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
359 .init_zcull = gf117_gr_init_zcull,
360 .init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
341 .init_rop_active_fbps = gk104_gr_init_rop_active_fbps, 361 .init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
362 .init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
363 .init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
364 .init_419cc0 = gf100_gr_init_419cc0,
365 .init_419eb4 = gk110_gr_init_419eb4,
342 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, 366 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
367 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
368 .init_shader_exceptions = gf100_gr_init_shader_exceptions,
369 .init_400054 = gf100_gr_init_400054,
370 .trap_mp = gf100_gr_trap_mp,
343 .mmio = gk110_gr_pack_mmio, 371 .mmio = gk110_gr_pack_mmio,
344 .fecs.ucode = &gk110_gr_fecs_ucode, 372 .fecs.ucode = &gk110_gr_fecs_ucode,
345 .gpccs.ucode = &gk110_gr_gpccs_ucode, 373 .gpccs.ucode = &gk110_gr_gpccs_ucode,
@@ -347,6 +375,7 @@ gk110_gr = {
347 .ppc_nr = 2, 375 .ppc_nr = 2,
348 .grctx = &gk110_grctx, 376 .grctx = &gk110_grctx,
349 .clkgate_pack = gk110_clkgate_pack, 377 .clkgate_pack = gk110_clkgate_pack,
378 .zbc = &gf100_gr_zbc,
350 .sclass = { 379 .sclass = {
351 { -1, -1, FERMI_TWOD_A }, 380 { -1, -1, FERMI_TWOD_A },
352 { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, 381 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 1912c0bfd7ee..a38faa215635 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -82,6 +82,7 @@ gk110b_gr_pack_mmio[] = {
82 { gf119_gr_init_gpm_0 }, 82 { gf119_gr_init_gpm_0 },
83 { gk110_gr_init_gpc_unk_1 }, 83 { gk110_gr_init_gpc_unk_1 },
84 { gf100_gr_init_gcc_0 }, 84 { gf100_gr_init_gcc_0 },
85 { gk104_gr_init_gpc_unk_2 },
85 { gk104_gr_init_tpccs_0 }, 86 { gk104_gr_init_tpccs_0 },
86 { gk110_gr_init_tex_0 }, 87 { gk110_gr_init_tex_0 },
87 { gk104_gr_init_pe_0 }, 88 { gk104_gr_init_pe_0 },
@@ -102,16 +103,30 @@ gk110b_gr_pack_mmio[] = {
102 103
103static const struct gf100_gr_func 104static const struct gf100_gr_func
104gk110b_gr = { 105gk110b_gr = {
105 .init = gk104_gr_init, 106 .oneinit_tiles = gf100_gr_oneinit_tiles,
107 .oneinit_sm_id = gf100_gr_oneinit_sm_id,
108 .init = gf100_gr_init,
106 .init_gpc_mmu = gf100_gr_init_gpc_mmu, 109 .init_gpc_mmu = gf100_gr_init_gpc_mmu,
110 .init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
111 .init_zcull = gf117_gr_init_zcull,
112 .init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
107 .init_rop_active_fbps = gk104_gr_init_rop_active_fbps, 113 .init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
114 .init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
115 .init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
116 .init_419cc0 = gf100_gr_init_419cc0,
117 .init_419eb4 = gk110_gr_init_419eb4,
108 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, 118 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
119 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
120 .init_shader_exceptions = gf100_gr_init_shader_exceptions,
121 .init_400054 = gf100_gr_init_400054,
122 .trap_mp = gf100_gr_trap_mp,
109 .mmio = gk110b_gr_pack_mmio, 123 .mmio = gk110b_gr_pack_mmio,
110 .fecs.ucode = &gk110_gr_fecs_ucode, 124 .fecs.ucode = &gk110_gr_fecs_ucode,
111 .gpccs.ucode = &gk110_gr_gpccs_ucode, 125 .gpccs.ucode = &gk110_gr_gpccs_ucode,
112 .rops = gf100_gr_rops, 126 .rops = gf100_gr_rops,
113 .ppc_nr = 2, 127 .ppc_nr = 2,
114 .grctx = &gk110b_grctx, 128 .grctx = &gk110b_grctx,
129 .zbc = &gf100_gr_zbc,
115 .sclass = { 130 .sclass = {
116 { -1, -1, FERMI_TWOD_A }, 131 { -1, -1, FERMI_TWOD_A },
117 { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, 132 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 1fc258163f25..58456660e603 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -121,6 +121,7 @@ gk208_gr_pack_mmio[] = {
121 { gf119_gr_init_gpm_0 }, 121 { gf119_gr_init_gpm_0 },
122 { gk110_gr_init_gpc_unk_1 }, 122 { gk110_gr_init_gpc_unk_1 },
123 { gf100_gr_init_gcc_0 }, 123 { gf100_gr_init_gcc_0 },
124 { gk104_gr_init_gpc_unk_2 },
124 { gk104_gr_init_tpccs_0 }, 125 { gk104_gr_init_tpccs_0 },
125 { gk208_gr_init_tex_0 }, 126 { gk208_gr_init_tex_0 },
126 { gk104_gr_init_pe_0 }, 127 { gk104_gr_init_pe_0 },
@@ -161,16 +162,29 @@ gk208_gr_gpccs_ucode = {
161 162
162static const struct gf100_gr_func 163static const struct gf100_gr_func
163gk208_gr = { 164gk208_gr = {
164 .init = gk104_gr_init, 165 .oneinit_tiles = gf100_gr_oneinit_tiles,
166 .oneinit_sm_id = gf100_gr_oneinit_sm_id,
167 .init = gf100_gr_init,
165 .init_gpc_mmu = gf100_gr_init_gpc_mmu, 168 .init_gpc_mmu = gf100_gr_init_gpc_mmu,
169 .init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
170 .init_zcull = gf117_gr_init_zcull,
171 .init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
166 .init_rop_active_fbps = gk104_gr_init_rop_active_fbps, 172 .init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
173 .init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
174 .init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
175 .init_419cc0 = gf100_gr_init_419cc0,
167 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, 176 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
177 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
178 .init_shader_exceptions = gf100_gr_init_shader_exceptions,
179 .init_400054 = gf100_gr_init_400054,
180 .trap_mp = gf100_gr_trap_mp,
168 .mmio = gk208_gr_pack_mmio, 181 .mmio = gk208_gr_pack_mmio,
169 .fecs.ucode = &gk208_gr_fecs_ucode, 182 .fecs.ucode = &gk208_gr_fecs_ucode,
170 .gpccs.ucode = &gk208_gr_gpccs_ucode, 183 .gpccs.ucode = &gk208_gr_gpccs_ucode,
171 .rops = gf100_gr_rops, 184 .rops = gf100_gr_rops,
172 .ppc_nr = 1, 185 .ppc_nr = 1,
173 .grctx = &gk208_grctx, 186 .grctx = &gk208_grctx,
187 .zbc = &gf100_gr_zbc,
174 .sclass = { 188 .sclass = {
175 { -1, -1, FERMI_TWOD_A }, 189 { -1, -1, FERMI_TWOD_A },
176 { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, 190 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index de8b806b88fd..500cb08dd608 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -219,11 +219,7 @@ int
219gk20a_gr_init(struct gf100_gr *gr) 219gk20a_gr_init(struct gf100_gr *gr)
220{ 220{
221 struct nvkm_device *device = gr->base.engine.subdev.device; 221 struct nvkm_device *device = gr->base.engine.subdev.device;
222 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total); 222 int ret;
223 u32 data[TPC_MAX / 8] = {};
224 u8 tpcnr[GPC_MAX];
225 int gpc, tpc;
226 int ret, i;
227 223
228 /* Clear SCC RAM */ 224 /* Clear SCC RAM */
229 nvkm_wr32(device, 0x40802c, 0x1); 225 nvkm_wr32(device, 0x40802c, 0x1);
@@ -246,31 +242,7 @@ gk20a_gr_init(struct gf100_gr *gr)
246 nvkm_mask(device, 0x503018, 0x1, 0x1); 242 nvkm_mask(device, 0x503018, 0x1, 0x1);
247 243
248 /* Zcull init */ 244 /* Zcull init */
249 memset(data, 0x00, sizeof(data)); 245 gr->func->init_zcull(gr);
250 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
251 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
252 do {
253 gpc = (gpc + 1) % gr->gpc_nr;
254 } while (!tpcnr[gpc]);
255 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
256
257 data[i / 8] |= tpc << ((i % 8) * 4);
258 }
259
260 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
261 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
262 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
263 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
264
265 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
266 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
267 gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
268 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
269 gr->tpc_total);
270 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
271 }
272
273 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
274 246
275 gr->func->init_rop_active_fbps(gr); 247 gr->func->init_rop_active_fbps(gr);
276 248
@@ -310,12 +282,17 @@ gk20a_gr_init(struct gf100_gr *gr)
310 282
311static const struct gf100_gr_func 283static const struct gf100_gr_func
312gk20a_gr = { 284gk20a_gr = {
285 .oneinit_tiles = gf100_gr_oneinit_tiles,
286 .oneinit_sm_id = gf100_gr_oneinit_sm_id,
313 .init = gk20a_gr_init, 287 .init = gk20a_gr_init,
288 .init_zcull = gf117_gr_init_zcull,
314 .init_rop_active_fbps = gk104_gr_init_rop_active_fbps, 289 .init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
290 .trap_mp = gf100_gr_trap_mp,
315 .set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask, 291 .set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
316 .rops = gf100_gr_rops, 292 .rops = gf100_gr_rops,
317 .ppc_nr = 1, 293 .ppc_nr = 1,
318 .grctx = &gk20a_grctx, 294 .grctx = &gk20a_grctx,
295 .zbc = &gf100_gr_zbc,
319 .sclass = { 296 .sclass = {
320 { -1, -1, FERMI_TWOD_A }, 297 { -1, -1, FERMI_TWOD_A },
321 { -1, -1, KEPLER_INLINE_TO_MEMORY_A }, 298 { -1, -1, KEPLER_INLINE_TO_MEMORY_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 2c67fac576d1..92e31d397207 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -25,6 +25,8 @@
25#include "ctxgf100.h" 25#include "ctxgf100.h"
26 26
27#include <subdev/bios.h> 27#include <subdev/bios.h>
28#include <subdev/bios/bit.h>
29#include <subdev/bios/init.h>
28#include <subdev/bios/P0260.h> 30#include <subdev/bios/P0260.h>
29#include <subdev/fb.h> 31#include <subdev/fb.h>
30 32
@@ -36,6 +38,10 @@
36 38
37static const struct gf100_gr_init 39static const struct gf100_gr_init
38gm107_gr_init_main_0[] = { 40gm107_gr_init_main_0[] = {
41 { 0x40880c, 1, 0x04, 0x00000000 },
42 { 0x408910, 1, 0x04, 0x00000000 },
43 { 0x408984, 1, 0x04, 0x00000000 },
44 { 0x41a8a0, 1, 0x04, 0x00000000 },
39 { 0x400080, 1, 0x04, 0x003003c2 }, 45 { 0x400080, 1, 0x04, 0x003003c2 },
40 { 0x400088, 1, 0x04, 0x0001bfe7 }, 46 { 0x400088, 1, 0x04, 0x0001bfe7 },
41 { 0x40008c, 1, 0x04, 0x00060000 }, 47 { 0x40008c, 1, 0x04, 0x00060000 },
@@ -210,14 +216,13 @@ gm107_gr_init_cbm_0[] = {
210static const struct gf100_gr_init 216static const struct gf100_gr_init
211gm107_gr_init_be_0[] = { 217gm107_gr_init_be_0[] = {
212 { 0x408890, 1, 0x04, 0x000000ff }, 218 { 0x408890, 1, 0x04, 0x000000ff },
213 { 0x40880c, 1, 0x04, 0x00000000 },
214 { 0x408850, 1, 0x04, 0x00000004 }, 219 { 0x408850, 1, 0x04, 0x00000004 },
215 { 0x408878, 1, 0x04, 0x00c81603 }, 220 { 0x408878, 1, 0x04, 0x00c81603 },
216 { 0x40887c, 1, 0x04, 0x80543432 }, 221 { 0x40887c, 1, 0x04, 0x80543432 },
217 { 0x408880, 1, 0x04, 0x0010581e }, 222 { 0x408880, 1, 0x04, 0x0010581e },
218 { 0x408884, 1, 0x04, 0x00001205 }, 223 { 0x408884, 1, 0x04, 0x00001205 },
219 { 0x408974, 1, 0x04, 0x000000ff }, 224 { 0x408974, 1, 0x04, 0x000000ff },
220 { 0x408910, 9, 0x04, 0x00000000 }, 225 { 0x408914, 8, 0x04, 0x00000000 },
221 { 0x408950, 1, 0x04, 0x00000000 }, 226 { 0x408950, 1, 0x04, 0x00000000 },
222 { 0x408954, 1, 0x04, 0x0000ffff }, 227 { 0x408954, 1, 0x04, 0x0000ffff },
223 { 0x408958, 1, 0x04, 0x00000034 }, 228 { 0x408958, 1, 0x04, 0x00000034 },
@@ -227,7 +232,6 @@ gm107_gr_init_be_0[] = {
227 { 0x408968, 1, 0x04, 0x02808833 }, 232 { 0x408968, 1, 0x04, 0x02808833 },
228 { 0x40896c, 1, 0x04, 0x01f02438 }, 233 { 0x40896c, 1, 0x04, 0x01f02438 },
229 { 0x408970, 1, 0x04, 0x00012c00 }, 234 { 0x408970, 1, 0x04, 0x00012c00 },
230 { 0x408984, 1, 0x04, 0x00000000 },
231 { 0x408988, 1, 0x04, 0x08040201 }, 235 { 0x408988, 1, 0x04, 0x08040201 },
232 { 0x40898c, 1, 0x04, 0x80402010 }, 236 { 0x40898c, 1, 0x04, 0x80402010 },
233 {} 237 {}
@@ -260,6 +264,7 @@ gm107_gr_pack_mmio[] = {
260 { gf100_gr_init_gpm_0 }, 264 { gf100_gr_init_gpm_0 },
261 { gm107_gr_init_gpc_unk_1 }, 265 { gm107_gr_init_gpc_unk_1 },
262 { gf100_gr_init_gcc_0 }, 266 { gf100_gr_init_gcc_0 },
267 { gk104_gr_init_gpc_unk_2 },
263 { gm107_gr_init_tpccs_0 }, 268 { gm107_gr_init_tpccs_0 },
264 { gm107_gr_init_tex_0 }, 269 { gm107_gr_init_tex_0 },
265 { gm107_gr_init_pe_0 }, 270 { gm107_gr_init_pe_0 },
@@ -280,6 +285,52 @@ gm107_gr_pack_mmio[] = {
280 ******************************************************************************/ 285 ******************************************************************************/
281 286
282void 287void
288gm107_gr_init_400054(struct gf100_gr *gr)
289{
290 nvkm_wr32(gr->base.engine.subdev.device, 0x400054, 0x2c350f63);
291}
292
293void
294gm107_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
295{
296 struct nvkm_device *device = gr->base.engine.subdev.device;
297 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
298 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
299}
300
301void
302gm107_gr_init_504430(struct gf100_gr *gr, int gpc, int tpc)
303{
304 struct nvkm_device *device = gr->base.engine.subdev.device;
305 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
306}
307
308static void
309gm107_gr_init_bios_2(struct gf100_gr *gr)
310{
311 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
312 struct nvkm_device *device = subdev->device;
313 struct nvkm_bios *bios = device->bios;
314 struct bit_entry bit_P;
315 if (!bit_entry(bios, 'P', &bit_P) &&
316 bit_P.version == 2 && bit_P.length >= 0x2c) {
317 u32 data = nvbios_rd32(bios, bit_P.offset + 0x28);
318 if (data) {
319 u8 ver = nvbios_rd08(bios, data + 0x00);
320 u8 hdr = nvbios_rd08(bios, data + 0x01);
321 if (ver == 0x20 && hdr >= 8) {
322 data = nvbios_rd32(bios, data + 0x04);
323 if (data) {
324 u32 save = nvkm_rd32(device, 0x619444);
325 nvbios_init(subdev, data);
326 nvkm_wr32(device, 0x619444, save);
327 }
328 }
329 }
330 }
331}
332
333void
283gm107_gr_init_bios(struct gf100_gr *gr) 334gm107_gr_init_bios(struct gf100_gr *gr)
284{ 335{
285 static const struct { 336 static const struct {
@@ -308,115 +359,17 @@ gm107_gr_init_bios(struct gf100_gr *gr)
308 } 359 }
309} 360}
310 361
311static int 362static void
312gm107_gr_init(struct gf100_gr *gr) 363gm107_gr_init_gpc_mmu(struct gf100_gr *gr)
313{ 364{
314 struct nvkm_device *device = gr->base.engine.subdev.device; 365 struct nvkm_device *device = gr->base.engine.subdev.device;
315 struct nvkm_fb *fb = device->fb; 366 struct nvkm_fb *fb = device->fb;
316 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
317 u32 data[TPC_MAX / 8] = {};
318 u8 tpcnr[GPC_MAX];
319 int gpc, tpc, rop;
320 int i;
321 367
322 nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000); 368 nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
323 nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000); 369 nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
324 nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000); 370 nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
325 nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(fb->mmu_wr) >> 8); 371 nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(fb->mmu_wr) >> 8);
326 nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8); 372 nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8);
327
328 gf100_gr_mmio(gr, gr->func->mmio);
329
330 gm107_gr_init_bios(gr);
331
332 nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
333
334 memset(data, 0x00, sizeof(data));
335 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
336 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
337 do {
338 gpc = (gpc + 1) % gr->gpc_nr;
339 } while (!tpcnr[gpc]);
340 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
341
342 data[i / 8] |= tpc << ((i % 8) * 4);
343 }
344
345 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
346 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
347 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
348 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
349
350 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
351 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
352 gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
353 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
354 gr->tpc_total);
355 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
356 }
357
358 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
359 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
360
361 gr->func->init_rop_active_fbps(gr);
362
363 nvkm_wr32(device, 0x400500, 0x00010001);
364
365 nvkm_wr32(device, 0x400100, 0xffffffff);
366 nvkm_wr32(device, 0x40013c, 0xffffffff);
367 nvkm_wr32(device, 0x400124, 0x00000002);
368 nvkm_wr32(device, 0x409c24, 0x000e0000);
369
370 nvkm_wr32(device, 0x404000, 0xc0000000);
371 nvkm_wr32(device, 0x404600, 0xc0000000);
372 nvkm_wr32(device, 0x408030, 0xc0000000);
373 nvkm_wr32(device, 0x404490, 0xc0000000);
374 nvkm_wr32(device, 0x406018, 0xc0000000);
375 nvkm_wr32(device, 0x407020, 0x40000000);
376 nvkm_wr32(device, 0x405840, 0xc0000000);
377 nvkm_wr32(device, 0x405844, 0x00ffffff);
378 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
379
380 gr->func->init_ppc_exceptions(gr);
381
382 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
383 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
384 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
385 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
386 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
387 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
388 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
389 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
390 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
391 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
392 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
393 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
394 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
395 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
396 }
397 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
398 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
399 }
400
401 for (rop = 0; rop < gr->rop_nr; rop++) {
402 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
403 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
404 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
405 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
406 }
407
408 nvkm_wr32(device, 0x400108, 0xffffffff);
409 nvkm_wr32(device, 0x400138, 0xffffffff);
410 nvkm_wr32(device, 0x400118, 0xffffffff);
411 nvkm_wr32(device, 0x400130, 0xffffffff);
412 nvkm_wr32(device, 0x40011c, 0xffffffff);
413 nvkm_wr32(device, 0x400134, 0xffffffff);
414
415 nvkm_wr32(device, 0x400054, 0x2c350f63);
416
417 gf100_gr_zbc_init(gr);
418
419 return gf100_gr_init_ctxctl(gr);
420} 373}
421 374
422#include "fuc/hubgm107.fuc5.h" 375#include "fuc/hubgm107.fuc5.h"
@@ -441,15 +394,32 @@ gm107_gr_gpccs_ucode = {
441 394
442static const struct gf100_gr_func 395static const struct gf100_gr_func
443gm107_gr = { 396gm107_gr = {
444 .init = gm107_gr_init, 397 .oneinit_tiles = gf100_gr_oneinit_tiles,
398 .oneinit_sm_id = gf100_gr_oneinit_sm_id,
399 .init = gf100_gr_init,
400 .init_gpc_mmu = gm107_gr_init_gpc_mmu,
401 .init_bios = gm107_gr_init_bios,
402 .init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
403 .init_zcull = gf117_gr_init_zcull,
404 .init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
445 .init_rop_active_fbps = gk104_gr_init_rop_active_fbps, 405 .init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
406 .init_bios_2 = gm107_gr_init_bios_2,
407 .init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
408 .init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
409 .init_419cc0 = gf100_gr_init_419cc0,
446 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, 410 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
411 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
412 .init_504430 = gm107_gr_init_504430,
413 .init_shader_exceptions = gm107_gr_init_shader_exceptions,
414 .init_400054 = gm107_gr_init_400054,
415 .trap_mp = gf100_gr_trap_mp,
447 .mmio = gm107_gr_pack_mmio, 416 .mmio = gm107_gr_pack_mmio,
448 .fecs.ucode = &gm107_gr_fecs_ucode, 417 .fecs.ucode = &gm107_gr_fecs_ucode,
449 .gpccs.ucode = &gm107_gr_gpccs_ucode, 418 .gpccs.ucode = &gm107_gr_gpccs_ucode,
450 .rops = gf100_gr_rops, 419 .rops = gf100_gr_rops,
451 .ppc_nr = 2, 420 .ppc_nr = 2,
452 .grctx = &gm107_grctx, 421 .grctx = &gm107_grctx,
422 .zbc = &gf100_gr_zbc,
453 .sclass = { 423 .sclass = {
454 { -1, -1, FERMI_TWOD_A }, 424 { -1, -1, FERMI_TWOD_A },
455 { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, 425 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 6435f1257572..eff30662b984 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -39,6 +39,22 @@ gm200_gr_rops(struct gf100_gr *gr)
39} 39}
40 40
41void 41void
42gm200_gr_init_ds_hww_esr_2(struct gf100_gr *gr)
43{
44 struct nvkm_device *device = gr->base.engine.subdev.device;
45 nvkm_wr32(device, 0x405848, 0xc0000000);
46 nvkm_mask(device, 0x40584c, 0x00000001, 0x00000001);
47}
48
49void
50gm200_gr_init_num_active_ltcs(struct gf100_gr *gr)
51{
52 struct nvkm_device *device = gr->base.engine.subdev.device;
53 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
54 nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
55}
56
57void
42gm200_gr_init_gpc_mmu(struct gf100_gr *gr) 58gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
43{ 59{
44 struct nvkm_device *device = gr->base.engine.subdev.device; 60 struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -61,111 +77,51 @@ gm200_gr_init_rop_active_fbps(struct gf100_gr *gr)
61 nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */ 77 nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
62} 78}
63 79
64int 80static u8
65gm200_gr_init(struct gf100_gr *gr) 81gm200_gr_tile_map_6_24[] = {
66{ 82 0, 1, 2, 3, 4, 5, 3, 4, 5, 0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5, 0, 1, 2,
67 struct nvkm_device *device = gr->base.engine.subdev.device; 83};
68 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
69 u32 data[TPC_MAX / 8] = {};
70 u8 tpcnr[GPC_MAX];
71 int gpc, tpc, rop;
72 int i;
73
74 gr->func->init_gpc_mmu(gr);
75
76 gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
77
78 gm107_gr_init_bios(gr);
79
80 nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
81
82 memset(data, 0x00, sizeof(data));
83 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
84 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
85 do {
86 gpc = (gpc + 1) % gr->gpc_nr;
87 } while (!tpcnr[gpc]);
88 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
89
90 data[i / 8] |= tpc << ((i % 8) * 4);
91 }
92
93 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
94 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
95 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
96 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
97
98 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
99 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
100 gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
101 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
102 gr->tpc_total);
103 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
104 }
105 84
106 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918); 85static u8
107 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800)); 86gm200_gr_tile_map_4_16[] = {
108 nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804)); 87 0, 1, 2, 3, 2, 3, 0, 1, 3, 0, 1, 2, 1, 2, 3, 0,
88};
109 89
110 gr->func->init_rop_active_fbps(gr); 90static u8
91gm200_gr_tile_map_2_8[] = {
92 0, 1, 1, 0, 0, 1, 1, 0,
93};
111 94
112 nvkm_wr32(device, 0x400500, 0x00010001); 95void
113 nvkm_wr32(device, 0x400100, 0xffffffff); 96gm200_gr_oneinit_sm_id(struct gf100_gr *gr)
114 nvkm_wr32(device, 0x40013c, 0xffffffff); 97{
115 nvkm_wr32(device, 0x400124, 0x00000002); 98 /*XXX: There's a different algorithm here I've not yet figured out. */
116 nvkm_wr32(device, 0x409c24, 0x000e0000); 99 gf100_gr_oneinit_sm_id(gr);
117 nvkm_wr32(device, 0x405848, 0xc0000000); 100}
118 nvkm_wr32(device, 0x40584c, 0x00000001);
119 nvkm_wr32(device, 0x404000, 0xc0000000);
120 nvkm_wr32(device, 0x404600, 0xc0000000);
121 nvkm_wr32(device, 0x408030, 0xc0000000);
122 nvkm_wr32(device, 0x404490, 0xc0000000);
123 nvkm_wr32(device, 0x406018, 0xc0000000);
124 nvkm_wr32(device, 0x407020, 0x40000000);
125 nvkm_wr32(device, 0x405840, 0xc0000000);
126 nvkm_wr32(device, 0x405844, 0x00ffffff);
127 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
128
129 gr->func->init_ppc_exceptions(gr);
130
131 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
132 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
133 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
134 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
135 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
136 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
137 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
138 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
139 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
140 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
141 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
142 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
143 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
144 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
145 }
146 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
147 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
148 }
149 101
150 for (rop = 0; rop < gr->rop_nr; rop++) { 102void
151 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000); 103gm200_gr_oneinit_tiles(struct gf100_gr *gr)
152 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000); 104{
153 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff); 105 /*XXX: Not sure what this is about. The algorithm from NVGPU
154 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff); 106 * seems to work for all boards I tried from earlier (and
107 * later) GPUs except in these specific configurations.
108 *
109 * Let's just hardcode them for now.
110 */
111 if (gr->gpc_nr == 2 && gr->tpc_total == 8) {
112 memcpy(gr->tile, gm200_gr_tile_map_2_8, gr->tpc_total);
113 gr->screen_tile_row_offset = 1;
114 } else
115 if (gr->gpc_nr == 4 && gr->tpc_total == 16) {
116 memcpy(gr->tile, gm200_gr_tile_map_4_16, gr->tpc_total);
117 gr->screen_tile_row_offset = 4;
118 } else
119 if (gr->gpc_nr == 6 && gr->tpc_total == 24) {
120 memcpy(gr->tile, gm200_gr_tile_map_6_24, gr->tpc_total);
121 gr->screen_tile_row_offset = 5;
122 } else {
123 gf100_gr_oneinit_tiles(gr);
155 } 124 }
156
157 nvkm_wr32(device, 0x400108, 0xffffffff);
158 nvkm_wr32(device, 0x400138, 0xffffffff);
159 nvkm_wr32(device, 0x400118, 0xffffffff);
160 nvkm_wr32(device, 0x400130, 0xffffffff);
161 nvkm_wr32(device, 0x40011c, 0xffffffff);
162 nvkm_wr32(device, 0x400134, 0xffffffff);
163
164 nvkm_wr32(device, 0x400054, 0x2c350f63);
165
166 gf100_gr_zbc_init(gr);
167
168 return gf100_gr_init_ctxctl(gr);
169} 125}
170 126
171int 127int
@@ -208,13 +164,30 @@ gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
208 164
209static const struct gf100_gr_func 165static const struct gf100_gr_func
210gm200_gr = { 166gm200_gr = {
211 .init = gm200_gr_init, 167 .oneinit_tiles = gm200_gr_oneinit_tiles,
168 .oneinit_sm_id = gm200_gr_oneinit_sm_id,
169 .init = gf100_gr_init,
212 .init_gpc_mmu = gm200_gr_init_gpc_mmu, 170 .init_gpc_mmu = gm200_gr_init_gpc_mmu,
171 .init_bios = gm107_gr_init_bios,
172 .init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
173 .init_zcull = gf117_gr_init_zcull,
174 .init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
213 .init_rop_active_fbps = gm200_gr_init_rop_active_fbps, 175 .init_rop_active_fbps = gm200_gr_init_rop_active_fbps,
176 .init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
177 .init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
178 .init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
179 .init_419cc0 = gf100_gr_init_419cc0,
214 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, 180 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
181 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
182 .init_504430 = gm107_gr_init_504430,
183 .init_shader_exceptions = gm107_gr_init_shader_exceptions,
184 .init_400054 = gm107_gr_init_400054,
185 .trap_mp = gf100_gr_trap_mp,
215 .rops = gm200_gr_rops, 186 .rops = gm200_gr_rops,
187 .tpc_nr = 4,
216 .ppc_nr = 2, 188 .ppc_nr = 2,
217 .grctx = &gm200_grctx, 189 .grctx = &gm200_grctx,
190 .zbc = &gf100_gr_zbc,
218 .sclass = { 191 .sclass = {
219 { -1, -1, FERMI_TWOD_A }, 192 { -1, -1, FERMI_TWOD_A },
220 { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, 193 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index 69479af1d829..a667770ce3cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -64,13 +64,18 @@ gm20b_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
64 64
65static const struct gf100_gr_func 65static const struct gf100_gr_func
66gm20b_gr = { 66gm20b_gr = {
67 .oneinit_tiles = gm200_gr_oneinit_tiles,
68 .oneinit_sm_id = gm200_gr_oneinit_sm_id,
67 .init = gk20a_gr_init, 69 .init = gk20a_gr_init,
70 .init_zcull = gf117_gr_init_zcull,
68 .init_gpc_mmu = gm20b_gr_init_gpc_mmu, 71 .init_gpc_mmu = gm20b_gr_init_gpc_mmu,
69 .init_rop_active_fbps = gk104_gr_init_rop_active_fbps, 72 .init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
73 .trap_mp = gf100_gr_trap_mp,
70 .set_hww_esr_report_mask = gm20b_gr_set_hww_esr_report_mask, 74 .set_hww_esr_report_mask = gm20b_gr_set_hww_esr_report_mask,
71 .rops = gm200_gr_rops, 75 .rops = gm200_gr_rops,
72 .ppc_nr = 1, 76 .ppc_nr = 1,
73 .grctx = &gm20b_grctx, 77 .grctx = &gm20b_grctx,
78 .zbc = &gf100_gr_zbc,
74 .sclass = { 79 .sclass = {
75 { -1, -1, FERMI_TWOD_A }, 80 { -1, -1, FERMI_TWOD_A },
76 { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, 81 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 867a5f7cc5bc..9d0521ce309a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -29,143 +29,103 @@
29/******************************************************************************* 29/*******************************************************************************
30 * PGRAPH engine/subdev functions 30 * PGRAPH engine/subdev functions
31 ******************************************************************************/ 31 ******************************************************************************/
32
33void 32void
34gp100_gr_init_rop_active_fbps(struct gf100_gr *gr) 33gp100_gr_zbc_clear_color(struct gf100_gr *gr, int zbc)
35{ 34{
36 struct nvkm_device *device = gr->base.engine.subdev.device; 35 struct nvkm_device *device = gr->base.engine.subdev.device;
37 /*XXX: otherwise identical to gm200 aside from mask.. do everywhere? */ 36 const int znum = zbc - 1;
38 const u32 fbp_count = nvkm_rd32(device, 0x12006c) & 0x0000000f; 37 const u32 zoff = znum * 4;
39 nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */ 38
40 nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */ 39 if (gr->zbc_color[zbc].format) {
40 nvkm_wr32(device, 0x418010 + zoff, gr->zbc_color[zbc].ds[0]);
41 nvkm_wr32(device, 0x41804c + zoff, gr->zbc_color[zbc].ds[1]);
42 nvkm_wr32(device, 0x418088 + zoff, gr->zbc_color[zbc].ds[2]);
43 nvkm_wr32(device, 0x4180c4 + zoff, gr->zbc_color[zbc].ds[3]);
44 }
45
46 nvkm_mask(device, 0x418100 + ((znum / 4) * 4),
47 0x0000007f << ((znum % 4) * 7),
48 gr->zbc_color[zbc].format << ((znum % 4) * 7));
41} 49}
42 50
43void 51void
44gp100_gr_init_num_active_ltcs(struct gf100_gr *gr) 52gp100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc)
45{ 53{
46 struct nvkm_device *device = gr->base.engine.subdev.device; 54 struct nvkm_device *device = gr->base.engine.subdev.device;
47 55 const int znum = zbc - 1;
48 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800)); 56 const u32 zoff = znum * 4;
49 nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804)); 57
58 if (gr->zbc_depth[zbc].format)
59 nvkm_wr32(device, 0x418110 + zoff, gr->zbc_depth[zbc].ds);
60 nvkm_mask(device, 0x41814c + ((znum / 4) * 4),
61 0x0000007f << ((znum % 4) * 7),
62 gr->zbc_depth[zbc].format << ((znum % 4) * 7));
50} 63}
51 64
52int 65static const struct gf100_gr_func_zbc
53gp100_gr_init(struct gf100_gr *gr) 66gp100_gr_zbc = {
67 .clear_color = gp100_gr_zbc_clear_color,
68 .clear_depth = gp100_gr_zbc_clear_depth,
69};
70
71void
72gp100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
54{ 73{
55 struct nvkm_device *device = gr->base.engine.subdev.device; 74 struct nvkm_device *device = gr->base.engine.subdev.device;
56 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total); 75 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
57 u32 data[TPC_MAX / 8] = {}; 76 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000105);
58 u8 tpcnr[GPC_MAX]; 77}
59 int gpc, tpc, rop;
60 int i;
61
62 gr->func->init_gpc_mmu(gr);
63
64 gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
65
66 nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
67
68 memset(data, 0x00, sizeof(data));
69 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
70 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
71 do {
72 gpc = (gpc + 1) % gr->gpc_nr;
73 } while (!tpcnr[gpc]);
74 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
75
76 data[i / 8] |= tpc << ((i % 8) * 4);
77 }
78
79 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
80 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
81 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
82 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
83
84 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
85 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
86 gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
87 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
88 gr->tpc_total);
89 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
90 }
91
92 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
93 gr->func->init_num_active_ltcs(gr);
94
95 gr->func->init_rop_active_fbps(gr);
96 if (gr->func->init_swdx_pes_mask)
97 gr->func->init_swdx_pes_mask(gr);
98
99 nvkm_wr32(device, 0x400500, 0x00010001);
100 nvkm_wr32(device, 0x400100, 0xffffffff);
101 nvkm_wr32(device, 0x40013c, 0xffffffff);
102 nvkm_wr32(device, 0x400124, 0x00000002);
103 nvkm_wr32(device, 0x409c24, 0x000f0002);
104 nvkm_wr32(device, 0x405848, 0xc0000000);
105 nvkm_mask(device, 0x40584c, 0x00000000, 0x00000001);
106 nvkm_wr32(device, 0x404000, 0xc0000000);
107 nvkm_wr32(device, 0x404600, 0xc0000000);
108 nvkm_wr32(device, 0x408030, 0xc0000000);
109 nvkm_wr32(device, 0x404490, 0xc0000000);
110 nvkm_wr32(device, 0x406018, 0xc0000000);
111 nvkm_wr32(device, 0x407020, 0x40000000);
112 nvkm_wr32(device, 0x405840, 0xc0000000);
113 nvkm_wr32(device, 0x405844, 0x00ffffff);
114 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
115 78
79static void
80gp100_gr_init_419c9c(struct gf100_gr *gr)
81{
82 struct nvkm_device *device = gr->base.engine.subdev.device;
116 nvkm_mask(device, 0x419c9c, 0x00010000, 0x00010000); 83 nvkm_mask(device, 0x419c9c, 0x00010000, 0x00010000);
117 nvkm_mask(device, 0x419c9c, 0x00020000, 0x00020000); 84 nvkm_mask(device, 0x419c9c, 0x00020000, 0x00020000);
85}
118 86
119 gr->func->init_ppc_exceptions(gr); 87void
120 88gp100_gr_init_fecs_exceptions(struct gf100_gr *gr)
121 for (gpc = 0; gpc < gr->gpc_nr; gpc++) { 89{
122 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000); 90 nvkm_wr32(gr->base.engine.subdev.device, 0x409c24, 0x000f0002);
123 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000); 91}
124 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
125 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
126 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
127 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
128 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
129 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
130 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
131 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
132 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
133 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
134 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000105);
135 }
136 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
137 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
138 }
139
140 for (rop = 0; rop < gr->rop_nr; rop++) {
141 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
142 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
143 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
144 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
145 }
146
147 nvkm_wr32(device, 0x400108, 0xffffffff);
148 nvkm_wr32(device, 0x400138, 0xffffffff);
149 nvkm_wr32(device, 0x400118, 0xffffffff);
150 nvkm_wr32(device, 0x400130, 0xffffffff);
151 nvkm_wr32(device, 0x40011c, 0xffffffff);
152 nvkm_wr32(device, 0x400134, 0xffffffff);
153
154 gf100_gr_zbc_init(gr);
155 92
156 return gf100_gr_init_ctxctl(gr); 93void
94gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
95{
96 struct nvkm_device *device = gr->base.engine.subdev.device;
97 /*XXX: otherwise identical to gm200 aside from mask.. do everywhere? */
98 const u32 fbp_count = nvkm_rd32(device, 0x12006c) & 0x0000000f;
99 nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
100 nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
157} 101}
158 102
159static const struct gf100_gr_func 103static const struct gf100_gr_func
160gp100_gr = { 104gp100_gr = {
161 .init = gp100_gr_init, 105 .oneinit_tiles = gm200_gr_oneinit_tiles,
106 .oneinit_sm_id = gm200_gr_oneinit_sm_id,
107 .init = gf100_gr_init,
162 .init_gpc_mmu = gm200_gr_init_gpc_mmu, 108 .init_gpc_mmu = gm200_gr_init_gpc_mmu,
109 .init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
110 .init_zcull = gf117_gr_init_zcull,
111 .init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
163 .init_rop_active_fbps = gp100_gr_init_rop_active_fbps, 112 .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
113 .init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
114 .init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
115 .init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
116 .init_419cc0 = gf100_gr_init_419cc0,
117 .init_419c9c = gp100_gr_init_419c9c,
164 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, 118 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
165 .init_num_active_ltcs = gp100_gr_init_num_active_ltcs, 119 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
120 .init_504430 = gm107_gr_init_504430,
121 .init_shader_exceptions = gp100_gr_init_shader_exceptions,
122 .trap_mp = gf100_gr_trap_mp,
166 .rops = gm200_gr_rops, 123 .rops = gm200_gr_rops,
124 .gpc_nr = 6,
125 .tpc_nr = 5,
167 .ppc_nr = 2, 126 .ppc_nr = 2,
168 .grctx = &gp100_grctx, 127 .grctx = &gp100_grctx,
128 .zbc = &gp100_gr_zbc,
169 .sclass = { 129 .sclass = {
170 { -1, -1, FERMI_TWOD_A }, 130 { -1, -1, FERMI_TWOD_A },
171 { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, 131 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 61e3a0b08559..37f7d739bf80 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -26,6 +26,62 @@
26 26
27#include <nvif/class.h> 27#include <nvif/class.h>
28 28
29static void
30gp102_gr_zbc_clear_stencil(struct gf100_gr *gr, int zbc)
31{
32 struct nvkm_device *device = gr->base.engine.subdev.device;
33 const int znum = zbc - 1;
34 const u32 zoff = znum * 4;
35
36 if (gr->zbc_stencil[zbc].format)
37 nvkm_wr32(device, 0x41815c + zoff, gr->zbc_stencil[zbc].ds);
38 nvkm_mask(device, 0x418198 + ((znum / 4) * 4),
39 0x0000007f << ((znum % 4) * 7),
40 gr->zbc_stencil[zbc].format << ((znum % 4) * 7));
41}
42
43static int
44gp102_gr_zbc_stencil_get(struct gf100_gr *gr, int format,
45 const u32 ds, const u32 l2)
46{
47 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
48 int zbc = -ENOSPC, i;
49
50 for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
51 if (gr->zbc_stencil[i].format) {
52 if (gr->zbc_stencil[i].format != format)
53 continue;
54 if (gr->zbc_stencil[i].ds != ds)
55 continue;
56 if (gr->zbc_stencil[i].l2 != l2) {
57 WARN_ON(1);
58 return -EINVAL;
59 }
60 return i;
61 } else {
62 zbc = (zbc < 0) ? i : zbc;
63 }
64 }
65
66 if (zbc < 0)
67 return zbc;
68
69 gr->zbc_stencil[zbc].format = format;
70 gr->zbc_stencil[zbc].ds = ds;
71 gr->zbc_stencil[zbc].l2 = l2;
72 nvkm_ltc_zbc_stencil_get(ltc, zbc, l2);
73 gr->func->zbc->clear_stencil(gr, zbc);
74 return zbc;
75}
76
77const struct gf100_gr_func_zbc
78gp102_gr_zbc = {
79 .clear_color = gp100_gr_zbc_clear_color,
80 .clear_depth = gp100_gr_zbc_clear_depth,
81 .stencil_get = gp102_gr_zbc_stencil_get,
82 .clear_stencil = gp102_gr_zbc_clear_stencil,
83};
84
29void 85void
30gp102_gr_init_swdx_pes_mask(struct gf100_gr *gr) 86gp102_gr_init_swdx_pes_mask(struct gf100_gr *gr)
31{ 87{
@@ -42,15 +98,30 @@ gp102_gr_init_swdx_pes_mask(struct gf100_gr *gr)
42 98
43static const struct gf100_gr_func 99static const struct gf100_gr_func
44gp102_gr = { 100gp102_gr = {
45 .init = gp100_gr_init, 101 .oneinit_tiles = gm200_gr_oneinit_tiles,
102 .oneinit_sm_id = gm200_gr_oneinit_sm_id,
103 .init = gf100_gr_init,
46 .init_gpc_mmu = gm200_gr_init_gpc_mmu, 104 .init_gpc_mmu = gm200_gr_init_gpc_mmu,
105 .init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
106 .init_zcull = gf117_gr_init_zcull,
107 .init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
47 .init_rop_active_fbps = gp100_gr_init_rop_active_fbps, 108 .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
48 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
49 .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask, 109 .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
50 .init_num_active_ltcs = gp100_gr_init_num_active_ltcs, 110 .init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
111 .init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
112 .init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
113 .init_419cc0 = gf100_gr_init_419cc0,
114 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
115 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
116 .init_504430 = gm107_gr_init_504430,
117 .init_shader_exceptions = gp100_gr_init_shader_exceptions,
118 .trap_mp = gf100_gr_trap_mp,
51 .rops = gm200_gr_rops, 119 .rops = gm200_gr_rops,
120 .gpc_nr = 6,
121 .tpc_nr = 5,
52 .ppc_nr = 3, 122 .ppc_nr = 3,
53 .grctx = &gp102_grctx, 123 .grctx = &gp102_grctx,
124 .zbc = &gp102_gr_zbc,
54 .sclass = { 125 .sclass = {
55 { -1, -1, FERMI_TWOD_A }, 126 { -1, -1, FERMI_TWOD_A },
56 { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, 127 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
new file mode 100644
index 000000000000..4573c914c021
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
@@ -0,0 +1,66 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "gf100.h"
23#include "ctxgf100.h"
24
25#include <nvif/class.h>
26
27static const struct gf100_gr_func
28gp104_gr = {
29 .oneinit_tiles = gm200_gr_oneinit_tiles,
30 .oneinit_sm_id = gm200_gr_oneinit_sm_id,
31 .init = gf100_gr_init,
32 .init_gpc_mmu = gm200_gr_init_gpc_mmu,
33 .init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
34 .init_zcull = gf117_gr_init_zcull,
35 .init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
36 .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
37 .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
38 .init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
39 .init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
40 .init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
41 .init_419cc0 = gf100_gr_init_419cc0,
42 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
43 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
44 .init_504430 = gm107_gr_init_504430,
45 .init_shader_exceptions = gp100_gr_init_shader_exceptions,
46 .trap_mp = gf100_gr_trap_mp,
47 .rops = gm200_gr_rops,
48 .gpc_nr = 6,
49 .tpc_nr = 5,
50 .ppc_nr = 3,
51 .grctx = &gp104_grctx,
52 .zbc = &gp102_gr_zbc,
53 .sclass = {
54 { -1, -1, FERMI_TWOD_A },
55 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
56 { -1, -1, PASCAL_B, &gf100_fermi },
57 { -1, -1, PASCAL_COMPUTE_B },
58 {}
59 }
60};
61
62int
63gp104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
64{
65 return gm200_gr_new_(&gp104_gr, device, index, pgr);
66}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index f7272323f694..812aba91653f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -28,15 +28,30 @@
28 28
29static const struct gf100_gr_func 29static const struct gf100_gr_func
30gp107_gr = { 30gp107_gr = {
31 .init = gp100_gr_init, 31 .oneinit_tiles = gm200_gr_oneinit_tiles,
32 .oneinit_sm_id = gm200_gr_oneinit_sm_id,
33 .init = gf100_gr_init,
32 .init_gpc_mmu = gm200_gr_init_gpc_mmu, 34 .init_gpc_mmu = gm200_gr_init_gpc_mmu,
35 .init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
36 .init_zcull = gf117_gr_init_zcull,
37 .init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
33 .init_rop_active_fbps = gp100_gr_init_rop_active_fbps, 38 .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
34 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
35 .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask, 39 .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
36 .init_num_active_ltcs = gp100_gr_init_num_active_ltcs, 40 .init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
41 .init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
42 .init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
43 .init_419cc0 = gf100_gr_init_419cc0,
44 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
45 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
46 .init_504430 = gm107_gr_init_504430,
47 .init_shader_exceptions = gp100_gr_init_shader_exceptions,
48 .trap_mp = gf100_gr_trap_mp,
37 .rops = gm200_gr_rops, 49 .rops = gm200_gr_rops,
50 .gpc_nr = 2,
51 .tpc_nr = 3,
38 .ppc_nr = 1, 52 .ppc_nr = 1,
39 .grctx = &gp107_grctx, 53 .grctx = &gp107_grctx,
54 .zbc = &gp102_gr_zbc,
40 .sclass = { 55 .sclass = {
41 { -1, -1, FERMI_TWOD_A }, 56 { -1, -1, FERMI_TWOD_A },
42 { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, 57 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 5f3d161a0842..303dceddd4a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -25,24 +25,31 @@
25 25
26#include <nvif/class.h> 26#include <nvif/class.h>
27 27
28static void
29gp10b_gr_init_num_active_ltcs(struct gf100_gr *gr)
30{
31 struct nvkm_device *device = gr->base.engine.subdev.device;
32
33 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
34}
35
36static const struct gf100_gr_func 28static const struct gf100_gr_func
37gp10b_gr = { 29gp10b_gr = {
38 .init = gp100_gr_init, 30 .oneinit_tiles = gm200_gr_oneinit_tiles,
31 .oneinit_sm_id = gm200_gr_oneinit_sm_id,
32 .init = gf100_gr_init,
39 .init_gpc_mmu = gm200_gr_init_gpc_mmu, 33 .init_gpc_mmu = gm200_gr_init_gpc_mmu,
34 .init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
35 .init_zcull = gf117_gr_init_zcull,
36 .init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
40 .init_rop_active_fbps = gp100_gr_init_rop_active_fbps, 37 .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
38 .init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
39 .init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
40 .init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
41 .init_419cc0 = gf100_gr_init_419cc0,
41 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, 42 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
42 .init_num_active_ltcs = gp10b_gr_init_num_active_ltcs, 43 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
44 .init_504430 = gm107_gr_init_504430,
45 .init_shader_exceptions = gp100_gr_init_shader_exceptions,
46 .trap_mp = gf100_gr_trap_mp,
43 .rops = gm200_gr_rops, 47 .rops = gm200_gr_rops,
48 .gpc_nr = 1,
49 .tpc_nr = 2,
44 .ppc_nr = 1, 50 .ppc_nr = 1,
45 .grctx = &gp102_grctx, 51 .grctx = &gp102_grctx,
52 .zbc = &gp102_gr_zbc,
46 .sclass = { 53 .sclass = {
47 { -1, -1, FERMI_TWOD_A }, 54 { -1, -1, FERMI_TWOD_A },
48 { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, 55 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
new file mode 100644
index 000000000000..19173ea19096
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
@@ -0,0 +1,120 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "gf100.h"
23#include "ctxgf100.h"
24
25#include <nvif/class.h>
26
27static void
28gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
29{
30 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
31 struct nvkm_device *device = subdev->device;
32 u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x730));
33 u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x734));
34 const struct nvkm_enum *warp;
35 char glob[128];
36
37 nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);
38 warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff);
39
40 nvkm_error(subdev, "GPC%i/TPC%i/MP trap: "
41 "global %08x [%s] warp %04x [%s]\n",
42 gpc, tpc, gerr, glob, werr, warp ? warp->name : "");
43
44 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x730), 0x00000000);
45 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734), gerr);
46}
47
48static void
49gv100_gr_init_4188a4(struct gf100_gr *gr)
50{
51 struct nvkm_device *device = gr->base.engine.subdev.device;
52 nvkm_mask(device, 0x4188a4, 0x03000000, 0x03000000);
53}
54
55static void
56gv100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
57{
58 struct nvkm_device *device = gr->base.engine.subdev.device;
59 int sm;
60 for (sm = 0; sm < 0x100; sm += 0x80) {
61 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x728 + sm), 0x0085eb64);
62 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x610), 0x00000001);
63 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x72c + sm), 0x00000004);
64 }
65}
66
67static void
68gv100_gr_init_504430(struct gf100_gr *gr, int gpc, int tpc)
69{
70 struct nvkm_device *device = gr->base.engine.subdev.device;
71 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0x403f0000);
72}
73
74static void
75gv100_gr_init_419bd8(struct gf100_gr *gr)
76{
77 struct nvkm_device *device = gr->base.engine.subdev.device;
78 nvkm_mask(device, 0x419bd8, 0x00000700, 0x00000000);
79}
80
81static const struct gf100_gr_func
82gv100_gr = {
83 .oneinit_tiles = gm200_gr_oneinit_tiles,
84 .oneinit_sm_id = gm200_gr_oneinit_sm_id,
85 .init = gf100_gr_init,
86 .init_419bd8 = gv100_gr_init_419bd8,
87 .init_gpc_mmu = gm200_gr_init_gpc_mmu,
88 .init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
89 .init_zcull = gf117_gr_init_zcull,
90 .init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
91 .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
92 .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
93 .init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
94 .init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
95 .init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
96 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
97 .init_504430 = gv100_gr_init_504430,
98 .init_shader_exceptions = gv100_gr_init_shader_exceptions,
99 .init_4188a4 = gv100_gr_init_4188a4,
100 .trap_mp = gv100_gr_trap_mp,
101 .rops = gm200_gr_rops,
102 .gpc_nr = 6,
103 .tpc_nr = 5,
104 .ppc_nr = 3,
105 .grctx = &gv100_grctx,
106 .zbc = &gp102_gr_zbc,
107 .sclass = {
108 { -1, -1, FERMI_TWOD_A },
109 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
110 { -1, -1, VOLTA_A, &gf100_fermi },
111 { -1, -1, VOLTA_COMPUTE_A },
112 {}
113 }
114};
115
116int
117gv100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
118{
119 return gm200_gr_new_(&gv100_gr, device, index, pgr);
120}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
index 58a59b7db2e5..771e16a16267 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
@@ -506,6 +506,7 @@ nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
506 break; 506 break;
507 case 0x0148cdec: 507 case 0x0148cdec:
508 case 0x015ccf3e: 508 case 0x015ccf3e:
509 case 0x0167d263:
509 ret = msgqueue_0148cdec_new(falcon, sb, queue); 510 ret = msgqueue_0148cdec_new(falcon, sb, queue);
510 break; 511 break;
511 default: 512 default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index 3f5d38d74fba..cfdffef1afb9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -3,6 +3,7 @@ include $(src)/nvkm/subdev/bios/Kbuild
3include $(src)/nvkm/subdev/bus/Kbuild 3include $(src)/nvkm/subdev/bus/Kbuild
4include $(src)/nvkm/subdev/clk/Kbuild 4include $(src)/nvkm/subdev/clk/Kbuild
5include $(src)/nvkm/subdev/devinit/Kbuild 5include $(src)/nvkm/subdev/devinit/Kbuild
6include $(src)/nvkm/subdev/fault/Kbuild
6include $(src)/nvkm/subdev/fb/Kbuild 7include $(src)/nvkm/subdev/fb/Kbuild
7include $(src)/nvkm/subdev/fuse/Kbuild 8include $(src)/nvkm/subdev/fuse/Kbuild
8include $(src)/nvkm/subdev/gpio/Kbuild 9include $(src)/nvkm/subdev/gpio/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index 7c7efa4ea0d0..3133b28f849c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -25,7 +25,7 @@
25#include <subdev/bios/bit.h> 25#include <subdev/bios/bit.h>
26#include <subdev/bios/dp.h> 26#include <subdev/bios/dp.h>
27 27
28static u16 28u16
29nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) 29nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
30{ 30{
31 struct bit_entry d; 31 struct bit_entry d;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
index 2ca23a9157ab..e6e804cee2bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
@@ -193,7 +193,10 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
193 data += hdr; 193 data += hdr;
194 while (cnt--) { 194 while (cnt--) {
195 if (nvbios_rd08(bios, data + 0) == type) { 195 if (nvbios_rd08(bios, data + 0) == type) {
196 *reg = nvbios_rd32(bios, data + 3); 196 if (*ver < 0x50)
197 *reg = nvbios_rd32(bios, data + 3);
198 else
199 *reg = 0;
197 return data; 200 return data;
198 } 201 }
199 data += *len; 202 data += *len;
@@ -361,6 +364,20 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
361 info->min_p = nvbios_rd08(bios, data + 12); 364 info->min_p = nvbios_rd08(bios, data + 12);
362 info->max_p = nvbios_rd08(bios, data + 13); 365 info->max_p = nvbios_rd08(bios, data + 13);
363 break; 366 break;
367 case 0x50:
368 info->refclk = nvbios_rd16(bios, data + 1) * 1000;
369 /* info->refclk_alt = nvbios_rd16(bios, data + 3) * 1000; */
370 info->vco1.min_freq = nvbios_rd16(bios, data + 5) * 1000;
371 info->vco1.max_freq = nvbios_rd16(bios, data + 7) * 1000;
372 info->vco1.min_inputfreq = nvbios_rd16(bios, data + 9) * 1000;
373 info->vco1.max_inputfreq = nvbios_rd16(bios, data + 11) * 1000;
374 info->vco1.min_m = nvbios_rd08(bios, data + 13);
375 info->vco1.max_m = nvbios_rd08(bios, data + 14);
376 info->vco1.min_n = nvbios_rd08(bios, data + 15);
377 info->vco1.max_n = nvbios_rd08(bios, data + 16);
378 info->min_p = nvbios_rd08(bios, data + 17);
379 info->max_p = nvbios_rd08(bios, data + 18);
380 break;
364 default: 381 default:
365 nvkm_error(subdev, "unknown pll limits version 0x%02x\n", ver); 382 nvkm_error(subdev, "unknown pll limits version 0x%02x\n", ver);
366 return -EINVAL; 383 return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
index 0f537c22804c..3634cd0630b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
@@ -78,7 +78,10 @@ pramin_init(struct nvkm_bios *bios, const char *name)
78 * important as we don't want to be touching vram on an 78 * important as we don't want to be touching vram on an
79 * uninitialised board 79 * uninitialised board
80 */ 80 */
81 addr = nvkm_rd32(device, 0x619f04); 81 if (device->card_type >= GV100)
82 addr = nvkm_rd32(device, 0x625f04);
83 else
84 addr = nvkm_rd32(device, 0x619f04);
82 if (!(addr & 0x00000008)) { 85 if (!(addr & 0x00000008)) {
83 nvkm_debug(subdev, "... not enabled\n"); 86 nvkm_debug(subdev, "... not enabled\n");
84 return ERR_PTR(-ENODEV); 87 return ERR_PTR(-ENODEV);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 81c3567d4e67..ba6a868d4c95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -109,18 +109,17 @@ nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
109 109
110static struct nvkm_cstate * 110static struct nvkm_cstate *
111nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate, 111nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
112 struct nvkm_cstate *start) 112 struct nvkm_cstate *cstate)
113{ 113{
114 struct nvkm_device *device = clk->subdev.device; 114 struct nvkm_device *device = clk->subdev.device;
115 struct nvkm_volt *volt = device->volt; 115 struct nvkm_volt *volt = device->volt;
116 struct nvkm_cstate *cstate;
117 int max_volt; 116 int max_volt;
118 117
119 if (!pstate || !start) 118 if (!pstate || !cstate)
120 return NULL; 119 return NULL;
121 120
122 if (!volt) 121 if (!volt)
123 return start; 122 return cstate;
124 123
125 max_volt = volt->max_uv; 124 max_volt = volt->max_uv;
126 if (volt->max0_id != 0xff) 125 if (volt->max0_id != 0xff)
@@ -133,8 +132,7 @@ nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
133 max_volt = min(max_volt, 132 max_volt = min(max_volt,
134 nvkm_volt_map(volt, volt->max2_id, clk->temp)); 133 nvkm_volt_map(volt, volt->max2_id, clk->temp));
135 134
136 for (cstate = start; &cstate->head != &pstate->list; 135 list_for_each_entry_from_reverse(cstate, &pstate->list, head) {
137 cstate = list_prev_entry(cstate, head)) {
138 if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp)) 136 if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
139 break; 137 break;
140 } 138 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
index eac88e3dc6e5..50a436926484 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
@@ -12,3 +12,4 @@ nvkm-y += nvkm/subdev/devinit/mcp89.o
12nvkm-y += nvkm/subdev/devinit/gf100.o 12nvkm-y += nvkm/subdev/devinit/gf100.o
13nvkm-y += nvkm/subdev/devinit/gm107.o 13nvkm-y += nvkm/subdev/devinit/gm107.o
14nvkm-y += nvkm/subdev/devinit/gm200.o 14nvkm-y += nvkm/subdev/devinit/gm200.o
15nvkm-y += nvkm/subdev/devinit/gv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index 1730371933df..b80618e35491 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -107,7 +107,7 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
107 return pmu_exec(init, pmu.init_addr_pmu), 0; 107 return pmu_exec(init, pmu.init_addr_pmu), 0;
108} 108}
109 109
110static int 110int
111gm200_devinit_post(struct nvkm_devinit *base, bool post) 111gm200_devinit_post(struct nvkm_devinit *base, bool post)
112{ 112{
113 struct nv50_devinit *init = nv50_devinit(base); 113 struct nv50_devinit *init = nv50_devinit(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c
new file mode 100644
index 000000000000..fbde6828bd38
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c
@@ -0,0 +1,79 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "nv50.h"
23
24#include <subdev/bios.h>
25#include <subdev/bios/pll.h>
26#include <subdev/clk/pll.h>
27
28static int
29gv100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
30{
31 struct nvkm_subdev *subdev = &init->subdev;
32 struct nvkm_device *device = subdev->device;
33 struct nvbios_pll info;
34 int head = type - PLL_VPLL0;
35 int N, fN, M, P;
36 int ret;
37
38 ret = nvbios_pll_parse(device->bios, type, &info);
39 if (ret)
40 return ret;
41
42 ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
43 if (ret < 0)
44 return ret;
45
46 switch (info.type) {
47 case PLL_VPLL0:
48 case PLL_VPLL1:
49 case PLL_VPLL2:
50 case PLL_VPLL3:
51 nvkm_wr32(device, 0x00ef10 + (head * 0x40), fN << 16);
52 nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) |
53 (N << 8) |
54 (M << 0));
55 break;
56 default:
57 nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
58 ret = -EINVAL;
59 break;
60 }
61
62 return ret;
63}
64
65static const struct nvkm_devinit_func
66gv100_devinit = {
67 .preinit = gf100_devinit_preinit,
68 .init = nv50_devinit_init,
69 .post = gm200_devinit_post,
70 .pll_set = gv100_devinit_pll_set,
71 .disable = gm107_devinit_disable,
72};
73
74int
75gv100_devinit_new(struct nvkm_device *device, int index,
76 struct nvkm_devinit **pinit)
77{
78 return nv50_devinit_new_(&gv100_devinit, device, index, pinit);
79}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index 315ebaff1165..9b9f0dc1e192 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -24,4 +24,6 @@ int gf100_devinit_pll_set(struct nvkm_devinit *, u32, u32);
24void gf100_devinit_preinit(struct nvkm_devinit *); 24void gf100_devinit_preinit(struct nvkm_devinit *);
25 25
26u64 gm107_devinit_disable(struct nvkm_devinit *); 26u64 gm107_devinit_disable(struct nvkm_devinit *);
27
28int gm200_devinit_post(struct nvkm_devinit *, bool);
27#endif 29#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
new file mode 100644
index 000000000000..45bb46fb0929
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
@@ -0,0 +1,3 @@
1nvkm-y += nvkm/subdev/fault/base.o
2nvkm-y += nvkm/subdev/fault/gp100.o
3nvkm-y += nvkm/subdev/fault/gv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
new file mode 100644
index 000000000000..007bf4af33b9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -0,0 +1,179 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "priv.h"
23
24#include <core/memory.h>
25#include <core/notify.h>
26#include <subdev/bar.h>
27#include <subdev/mmu.h>
28
29static void
30nvkm_fault_ntfy_fini(struct nvkm_event *event, int type, int index)
31{
32 struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
33 fault->func->buffer.fini(fault->buffer[index]);
34}
35
36static void
37nvkm_fault_ntfy_init(struct nvkm_event *event, int type, int index)
38{
39 struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
40 fault->func->buffer.init(fault->buffer[index]);
41}
42
43static int
44nvkm_fault_ntfy_ctor(struct nvkm_object *object, void *argv, u32 argc,
45 struct nvkm_notify *notify)
46{
47 struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
48 if (argc == 0) {
49 notify->size = 0;
50 notify->types = 1;
51 notify->index = buffer->id;
52 return 0;
53 }
54 return -ENOSYS;
55}
56
57static const struct nvkm_event_func
58nvkm_fault_ntfy = {
59 .ctor = nvkm_fault_ntfy_ctor,
60 .init = nvkm_fault_ntfy_init,
61 .fini = nvkm_fault_ntfy_fini,
62};
63
64static void
65nvkm_fault_intr(struct nvkm_subdev *subdev)
66{
67 struct nvkm_fault *fault = nvkm_fault(subdev);
68 return fault->func->intr(fault);
69}
70
71static int
72nvkm_fault_fini(struct nvkm_subdev *subdev, bool suspend)
73{
74 struct nvkm_fault *fault = nvkm_fault(subdev);
75 if (fault->func->fini)
76 fault->func->fini(fault);
77 return 0;
78}
79
80static int
81nvkm_fault_init(struct nvkm_subdev *subdev)
82{
83 struct nvkm_fault *fault = nvkm_fault(subdev);
84 if (fault->func->init)
85 fault->func->init(fault);
86 return 0;
87}
88
89static int
90nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
91{
92 struct nvkm_subdev *subdev = &fault->subdev;
93 struct nvkm_device *device = subdev->device;
94 struct nvkm_vmm *bar2 = nvkm_bar_bar2_vmm(device);
95 struct nvkm_fault_buffer *buffer;
96 int ret;
97
98 if (!(buffer = kzalloc(sizeof(*buffer), GFP_KERNEL)))
99 return -ENOMEM;
100 buffer->fault = fault;
101 buffer->id = id;
102 buffer->entries = fault->func->buffer.entries(buffer);
103 fault->buffer[id] = buffer;
104
105 nvkm_debug(subdev, "buffer %d: %d entries\n", id, buffer->entries);
106
107 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, buffer->entries *
108 fault->func->buffer.entry_size, 0x1000, true,
109 &buffer->mem);
110 if (ret)
111 return ret;
112
113 ret = nvkm_vmm_get(bar2, 12, nvkm_memory_size(buffer->mem),
114 &buffer->vma);
115 if (ret)
116 return ret;
117
118 return nvkm_memory_map(buffer->mem, 0, bar2, buffer->vma, NULL, 0);
119}
120
121static int
122nvkm_fault_oneinit(struct nvkm_subdev *subdev)
123{
124 struct nvkm_fault *fault = nvkm_fault(subdev);
125 int ret, i;
126
127 for (i = 0; i < ARRAY_SIZE(fault->buffer); i++) {
128 if (i < fault->func->buffer.nr) {
129 ret = nvkm_fault_oneinit_buffer(fault, i);
130 if (ret)
131 return ret;
132 fault->buffer_nr = i + 1;
133 }
134 }
135
136 return nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr,
137 &fault->event);
138}
139
140static void *
141nvkm_fault_dtor(struct nvkm_subdev *subdev)
142{
143 struct nvkm_vmm *bar2 = nvkm_bar_bar2_vmm(subdev->device);
144 struct nvkm_fault *fault = nvkm_fault(subdev);
145 int i;
146
147 nvkm_event_fini(&fault->event);
148
149 for (i = 0; i < fault->buffer_nr; i++) {
150 if (fault->buffer[i]) {
151 nvkm_vmm_put(bar2, &fault->buffer[i]->vma);
152 nvkm_memory_unref(&fault->buffer[i]->mem);
153 kfree(fault->buffer[i]);
154 }
155 }
156
157 return fault;
158}
159
160static const struct nvkm_subdev_func
161nvkm_fault = {
162 .dtor = nvkm_fault_dtor,
163 .oneinit = nvkm_fault_oneinit,
164 .init = nvkm_fault_init,
165 .fini = nvkm_fault_fini,
166 .intr = nvkm_fault_intr,
167};
168
169int
170nvkm_fault_new_(const struct nvkm_fault_func *func, struct nvkm_device *device,
171 int index, struct nvkm_fault **pfault)
172{
173 struct nvkm_fault *fault;
174 if (!(fault = *pfault = kzalloc(sizeof(*fault), GFP_KERNEL)))
175 return -ENOMEM;
176 nvkm_subdev_ctor(&nvkm_fault, device, index, &fault->subdev);
177 fault->func = func;
178 return 0;
179}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
new file mode 100644
index 000000000000..5e71db2e8d75
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
@@ -0,0 +1,69 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "priv.h"
23
24#include <subdev/mmu.h>
25
26static void
27gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
28{
29 struct nvkm_device *device = buffer->fault->subdev.device;
30 nvkm_mask(device, 0x002a70, 0x00000001, 0x00000000);
31}
32
33static void
34gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
35{
36 struct nvkm_device *device = buffer->fault->subdev.device;
37 nvkm_wr32(device, 0x002a74, upper_32_bits(buffer->vma->addr));
38 nvkm_wr32(device, 0x002a70, lower_32_bits(buffer->vma->addr));
39 nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001);
40}
41
42static u32
43gp100_fault_buffer_entries(struct nvkm_fault_buffer *buffer)
44{
45 return nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
46}
47
48static void
49gp100_fault_intr(struct nvkm_fault *fault)
50{
51 nvkm_event_send(&fault->event, 1, 0, NULL, 0);
52}
53
54static const struct nvkm_fault_func
55gp100_fault = {
56 .intr = gp100_fault_intr,
57 .buffer.nr = 1,
58 .buffer.entry_size = 32,
59 .buffer.entries = gp100_fault_buffer_entries,
60 .buffer.init = gp100_fault_buffer_init,
61 .buffer.fini = gp100_fault_buffer_fini,
62};
63
64int
65gp100_fault_new(struct nvkm_device *device, int index,
66 struct nvkm_fault **pfault)
67{
68 return nvkm_fault_new_(&gp100_fault, device, index, pfault);
69}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
new file mode 100644
index 000000000000..73c7728b5969
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -0,0 +1,206 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "priv.h"
23
24#include <core/memory.h>
25#include <subdev/mmu.h>
26#include <engine/fifo.h>
27
28static void
29gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
30{
31 struct nvkm_device *device = buffer->fault->subdev.device;
32 struct nvkm_memory *mem = buffer->mem;
33 const u32 foff = buffer->id * 0x14;
34 u32 get = nvkm_rd32(device, 0x100e2c + foff);
35 u32 put = nvkm_rd32(device, 0x100e30 + foff);
36 if (put == get)
37 return;
38
39 nvkm_kmap(mem);
40 while (get != put) {
41 const u32 base = get * buffer->fault->func->buffer.entry_size;
42 const u32 instlo = nvkm_ro32(mem, base + 0x00);
43 const u32 insthi = nvkm_ro32(mem, base + 0x04);
44 const u32 addrlo = nvkm_ro32(mem, base + 0x08);
45 const u32 addrhi = nvkm_ro32(mem, base + 0x0c);
46 const u32 timelo = nvkm_ro32(mem, base + 0x10);
47 const u32 timehi = nvkm_ro32(mem, base + 0x14);
48 const u32 info0 = nvkm_ro32(mem, base + 0x18);
49 const u32 info1 = nvkm_ro32(mem, base + 0x1c);
50 struct nvkm_fault_data info;
51
52 if (++get == buffer->entries)
53 get = 0;
54 nvkm_wr32(device, 0x100e2c + foff, get);
55
56 info.addr = ((u64)addrhi << 32) | addrlo;
57 info.inst = ((u64)insthi << 32) | instlo;
58 info.time = ((u64)timehi << 32) | timelo;
59 info.engine = (info0 & 0x000000ff);
60 info.valid = (info1 & 0x80000000) >> 31;
61 info.gpc = (info1 & 0x1f000000) >> 24;
62 info.hub = (info1 & 0x00100000) >> 20;
63 info.access = (info1 & 0x000f0000) >> 16;
64 info.client = (info1 & 0x00007f00) >> 8;
65 info.reason = (info1 & 0x0000001f);
66
67 nvkm_fifo_fault(device->fifo, &info);
68 }
69 nvkm_done(mem);
70}
71
72static void
73gv100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
74{
75 struct nvkm_device *device = buffer->fault->subdev.device;
76 const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
77 const u32 foff = buffer->id * 0x14;
78
79 nvkm_mask(device, 0x100a34, intr, intr);
80 nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x00000000);
81}
82
83static void
84gv100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
85{
86 struct nvkm_device *device = buffer->fault->subdev.device;
87 const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
88 const u32 foff = buffer->id * 0x14;
89
90 nvkm_mask(device, 0x100e34 + foff, 0xc0000000, 0x40000000);
91 nvkm_wr32(device, 0x100e28 + foff, upper_32_bits(buffer->vma->addr));
92 nvkm_wr32(device, 0x100e24 + foff, lower_32_bits(buffer->vma->addr));
93 nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x80000000);
94 nvkm_mask(device, 0x100a2c, intr, intr);
95}
96
97static u32
98gv100_fault_buffer_entries(struct nvkm_fault_buffer *buffer)
99{
100 struct nvkm_device *device = buffer->fault->subdev.device;
101 const u32 foff = buffer->id * 0x14;
102 nvkm_mask(device, 0x100e34 + foff, 0x40000000, 0x40000000);
103 return nvkm_rd32(device, 0x100e34 + foff) & 0x000fffff;
104}
105
106static int
107gv100_fault_ntfy_nrpfb(struct nvkm_notify *notify)
108{
109 struct nvkm_fault *fault = container_of(notify, typeof(*fault), nrpfb);
110 gv100_fault_buffer_process(fault->buffer[0]);
111 return NVKM_NOTIFY_KEEP;
112}
113
114static void
115gv100_fault_intr_fault(struct nvkm_fault *fault)
116{
117 struct nvkm_subdev *subdev = &fault->subdev;
118 struct nvkm_device *device = subdev->device;
119 struct nvkm_fault_data info;
120 const u32 addrlo = nvkm_rd32(device, 0x100e4c);
121 const u32 addrhi = nvkm_rd32(device, 0x100e50);
122 const u32 info0 = nvkm_rd32(device, 0x100e54);
123 const u32 insthi = nvkm_rd32(device, 0x100e58);
124 const u32 info1 = nvkm_rd32(device, 0x100e5c);
125
126 info.addr = ((u64)addrhi << 32) | addrlo;
127 info.inst = ((u64)insthi << 32) | (info0 & 0xfffff000);
128 info.time = 0;
129 info.engine = (info0 & 0x000000ff);
130 info.valid = (info1 & 0x80000000) >> 31;
131 info.gpc = (info1 & 0x1f000000) >> 24;
132 info.hub = (info1 & 0x00100000) >> 20;
133 info.access = (info1 & 0x000f0000) >> 16;
134 info.client = (info1 & 0x00007f00) >> 8;
135 info.reason = (info1 & 0x0000001f);
136
137 nvkm_fifo_fault(device->fifo, &info);
138}
139
140static void
141gv100_fault_intr(struct nvkm_fault *fault)
142{
143 struct nvkm_subdev *subdev = &fault->subdev;
144 struct nvkm_device *device = subdev->device;
145 u32 stat = nvkm_rd32(device, 0x100a20);
146
147 if (stat & 0x80000000) {
148 gv100_fault_intr_fault(fault);
149 nvkm_wr32(device, 0x100e60, 0x80000000);
150 stat &= ~0x80000000;
151 }
152
153 if (stat & 0x20000000) {
154 if (fault->buffer[0]) {
155 nvkm_event_send(&fault->event, 1, 0, NULL, 0);
156 stat &= ~0x20000000;
157 }
158 }
159
160 if (stat) {
161 nvkm_debug(subdev, "intr %08x\n", stat);
162 }
163}
164
165static void
166gv100_fault_fini(struct nvkm_fault *fault)
167{
168 nvkm_notify_put(&fault->nrpfb);
169 nvkm_mask(fault->subdev.device, 0x100a34, 0x80000000, 0x80000000);
170}
171
172static void
173gv100_fault_init(struct nvkm_fault *fault)
174{
175 nvkm_mask(fault->subdev.device, 0x100a2c, 0x80000000, 0x80000000);
176 nvkm_notify_get(&fault->nrpfb);
177}
178
179static const struct nvkm_fault_func
180gv100_fault = {
181 .init = gv100_fault_init,
182 .fini = gv100_fault_fini,
183 .intr = gv100_fault_intr,
184 .buffer.nr = 2,
185 .buffer.entry_size = 32,
186 .buffer.entries = gv100_fault_buffer_entries,
187 .buffer.init = gv100_fault_buffer_init,
188 .buffer.fini = gv100_fault_buffer_fini,
189};
190
191int
192gv100_fault_new(struct nvkm_device *device, int index,
193 struct nvkm_fault **pfault)
194{
195 struct nvkm_fault *fault;
196 int ret;
197
198 ret = nvkm_fault_new_(&gv100_fault, device, index, &fault);
199 *pfault = fault;
200 if (ret)
201 return ret;
202
203 return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
204 gv100_fault_ntfy_nrpfb, false, NULL, 0, 0,
205 &fault->nrpfb);
206}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
new file mode 100644
index 000000000000..44843ecf12b0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -0,0 +1,34 @@
1#ifndef __NVKM_FAULT_PRIV_H__
2#define __NVKM_FAULT_PRIV_H__
3#define nvkm_fault_buffer(p) container_of((p), struct nvkm_fault_buffer, object)
4#define nvkm_fault(p) container_of((p), struct nvkm_fault, subdev)
5#include <subdev/fault.h>
6
7#include <core/event.h>
8#include <core/object.h>
9
10struct nvkm_fault_buffer {
11 struct nvkm_object object;
12 struct nvkm_fault *fault;
13 int id;
14 int entries;
15 struct nvkm_memory *mem;
16 struct nvkm_vma *vma;
17};
18
19int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *,
20 int index, struct nvkm_fault **);
21
22struct nvkm_fault_func {
23 void (*init)(struct nvkm_fault *);
24 void (*fini)(struct nvkm_fault *);
25 void (*intr)(struct nvkm_fault *);
26 struct {
27 int nr;
28 u32 entry_size;
29 u32 (*entries)(struct nvkm_fault_buffer *);
30 void (*init)(struct nvkm_fault_buffer *);
31 void (*fini)(struct nvkm_fault_buffer *);
32 } buffer;
33};
34#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index b4f22cce5d43..969610951263 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -30,6 +30,7 @@ nvkm-y += nvkm/subdev/fb/gm20b.o
30nvkm-y += nvkm/subdev/fb/gp100.o 30nvkm-y += nvkm/subdev/fb/gp100.o
31nvkm-y += nvkm/subdev/fb/gp102.o 31nvkm-y += nvkm/subdev/fb/gp102.o
32nvkm-y += nvkm/subdev/fb/gp10b.o 32nvkm-y += nvkm/subdev/fb/gp10b.o
33nvkm-y += nvkm/subdev/fb/gv100.o
33 34
34nvkm-y += nvkm/subdev/fb/ram.o 35nvkm-y += nvkm/subdev/fb/ram.o
35nvkm-y += nvkm/subdev/fb/ramnv04.o 36nvkm-y += nvkm/subdev/fb/ramnv04.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index cdc4e0a2cc6b..e8dc4e913494 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -46,10 +46,10 @@ gf100_fb_oneinit(struct nvkm_fb *base)
46{ 46{
47 struct gf100_fb *fb = gf100_fb(base); 47 struct gf100_fb *fb = gf100_fb(base);
48 struct nvkm_device *device = fb->base.subdev.device; 48 struct nvkm_device *device = fb->base.subdev.device;
49 int ret, size = 0x1000; 49 int ret, size = 1 << (fb->base.page ? fb->base.page : 17);
50 50
51 size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size); 51 size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
52 size = min(size, 0x1000); 52 size = max(size, 0x1000);
53 53
54 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, 54 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
55 true, &fb->base.mmu_rd); 55 true, &fb->base.mmu_rd);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index 8137e19d3292..d3b8c3367152 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -49,8 +49,6 @@ gm200_fb_init(struct nvkm_fb *base)
49 if (fb->r100c10_page) 49 if (fb->r100c10_page)
50 nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8); 50 nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
51 51
52 nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
53
54 nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8); 52 nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8);
55 nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8); 53 nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8);
56 nvkm_mask(device, 0x100cc4, 0x00060000, 54 nvkm_mask(device, 0x100cc4, 0x00060000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
index 147f69b30cd8..dffe1f5e1071 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -26,7 +26,7 @@
26 26
27#include <core/memory.h> 27#include <core/memory.h>
28 28
29static void 29void
30gp100_fb_init_unkn(struct nvkm_fb *base) 30gp100_fb_init_unkn(struct nvkm_fb *base)
31{ 31{
32 struct nvkm_device *device = gf100_fb(base)->base.subdev.device; 32 struct nvkm_device *device = gf100_fb(base)->base.subdev.device;
@@ -48,7 +48,7 @@ gp100_fb_init(struct nvkm_fb *base)
48 nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8); 48 nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8);
49 nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8); 49 nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8);
50 nvkm_mask(device, 0x100cc4, 0x00060000, 50 nvkm_mask(device, 0x100cc4, 0x00060000,
51 max(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17); 51 min(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17);
52} 52}
53 53
54static const struct nvkm_fb_func 54static const struct nvkm_fb_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
new file mode 100644
index 000000000000..3c5e02e9794a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -0,0 +1,46 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "gf100.h"
23#include "ram.h"
24
25static int
26gv100_fb_init_page(struct nvkm_fb *fb)
27{
28 return (fb->page == 16) ? 0 : -EINVAL;
29}
30
31static const struct nvkm_fb_func
32gv100_fb = {
33 .dtor = gf100_fb_dtor,
34 .oneinit = gf100_fb_oneinit,
35 .init = gp100_fb_init,
36 .init_page = gv100_fb_init_page,
37 .init_unkn = gp100_fb_init_unkn,
38 .ram_new = gp100_ram_new,
39 .default_bigpage = 16,
40};
41
42int
43gv100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
44{
45 return gf100_fb_new_(&gv100_fb, device, index, pfb);
46}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 414a423e0e55..2857f31466bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -68,4 +68,6 @@ int gf100_fb_oneinit(struct nvkm_fb *);
68int gf100_fb_init_page(struct nvkm_fb *); 68int gf100_fb_init_page(struct nvkm_fb *);
69 69
70int gm200_fb_init_page(struct nvkm_fb *); 70int gm200_fb_init_page(struct nvkm_fb *);
71
72void gp100_fb_init_unkn(struct nvkm_fb *);
71#endif 73#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
index 12d6f4f102cb..290ff1c425a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
@@ -4,3 +4,4 @@ nvkm-y += nvkm/subdev/ltc/gk104.o
4nvkm-y += nvkm/subdev/ltc/gm107.o 4nvkm-y += nvkm/subdev/ltc/gm107.o
5nvkm-y += nvkm/subdev/ltc/gm200.o 5nvkm-y += nvkm/subdev/ltc/gm200.o
6nvkm-y += nvkm/subdev/ltc/gp100.o 6nvkm-y += nvkm/subdev/ltc/gp100.o
7nvkm-y += nvkm/subdev/ltc/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 1f185274d3e6..23242179e600 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -55,6 +55,14 @@ nvkm_ltc_zbc_depth_get(struct nvkm_ltc *ltc, int index, const u32 depth)
55 return index; 55 return index;
56} 56}
57 57
58int
59nvkm_ltc_zbc_stencil_get(struct nvkm_ltc *ltc, int index, const u32 stencil)
60{
61 ltc->zbc_stencil[index] = stencil;
62 ltc->func->zbc_clear_stencil(ltc, index, stencil);
63 return index;
64}
65
58void 66void
59nvkm_ltc_invalidate(struct nvkm_ltc *ltc) 67nvkm_ltc_invalidate(struct nvkm_ltc *ltc)
60{ 68{
@@ -92,6 +100,8 @@ nvkm_ltc_init(struct nvkm_subdev *subdev)
92 for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) { 100 for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
93 ltc->func->zbc_clear_color(ltc, i, ltc->zbc_color[i]); 101 ltc->func->zbc_clear_color(ltc, i, ltc->zbc_color[i]);
94 ltc->func->zbc_clear_depth(ltc, i, ltc->zbc_depth[i]); 102 ltc->func->zbc_clear_depth(ltc, i, ltc->zbc_depth[i]);
103 if (ltc->func->zbc_clear_stencil)
104 ltc->func->zbc_clear_stencil(ltc, i, ltc->zbc_stencil[i]);
95 } 105 }
96 106
97 ltc->func->init(ltc); 107 ltc->func->init(ltc);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
index e34d42108019..e923ed76d37a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
@@ -23,7 +23,7 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26static void 26void
27gp100_ltc_intr(struct nvkm_ltc *ltc) 27gp100_ltc_intr(struct nvkm_ltc *ltc)
28{ 28{
29 struct nvkm_device *device = ltc->subdev.device; 29 struct nvkm_device *device = ltc->subdev.device;
@@ -38,7 +38,7 @@ gp100_ltc_intr(struct nvkm_ltc *ltc)
38 } 38 }
39} 39}
40 40
41static int 41int
42gp100_ltc_oneinit(struct nvkm_ltc *ltc) 42gp100_ltc_oneinit(struct nvkm_ltc *ltc)
43{ 43{
44 struct nvkm_device *device = ltc->subdev.device; 44 struct nvkm_device *device = ltc->subdev.device;
@@ -48,7 +48,7 @@ gp100_ltc_oneinit(struct nvkm_ltc *ltc)
48 return 0; 48 return 0;
49} 49}
50 50
51static void 51void
52gp100_ltc_init(struct nvkm_ltc *ltc) 52gp100_ltc_init(struct nvkm_ltc *ltc)
53{ 53{
54 /*XXX: PMU LS call to setup tagram address */ 54 /*XXX: PMU LS call to setup tagram address */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
new file mode 100644
index 000000000000..601747ada655
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
@@ -0,0 +1,51 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "priv.h"
23
24void
25gp102_ltc_zbc_clear_stencil(struct nvkm_ltc *ltc, int i, const u32 stencil)
26{
27 struct nvkm_device *device = ltc->subdev.device;
28 nvkm_mask(device, 0x17e338, 0x0000000f, i);
29 nvkm_wr32(device, 0x17e204, stencil);
30}
31
32static const struct nvkm_ltc_func
33gp102_ltc = {
34 .oneinit = gp100_ltc_oneinit,
35 .init = gp100_ltc_init,
36 .intr = gp100_ltc_intr,
37 .cbc_clear = gm107_ltc_cbc_clear,
38 .cbc_wait = gm107_ltc_cbc_wait,
39 .zbc = 16,
40 .zbc_clear_color = gm107_ltc_zbc_clear_color,
41 .zbc_clear_depth = gm107_ltc_zbc_clear_depth,
42 .zbc_clear_stencil = gp102_ltc_zbc_clear_stencil,
43 .invalidate = gf100_ltc_invalidate,
44 .flush = gf100_ltc_flush,
45};
46
47int
48gp102_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
49{
50 return nvkm_ltc_new_(&gp102_ltc, device, index, pltc);
51}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index e71cc25cc775..9dcde43c0f3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -19,6 +19,7 @@ struct nvkm_ltc_func {
19 int zbc; 19 int zbc;
20 void (*zbc_clear_color)(struct nvkm_ltc *, int, const u32[4]); 20 void (*zbc_clear_color)(struct nvkm_ltc *, int, const u32[4]);
21 void (*zbc_clear_depth)(struct nvkm_ltc *, int, const u32); 21 void (*zbc_clear_depth)(struct nvkm_ltc *, int, const u32);
22 void (*zbc_clear_stencil)(struct nvkm_ltc *, int, const u32);
22 23
23 void (*invalidate)(struct nvkm_ltc *); 24 void (*invalidate)(struct nvkm_ltc *);
24 void (*flush)(struct nvkm_ltc *); 25 void (*flush)(struct nvkm_ltc *);
@@ -41,4 +42,8 @@ void gm107_ltc_cbc_clear(struct nvkm_ltc *, u32, u32);
41void gm107_ltc_cbc_wait(struct nvkm_ltc *); 42void gm107_ltc_cbc_wait(struct nvkm_ltc *);
42void gm107_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]); 43void gm107_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
43void gm107_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32); 44void gm107_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
45
46int gp100_ltc_oneinit(struct nvkm_ltc *);
47void gp100_ltc_init(struct nvkm_ltc *);
48void gp100_ltc_intr(struct nvkm_ltc *);
44#endif 49#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
index 7321ad3758c3..43db245eec9a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
@@ -75,10 +75,28 @@ gp100_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
75 spin_unlock_irqrestore(&mc->lock, flags); 75 spin_unlock_irqrestore(&mc->lock, flags);
76} 76}
77 77
78const struct nvkm_mc_map
79gp100_mc_intr[] = {
80 { 0x04000000, NVKM_ENGINE_DISP },
81 { 0x00000100, NVKM_ENGINE_FIFO },
82 { 0x00000200, NVKM_SUBDEV_FAULT },
83 { 0x40000000, NVKM_SUBDEV_IBUS },
84 { 0x10000000, NVKM_SUBDEV_BUS },
85 { 0x08000000, NVKM_SUBDEV_FB },
86 { 0x02000000, NVKM_SUBDEV_LTC },
87 { 0x01000000, NVKM_SUBDEV_PMU },
88 { 0x00200000, NVKM_SUBDEV_GPIO },
89 { 0x00200000, NVKM_SUBDEV_I2C },
90 { 0x00100000, NVKM_SUBDEV_TIMER },
91 { 0x00040000, NVKM_SUBDEV_THERM },
92 { 0x00002000, NVKM_SUBDEV_FB },
93 {},
94};
95
78static const struct nvkm_mc_func 96static const struct nvkm_mc_func
79gp100_mc = { 97gp100_mc = {
80 .init = nv50_mc_init, 98 .init = nv50_mc_init,
81 .intr = gk104_mc_intr, 99 .intr = gp100_mc_intr,
82 .intr_unarm = gp100_mc_intr_unarm, 100 .intr_unarm = gp100_mc_intr_unarm,
83 .intr_rearm = gp100_mc_intr_rearm, 101 .intr_rearm = gp100_mc_intr_rearm,
84 .intr_mask = gp100_mc_intr_mask, 102 .intr_mask = gp100_mc_intr_mask,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
index 2283e3b74277..ff8629de97d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
@@ -34,7 +34,7 @@ gp10b_mc_init(struct nvkm_mc *mc)
34static const struct nvkm_mc_func 34static const struct nvkm_mc_func
35gp10b_mc = { 35gp10b_mc = {
36 .init = gp10b_mc_init, 36 .init = gp10b_mc_init,
37 .intr = gk104_mc_intr, 37 .intr = gp100_mc_intr,
38 .intr_unarm = gp100_mc_intr_unarm, 38 .intr_unarm = gp100_mc_intr_unarm,
39 .intr_rearm = gp100_mc_intr_rearm, 39 .intr_rearm = gp100_mc_intr_rearm,
40 .intr_mask = gp100_mc_intr_mask, 40 .intr_mask = gp100_mc_intr_mask,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index 8869d79c2b59..d9e3691d45b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -57,4 +57,6 @@ int gp100_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, int,
57 57
58extern const struct nvkm_mc_map gk104_mc_intr[]; 58extern const struct nvkm_mc_map gk104_mc_intr[];
59extern const struct nvkm_mc_map gk104_mc_reset[]; 59extern const struct nvkm_mc_map gk104_mc_reset[];
60
61extern const struct nvkm_mc_map gp100_mc_intr[];
60#endif 62#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 67ee983bb026..58a24e3a0598 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -12,6 +12,7 @@ nvkm-y += nvkm/subdev/mmu/gm200.o
12nvkm-y += nvkm/subdev/mmu/gm20b.o 12nvkm-y += nvkm/subdev/mmu/gm20b.o
13nvkm-y += nvkm/subdev/mmu/gp100.o 13nvkm-y += nvkm/subdev/mmu/gp100.o
14nvkm-y += nvkm/subdev/mmu/gp10b.o 14nvkm-y += nvkm/subdev/mmu/gp10b.o
15nvkm-y += nvkm/subdev/mmu/gv100.o
15 16
16nvkm-y += nvkm/subdev/mmu/mem.o 17nvkm-y += nvkm/subdev/mmu/mem.o
17nvkm-y += nvkm/subdev/mmu/memnv04.o 18nvkm-y += nvkm/subdev/mmu/memnv04.o
@@ -31,6 +32,7 @@ nvkm-y += nvkm/subdev/mmu/vmmgm200.o
31nvkm-y += nvkm/subdev/mmu/vmmgm20b.o 32nvkm-y += nvkm/subdev/mmu/vmmgm20b.o
32nvkm-y += nvkm/subdev/mmu/vmmgp100.o 33nvkm-y += nvkm/subdev/mmu/vmmgp100.o
33nvkm-y += nvkm/subdev/mmu/vmmgp10b.o 34nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
35nvkm-y += nvkm/subdev/mmu/vmmgv100.o
34 36
35nvkm-y += nvkm/subdev/mmu/umem.o 37nvkm-y += nvkm/subdev/mmu/umem.o
36nvkm-y += nvkm/subdev/mmu/ummu.o 38nvkm-y += nvkm/subdev/mmu/ummu.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
new file mode 100644
index 000000000000..f666cb57f69e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
@@ -0,0 +1,43 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "mem.h"
23#include "vmm.h"
24
25#include <core/option.h>
26
27#include <nvif/class.h>
28
29static const struct nvkm_mmu_func
30gv100_mmu = {
31 .dma_bits = 47,
32 .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
33 .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
34 .vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gv100_vmm_new },
35 .kind = gm200_mmu_kind,
36 .kind_sys = true,
37};
38
39int
40gv100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
41{
42 return nvkm_mmu_new_(&gv100_mmu, device, index, pmmu);
43}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index da06e64d8a7d..1a3b0a3724ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -236,6 +236,9 @@ int gp100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
236int gp10b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32, 236int gp10b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
237 struct lock_class_key *, const char *, 237 struct lock_class_key *, const char *,
238 struct nvkm_vmm **); 238 struct nvkm_vmm **);
239int gv100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
240 struct lock_class_key *, const char *,
241 struct nvkm_vmm **);
239 242
240#define VMM_PRINT(l,v,p,f,a...) do { \ 243#define VMM_PRINT(l,v,p,f,a...) do { \
241 struct nvkm_vmm *_vmm = (v); \ 244 struct nvkm_vmm *_vmm = (v); \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c
new file mode 100644
index 000000000000..2fa40c16e6d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c
@@ -0,0 +1,87 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "vmm.h"
23
24#include <subdev/fb.h>
25#include <subdev/ltc.h>
26
27#include <nvif/ifc00d.h>
28#include <nvif/unpack.h>
29
/* Attach this VMM to a channel instance block.
 *
 * Performs the common GP100 join first, then reads back the two words
 * at 0x200/0x204 (NOTE(review): presumably the page-directory base
 * programmed by gp100_vmm_join() — confirm) and replicates them into
 * each of the 64 slots at 0x2a0 (0x10 bytes apart) whose bit is set in
 * 'mask'.  Only slot 0 is enabled here; the remaining slots are filled
 * with a placeholder value.  The slot-valid mask itself is published
 * at 0x298/0x29c.
 *
 * Returns 0 on success, or the error from gp100_vmm_join().
 */
int
gv100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	u64 data[2], mask;
	int ret = gp100_vmm_join(vmm, inst), i;
	if (ret)
		return ret;

	nvkm_kmap(inst);
	/* Read back the words written into the instance block by the
	 * common join above. */
	data[0] = nvkm_ro32(inst, 0x200);
	data[1] = nvkm_ro32(inst, 0x204);
	mask = BIT_ULL(0); /* only slot 0 is marked valid */

	/* NOTE(review): purpose of the 0x21c clear is not visible here
	 * — verify against the hardware instance-block layout. */
	nvkm_wo32(inst, 0x21c, 0x00000000);

	/* 64 slots of 0x10 bytes each, starting at 0x2a0; high word is
	 * written before the low word for valid slots. */
	for (i = 0; i < 64; i++) {
		if (mask & BIT_ULL(i)) {
			nvkm_wo32(inst, 0x2a4 + (i * 0x10), data[1]);
			nvkm_wo32(inst, 0x2a0 + (i * 0x10), data[0]);
		} else {
			/* Unused slot: placeholder value in both words. */
			nvkm_wo32(inst, 0x2a4 + (i * 0x10), 0x00000001);
			nvkm_wo32(inst, 0x2a0 + (i * 0x10), 0x00000001);
		}
		/* Third word of every slot is cleared. */
		nvkm_wo32(inst, 0x2a8 + (i * 0x10), 0x00000000);
	}

	/* Publish which slots carry valid data. */
	nvkm_wo32(inst, 0x298, lower_32_bits(mask));
	nvkm_wo32(inst, 0x29c, upper_32_bits(mask));
	nvkm_done(inst);
	return 0;
}
61
/* GV100 VMM: GV100-specific join (above) combined with GF100-class
 * part/aper and GP100-class valid/flush handlers.  The page array
 * reuses the GP100 page-table descriptors; shift values give the page
 * size each level resolves (2^shift bytes).
 */
static const struct nvkm_vmm_func
gv100_vmm = {
	.join = gv100_vmm_join,
	.part = gf100_vmm_part,
	.aper = gf100_vmm_aper,
	.valid = gp100_vmm_valid,
	.flush = gp100_vmm_flush,
	.page = {
		/* Upper directory levels: sparse-only (Sxxx). */
		{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
		{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
		{ 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
		/* Leaf pages: 2MiB and 64KiB (vidmem+comp), 4KiB (host). */
		{ 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
		{ 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
		{ 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
		{}
	}
};
79
/* Create a GV100 VMM over [addr, addr+size): thin wrapper binding the
 * gv100_vmm function table to the common NV04-style constructor (the
 * zero argument selects no extra constructor flags).  Returns 0 on
 * success or a negative error code.
 */
int
gv100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
	      struct lock_class_key *key, const char *name,
	      struct nvkm_vmm **pvmm)
{
	return nv04_vmm_new_(&gv100_vmm, mmu, 0, addr, size,
			     argv, argc, key, name, pvmm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
index e8c27ec700de..737a8d50a1f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
@@ -65,3 +65,24 @@ MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
65MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin"); 65MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
66MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin"); 66MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
67MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin"); 67MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
68
69MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin");
70MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin");
71MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin");
72MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin");
73MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin");
74MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin");
75MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin");
76MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin");
77MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin");
78MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin");
79MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin");
80MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin");
81MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin");
82MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin");
83MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin");
84MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin");
85MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
86MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin");
87MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin");
88MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
index 6f10b098676c..1e1f1c635cab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
@@ -80,12 +80,11 @@ acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
80 struct nvkm_falcon *falcon, u32 addr_args) 80 struct nvkm_falcon *falcon, u32 addr_args)
81{ 81{
82 struct nvkm_device *device = falcon->owner->device; 82 struct nvkm_device *device = falcon->owner->device;
83 u32 cmdline_size = NVKM_MSGQUEUE_CMDLINE_SIZE; 83 u8 buf[NVKM_MSGQUEUE_CMDLINE_SIZE];
84 u8 buf[cmdline_size];
85 84
86 memset(buf, 0, cmdline_size); 85 memset(buf, 0, sizeof(buf));
87 nvkm_msgqueue_write_cmdline(queue, buf); 86 nvkm_msgqueue_write_cmdline(queue, buf);
88 nvkm_falcon_load_dmem(falcon, buf, addr_args, cmdline_size, 0); 87 nvkm_falcon_load_dmem(falcon, buf, addr_args, sizeof(buf), 0);
89 /* rearm the queue so it will wait for the init message */ 88 /* rearm the queue so it will wait for the init message */
90 nvkm_msgqueue_reinit(queue); 89 nvkm_msgqueue_reinit(queue);
91 90
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index fea4957291da..4f1f3e890650 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -48,7 +48,8 @@ gk104_top_oneinit(struct nvkm_top *top)
48 case 0x00000001: /* DATA */ 48 case 0x00000001: /* DATA */
49 inst = (data & 0x3c000000) >> 26; 49 inst = (data & 0x3c000000) >> 26;
50 info->addr = (data & 0x00fff000); 50 info->addr = (data & 0x00fff000);
51 info->fault = (data & 0x000000f8) >> 3; 51 if (data & 0x00000004)
52 info->fault = (data & 0x000003f8) >> 3;
52 break; 53 break;
53 case 0x00000002: /* ENUM */ 54 case 0x00000002: /* ENUM */
54 if (data & 0x00000020) 55 if (data & 0x00000020)