author    Ben Skeggs <bskeggs@redhat.com>    2012-07-19 18:17:34 -0400
committer Ben Skeggs <bskeggs@redhat.com>    2012-10-02 23:12:56 -0400
commit    ebb945a94bba2ce8dff7b0942ff2b3f2a52a0a69 (patch)
tree      07cad59be501458e6ae1304b7c0352e322ac3387 /drivers/gpu/drm
parent    ac1499d9573f4aadd1d2beac11fe23af8ce90c24 (diff)
drm/nouveau: port all engines to new engine module format
This is a HUGE commit, but it's not nearly as bad as it looks - any
problems can be isolated to a particular chipset and engine combination.
It was simply too difficult to port each one at a time, the compat
layers are *already* ridiculous.

Most of the changes here are simply to the glue, the process for each of
the engine modules was to start with a standard skeleton and copy+paste
the old code into the appropriate places, fixing up variable names etc
as needed.

v2: Marcin Slusarz <marcin.slusarz@gmail.com>
- fix find/replace bug in license header

v3: Ben Skeggs <bskeggs@redhat.com>
- bump indirect pushbuf size to 8KiB, 4KiB barely enough for userspace and
  left no space for kernel's requirements during GEM pushbuf submission.
- fix duplicate assignments noticed by clang

v4: Marcin Slusarz <marcin.slusarz@gmail.com>
- add sparse annotations to nv04_fifo_pause/nv04_fifo_start
- use ioread32_native/iowrite32_native for fifo control registers

v5: Ben Skeggs <bskeggs@redhat.com>
- rebase on v3.6-rc4, modified to keep copy engine fix intact
- nv10/fence: unmap fence bo before destroying
- fixed fermi regression when using nvidia gr fuc
- fixed typo in supported dma_mask checking

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
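The "standard skeleton" mentioned above is visible in every ported engine in
the diff that follows: a chipset-specific priv struct wrapping the common
engine base class, a constructor that allocates it and wires up the PMC
enable bit, interrupt handler and object classes, and a nouveau_oclass that
exposes ctor/dtor/init/fini to the core. A condensed sketch of that shape,
taken from the nv84 BSP engine further down in this diff (the explanatory
comments are editorial, not part of the patch):

    struct nv84_bsp_priv {
        struct nouveau_bsp base;                /* common BSP engine state */
    };

    static int
    nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                  struct nouveau_oclass *oclass, void *data, u32 size,
                  struct nouveau_object **pobject)
    {
        struct nv84_bsp_priv *priv;
        int ret;

        /* allocate and initialise the base engine object */
        ret = nouveau_bsp_create(parent, engine, oclass, &priv);
        *pobject = nv_object(priv);
        if (ret)
            return ret;

        /* hook the engine into PMC enable and interrupt dispatch */
        nv_subdev(priv)->unit = 0x04008000;
        nv_subdev(priv)->intr = nv84_bsp_intr;
        nv_engine(priv)->cclass = &nv84_bsp_cclass; /* per-channel context class */
        nv_engine(priv)->sclass = nv84_bsp_sclass;  /* user-visible object classes */
        return 0;
    }

    struct nouveau_oclass
    nv84_bsp_oclass = {
        .handle = NV_ENGINE(BSP, 0x84),         /* engine type + chipset */
        .ofuncs = &(struct nouveau_ofuncs) {
            .ctor = nv84_bsp_ctor,
            .dtor = nv84_bsp_dtor,
            .init = nv84_bsp_init,
            .fini = nv84_bsp_fini,
        },
    };

Each engine in the commit repeats this pattern; only the unit bit, interrupt
handler and class tables differ per chipset and engine.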
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r-- drivers/gpu/drm/nouveau/Makefile | 99
-rw-r--r-- drivers/gpu/drm/nouveau/core/core/ramht.c | 297
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c | 157
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/copy/nva3.c | 268
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c | 350
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c | 252
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c | 255
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/nv04.c | 90
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 125
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 118
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c | 87
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c | 176
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c | 168
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c | 99
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/fifo/base.c | 165
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c | 736
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h | 178
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c | 228
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c | 279
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c | 394
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 554
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h | 36
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | 490
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | 604
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 594
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/ctx.h | 26
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c | 131
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c | 559
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c | 202
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c | 94
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nv04.c | 1325
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nv10.c | 1053
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nv20.c | 1072
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nv20.h | 31
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nv25.c | 167
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c | 134
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nv30.c | 238
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nv34.c | 168
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nv35.c | 166
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nv40.c | 681
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nv40.h | 21
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nv50.c | 907
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nv50.h | 7
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 1074
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h | 43
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nve0.c | 843
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/regs.h | 269
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c | 448
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c | 144
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c | 305
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c | 104
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c | 154
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/software/nv04.c | 140
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/software/nv10.c | 128
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/software/nv50.c | 198
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/software/nvc0.c | 180
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/vp/nv84.c | 157
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/core/ramht.h | 56
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/engine/bsp.h | 45
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/engine/copy.h | 47
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/engine/crypt.h | 46
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/engine/disp.h | 44
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h | 57
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/engine/fifo.h | 123
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/engine/graph.h | 72
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/engine/mpeg.h | 61
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/engine/ppp.h | 45
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/engine/software.h | 58
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/engine/vp.h | 45
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/device.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/instmem.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/vm.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/device/base.c | 33
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/device/nv04.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/device/nv10.c | 44
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/device/nv20.c | 26
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/device/nv30.c | 35
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/device/nv40.c | 103
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/device/nv50.c | 136
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c | 89
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/device/nve0.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h | 9
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/vm/base.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h | 6
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_abi16.c | 421
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_abi16.h | 32
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_acpi.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 347
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.h | 19
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_chan.c | 387
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_chan.h | 47
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_channel.c | 408
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_compat.c | 28
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_compat.h | 14
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_debugfs.c | 195
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 51
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dma.c | 56
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dma.h | 51
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 211
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.h | 68
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.c | 86
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.h | 407
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 225
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.c | 23
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.h | 24
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 170
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gpuobj.c | 518
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_irq.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_mem.c | 140
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_notifier.c | 151
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_prime.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_revcompat.c | 22
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_revcompat.h | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 68
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_software.h | 60
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_state.c | 391
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 163
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.h | 21
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_util.c | 39
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_util.h | 35
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_display.c | 22
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_fbcon.c | 70
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_fence.c | 37
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_software.c | 139
-rw-r--r-- drivers/gpu/drm/nouveau/nv10_fence.c | 63
-rw-r--r-- drivers/gpu/drm/nouveau/nv40_pm.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_display.c | 55
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_display.h | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_evo.c | 174
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fbcon.c | 36
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fence.c | 75
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_pm.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_software.c | 181
-rw-r--r-- drivers/gpu/drm/nouveau/nv84_fence.c | 106
-rw-r--r-- drivers/gpu/drm/nouveau/nvc0_fbcon.c | 31
-rw-r--r-- drivers/gpu/drm/nouveau/nvc0_fence.c | 102
-rw-r--r-- drivers/gpu/drm/nouveau/nvc0_software.c | 109
-rw-r--r-- drivers/gpu/drm/nouveau/nvd0_display.c | 11
146 files changed, 14219 insertions, 11099 deletions
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 1855699a1ef1..d8f3ad4285d2 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -4,9 +4,11 @@
 
 ccflags-y := -Iinclude/drm -DCONFIG_NOUVEAU_DEBUG=7 -DCONFIG_NOUVEAU_DEBUG_DEFAULT=3
 ccflags-y += -I$(src)/core/include
+ccflags-y += -I$(src)/core
 ccflags-y += -I$(src)
 
 nouveau-y := core/core/client.o
+nouveau-y += core/core/engctx.o
 nouveau-y += core/core/engine.o
 nouveau-y += core/core/enum.o
 nouveau-y += core/core/gpuobj.o
@@ -90,12 +92,20 @@ nouveau-y += core/subdev/vm/nv44.o
 nouveau-y += core/subdev/vm/nv50.o
 nouveau-y += core/subdev/vm/nvc0.o
 
+nouveau-y += core/engine/dmaobj/base.o
+nouveau-y += core/engine/dmaobj/nv04.o
+nouveau-y += core/engine/dmaobj/nv50.o
+nouveau-y += core/engine/dmaobj/nvc0.o
 nouveau-y += core/engine/bsp/nv84.o
 nouveau-y += core/engine/copy/nva3.o
 nouveau-y += core/engine/copy/nvc0.o
 nouveau-y += core/engine/crypt/nv84.o
 nouveau-y += core/engine/crypt/nv98.o
+nouveau-y += core/engine/disp/nv04.o
+nouveau-y += core/engine/disp/nv50.o
+nouveau-y += core/engine/disp/nvd0.o
 nouveau-y += core/engine/disp/vga.o
+nouveau-y += core/engine/fifo/base.o
 nouveau-y += core/engine/fifo/nv04.o
 nouveau-y += core/engine/fifo/nv10.o
 nouveau-y += core/engine/fifo/nv17.o
@@ -111,41 +121,82 @@ nouveau-y += core/engine/graph/ctxnve0.o
 nouveau-y += core/engine/graph/nv04.o
 nouveau-y += core/engine/graph/nv10.o
 nouveau-y += core/engine/graph/nv20.o
+nouveau-y += core/engine/graph/nv25.o
+nouveau-y += core/engine/graph/nv2a.o
+nouveau-y += core/engine/graph/nv30.o
+nouveau-y += core/engine/graph/nv34.o
+nouveau-y += core/engine/graph/nv35.o
 nouveau-y += core/engine/graph/nv40.o
 nouveau-y += core/engine/graph/nv50.o
 nouveau-y += core/engine/graph/nvc0.o
 nouveau-y += core/engine/graph/nve0.o
 nouveau-y += core/engine/mpeg/nv31.o
+nouveau-y += core/engine/mpeg/nv40.o
 nouveau-y += core/engine/mpeg/nv50.o
+nouveau-y += core/engine/mpeg/nv84.o
 nouveau-y += core/engine/ppp/nv98.o
+nouveau-y += core/engine/software/nv04.o
+nouveau-y += core/engine/software/nv10.o
+nouveau-y += core/engine/software/nv50.o
+nouveau-y += core/engine/software/nvc0.o
 nouveau-y += core/engine/vp/nv84.o
 
-nouveau-y += nouveau_drm.o nouveau_compat.o \
-        nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
-        nouveau_gpuobj.o nouveau_irq.o nouveau_notifier.o \
-        nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
-        nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
-        nouveau_hw.o nouveau_calc.o \
-        nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
-        nouveau_hdmi.o nouveau_dp.o \
-        nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
-        nouveau_mxm.o nouveau_agp.o \
-        nouveau_abi16.o \
-        nouveau_bios.o \
-        nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o \
-        nv04_software.o nv50_software.o nvc0_software.o \
-        nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
-        nv04_crtc.o nv04_display.o nv04_cursor.o \
-        nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
-        nv50_cursor.o nv50_display.o \
-        nvd0_display.o \
-        nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
-        nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \
-        nouveau_prime.o
-
-nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
+# drm/compat - will go away
+nouveau-y += nouveau_compat.o nouveau_revcompat.o
+
+# drm/core
+nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
+nouveau-y += nouveau_agp.o
+nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
+
+nouveau-y += nouveau_abi16.o
+nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
+
+# drm/kms/common
+nouveau-y += nouveau_fbcon.o
+
+# drm/kms/nv04:nv50
+nouveau-y += nv04_fbcon.o
+
+# drm/kms/nv50:nvd9
+nouveau-y += nv50_fbcon.o nvc0_fbcon.o
+
+# drm/kms/nvd9-
+
+##
+## unported bits below
+##
+
+# drm/core
+nouveau-y += nouveau_drv.o nouveau_state.o nouveau_irq.o
+nouveau-y += nouveau_prime.o
+
+# drm/kms/bios
+nouveau-y += nouveau_mxm.o nouveau_bios.o
+
+# drm/kms/common
+nouveau-y += nouveau_display.o nouveau_connector.o
+nouveau-y += nouveau_hdmi.o nouveau_dp.o
+
+# drm/kms/nv04:nv50
+nouveau-y += nouveau_hw.o nouveau_calc.o
+nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
+nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
+
+# drm/kms/nv50-
+nouveau-y += nv50_display.o nvd0_display.o
+nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
+nouveau-y += nv50_evo.o
+
+# drm/pm
+nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o
+nouveau-y += nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o
+nouveau-y += nouveau_mem.o
+
+# optional stuff
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
 nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
 nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
 
+
 obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
diff --git a/drivers/gpu/drm/nouveau/core/core/ramht.c b/drivers/gpu/drm/nouveau/core/core/ramht.c
index 5c22864fbd2c..86a64045dd60 100644
--- a/drivers/gpu/drm/nouveau/core/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/core/core/ramht.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -18,289 +18,92 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
  */
 
-#include "drmP.h"
-
-#include "nouveau_drv.h"
+#include <core/object.h>
 #include <core/ramht.h>
+#include <core/math.h>
+
+#include <subdev/bar.h>
 
 static u32
-nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle)
+nouveau_ramht_hash(struct nouveau_ramht *ramht, int chid, u32 handle)
 {
-    struct drm_device *dev = chan->dev;
-    struct drm_nouveau_private *dev_priv = dev->dev_private;
-    struct nouveau_ramht *ramht = chan->ramht;
     u32 hash = 0;
-    int i;
 
-    NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle);
-
-    for (i = 32; i > 0; i -= ramht->bits) {
+    while (handle) {
         hash ^= (handle & ((1 << ramht->bits) - 1));
         handle >>= ramht->bits;
     }
 
-    if (dev_priv->card_type < NV_50)
-        hash ^= chan->id << (ramht->bits - 4);
-    hash <<= 3;
-
-    NV_DEBUG(dev, "hash=0x%08x\n", hash);
+    hash ^= chid << (ramht->bits - 4);
+    hash = hash << 3;
     return hash;
 }
 
-static int
-nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
-                          u32 offset)
-{
-    struct drm_nouveau_private *dev_priv = dev->dev_private;
-    u32 ctx = nv_ro32(ramht, offset + 4);
-
-    if (dev_priv->card_type < NV_40)
-        return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
-    return (ctx != 0);
-}
-
-static int
-nouveau_ramht_entry_same_channel(struct nouveau_channel *chan,
-                                 struct nouveau_gpuobj *ramht, u32 offset)
-{
-    struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-    u32 ctx = nv_ro32(ramht, offset + 4);
-
-    if (dev_priv->card_type >= NV_50)
-        return true;
-    else if (dev_priv->card_type >= NV_40)
-        return chan->id ==
-            ((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
-    else
-        return chan->id ==
-            ((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
-}
-
 int
-nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
-                     struct nouveau_gpuobj *gpuobj)
+nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid,
+                     u32 handle, u32 context)
 {
-    struct drm_device *dev = chan->dev;
-    struct drm_nouveau_private *dev_priv = dev->dev_private;
-    struct nouveau_ramht_entry *entry;
-    struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
-    unsigned long flags;
-    u32 ctx, co, ho;
-
-    if (nouveau_ramht_find(chan, handle))
-        return -EEXIST;
-
-    entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-    if (!entry)
-        return -ENOMEM;
-    entry->channel = chan;
-    entry->gpuobj = NULL;
-    entry->handle = handle;
-    nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
-
-    if (dev_priv->card_type < NV_40) {
-        ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->addr >> 4) |
-              (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
-              (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
-    } else
-    if (dev_priv->card_type < NV_50) {
-        ctx = (gpuobj->addr >> 4) |
-              (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
-              (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
-    } else {
-        if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
-            ctx = (gpuobj->node->offset << 10) |
-                  (chan->id << 28) |
-                  chan->id; /* HASH_TAG */
-        } else {
-            ctx = (gpuobj->node->offset >> 4) |
-                  ((gpuobj->engine <<
-                    NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
-        }
-    }
-
-    spin_lock_irqsave(&chan->ramht->lock, flags);
-    list_add(&entry->head, &chan->ramht->entries);
+    struct nouveau_bar *bar = nouveau_bar(ramht);
+    u32 co, ho;
 
-    co = ho = nouveau_ramht_hash_handle(chan, handle);
+    co = ho = nouveau_ramht_hash(ramht, chid, handle);
     do {
-        if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
-            NV_DEBUG(dev,
-                     "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
-                     chan->id, co, handle, ctx);
+        if (!nv_ro32(ramht, co + 4)) {
             nv_wo32(ramht, co + 0, handle);
-            nv_wo32(ramht, co + 4, ctx);
-
-            spin_unlock_irqrestore(&chan->ramht->lock, flags);
-            nvimem_flush(dev);
-            return 0;
+            nv_wo32(ramht, co + 4, context);
+            if (bar)
+                bar->flush(bar);
+            return co;
         }
-        NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
-                 chan->id, co, nv_ro32(ramht, co));
 
         co += 8;
-        if (co >= ramht->size)
+        if (co >= nv_gpuobj(ramht)->size)
             co = 0;
     } while (co != ho);
 
-    NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
-    list_del(&entry->head);
-    spin_unlock_irqrestore(&chan->ramht->lock, flags);
-    kfree(entry);
     return -ENOMEM;
 }
 
-static struct nouveau_ramht_entry *
-nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle)
-{
-    struct nouveau_ramht *ramht = chan ? chan->ramht : NULL;
-    struct nouveau_ramht_entry *entry;
-    unsigned long flags;
-
-    if (!ramht)
-        return NULL;
-
-    spin_lock_irqsave(&ramht->lock, flags);
-    list_for_each_entry(entry, &ramht->entries, head) {
-        if (entry->channel == chan &&
-            (!handle || entry->handle == handle)) {
-            list_del(&entry->head);
-            spin_unlock_irqrestore(&ramht->lock, flags);
-
-            return entry;
-        }
-    }
-    spin_unlock_irqrestore(&ramht->lock, flags);
-
-    return NULL;
-}
-
-static void
-nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
-{
-    struct drm_device *dev = chan->dev;
-    struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
-    unsigned long flags;
-    u32 co, ho;
-
-    spin_lock_irqsave(&chan->ramht->lock, flags);
-    co = ho = nouveau_ramht_hash_handle(chan, handle);
-    do {
-        if (nouveau_ramht_entry_valid(dev, ramht, co) &&
-            nouveau_ramht_entry_same_channel(chan, ramht, co) &&
-            (handle == nv_ro32(ramht, co))) {
-            NV_DEBUG(dev,
-                     "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
-                     chan->id, co, handle, nv_ro32(ramht, co + 4));
-            nv_wo32(ramht, co + 0, 0x00000000);
-            nv_wo32(ramht, co + 4, 0x00000000);
-            nvimem_flush(dev);
-            goto out;
-        }
-
-        co += 8;
-        if (co >= ramht->size)
-            co = 0;
-    } while (co != ho);
-
-    NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
-             chan->id, handle);
-out:
-    spin_unlock_irqrestore(&chan->ramht->lock, flags);
-}
-
-int
-nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
+void
+nouveau_ramht_remove(struct nouveau_ramht *ramht, int cookie)
 {
-    struct nouveau_ramht_entry *entry;
-
-    entry = nouveau_ramht_remove_entry(chan, handle);
-    if (!entry)
-        return -ENOENT;
-
-    nouveau_ramht_remove_hash(chan, entry->handle);
-    nouveau_gpuobj_ref(NULL, &entry->gpuobj);
-    kfree(entry);
-    return 0;
+    struct nouveau_bar *bar = nouveau_bar(ramht);
+    nv_wo32(ramht, cookie + 0, 0x00000000);
+    nv_wo32(ramht, cookie + 4, 0x00000000);
+    if (bar)
+        bar->flush(bar);
 }
 
-struct nouveau_gpuobj *
-nouveau_ramht_find(struct nouveau_channel *chan, u32 handle)
-{
-    struct nouveau_ramht *ramht = chan->ramht;
-    struct nouveau_ramht_entry *entry;
-    struct nouveau_gpuobj *gpuobj = NULL;
-    unsigned long flags;
-
-    if (unlikely(!chan->ramht))
-        return NULL;
-
-    spin_lock_irqsave(&ramht->lock, flags);
-    list_for_each_entry(entry, &chan->ramht->entries, head) {
-        if (entry->channel == chan && entry->handle == handle) {
-            gpuobj = entry->gpuobj;
-            break;
-        }
-    }
-    spin_unlock_irqrestore(&ramht->lock, flags);
-
-    return gpuobj;
-}
+static struct nouveau_oclass
+nouveau_ramht_oclass = {
+    .handle = 0x0000abcd,
+    .ofuncs = &(struct nouveau_ofuncs) {
+        .ctor = NULL,
+        .dtor = _nouveau_gpuobj_dtor,
+        .init = _nouveau_gpuobj_init,
+        .fini = _nouveau_gpuobj_fini,
+        .rd32 = _nouveau_gpuobj_rd32,
+        .wr32 = _nouveau_gpuobj_wr32,
+    },
+};
 
 int
-nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-                  struct nouveau_ramht **pramht)
+nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
+                  u32 size, u32 align, struct nouveau_ramht **pramht)
 {
     struct nouveau_ramht *ramht;
+    int ret;
 
-    ramht = kzalloc(sizeof(*ramht), GFP_KERNEL);
-    if (!ramht)
-        return -ENOMEM;
-
-    ramht->dev = dev;
-    kref_init(&ramht->refcount);
-    ramht->bits = drm_order(gpuobj->size / 8);
-    INIT_LIST_HEAD(&ramht->entries);
-    spin_lock_init(&ramht->lock);
-    nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj);
-
+    ret = nouveau_gpuobj_create(parent, parent->engine ?
+                                parent->engine : parent, /* <nv50 ramht */
+                                &nouveau_ramht_oclass, 0, pargpu, size,
+                                align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
     *pramht = ramht;
-    return 0;
-}
-
-static void
-nouveau_ramht_del(struct kref *ref)
-{
-    struct nouveau_ramht *ramht =
-        container_of(ref, struct nouveau_ramht, refcount);
-
-    nouveau_gpuobj_ref(NULL, &ramht->gpuobj);
-    kfree(ramht);
-}
-
-void
-nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
-                  struct nouveau_channel *chan)
-{
-    struct nouveau_ramht_entry *entry;
-    struct nouveau_ramht *ramht;
-
-    if (ref)
-        kref_get(&ref->refcount);
-
-    ramht = *ptr;
-    if (ramht) {
-        while ((entry = nouveau_ramht_remove_entry(chan, 0))) {
-            nouveau_ramht_remove_hash(chan, entry->handle);
-            nouveau_gpuobj_ref(NULL, &entry->gpuobj);
-            kfree(entry);
-        }
-
-        kref_put(&ramht->refcount, nouveau_ramht_del);
-    }
-    *ptr = ref;
+    if (ret)
+        return ret;
+
+    ramht->bits = log2i(nv_gpuobj(ramht)->size >> 3);
+    return 0;
 }
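The rewritten ramht.c above reduces RAMHT to a plain open-addressed hash
table: the handle is folded down to ramht->bits bits, the channel id is XORed
in, and collisions probe linearly through the 8-byte slots, wrapping at the
end of the object. A self-contained sketch of that probe strategy, using an
ordinary array in place of the gpuobj accessors (editorial illustration only,
not code from this patch; assumes a power-of-two slot count):

    #include <stdint.h>
    #include <stddef.h>

    /* Model of the nouveau_ramht_insert() probe loop: 'table' stands in for
     * the RAMHT gpuobj, two u32 words (handle, context) per slot. */
    static int
    ramht_insert(uint32_t *table, size_t slots, int bits, int chid,
                 uint32_t handle, uint32_t context)
    {
        uint32_t hash = 0, h = handle;
        size_t co, ho;

        while (h) {                             /* fold handle down to 'bits' bits */
            hash ^= h & ((1u << bits) - 1);
            h >>= bits;
        }
        hash ^= (uint32_t)chid << (bits - 4);   /* mix in the channel id */
        co = ho = (size_t)(hash & (slots - 1)) * 2;

        do {
            if (!table[co + 1]) {               /* empty slot: context word is 0 */
                table[co + 0] = handle;
                table[co + 1] = context;
                return (int)co;                 /* cookie for later removal */
            }
            co += 2;                            /* linear probe to the next slot */
            if (co >= slots * 2)
                co = 0;                         /* wrap at end of table */
        } while (co != ho);

        return -1;                              /* table full */
    }

Removal, as in the new nouveau_ramht_remove(), is simply zeroing both words at
the returned cookie.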
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
index 4b809319e831..66f7dfd907ee 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,61 +22,154 @@
  * Authors: Ben Skeggs
  */
 
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include <core/ramht.h>
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
 
-/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
- * more than just an enable/disable stub this needs to be split out to
- * nv98_bsp.c...
- */
+#include <engine/bsp.h>
+
+struct nv84_bsp_priv {
+    struct nouveau_bsp base;
+};
 
-struct nv84_bsp_engine {
-    struct nouveau_exec_engine base;
+struct nv84_bsp_chan {
+    struct nouveau_bsp_chan base;
 };
 
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_bsp_sclass[] = {
+    {},
+};
+
+/*******************************************************************************
+ * BSP context
+ ******************************************************************************/
+
 static int
-nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
+nv84_bsp_context_ctor(struct nouveau_object *parent,
+                      struct nouveau_object *engine,
+                      struct nouveau_oclass *oclass, void *data, u32 size,
+                      struct nouveau_object **pobject)
 {
-    if (!(nv_rd32(dev, 0x000200) & 0x00008000))
-        return 0;
+    struct nv84_bsp_chan *priv;
+    int ret;
+
+    ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
+                                     0, 0, 0, &priv);
+    *pobject = nv_object(priv);
+    if (ret)
+        return ret;
 
-    nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
     return 0;
 }
 
+static void
+nv84_bsp_context_dtor(struct nouveau_object *object)
+{
+    struct nv84_bsp_chan *priv = (void *)object;
+    nouveau_bsp_context_destroy(&priv->base);
+}
+
 static int
-nv84_bsp_init(struct drm_device *dev, int engine)
+nv84_bsp_context_init(struct nouveau_object *object)
 {
-    nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
-    nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
+    struct nv84_bsp_chan *priv = (void *)object;
+    int ret;
+
+    ret = nouveau_bsp_context_init(&priv->base);
+    if (ret)
+        return ret;
+
     return 0;
 }
 
+static int
+nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
+{
+    struct nv84_bsp_chan *priv = (void *)object;
+    return nouveau_bsp_context_fini(&priv->base, suspend);
+}
+
+static struct nouveau_oclass
+nv84_bsp_cclass = {
+    .handle = NV_ENGCTX(BSP, 0x84),
+    .ofuncs = &(struct nouveau_ofuncs) {
+        .ctor = nv84_bsp_context_ctor,
+        .dtor = nv84_bsp_context_dtor,
+        .init = nv84_bsp_context_init,
+        .fini = nv84_bsp_context_fini,
+        .rd32 = _nouveau_bsp_context_rd32,
+        .wr32 = _nouveau_bsp_context_wr32,
+    },
+};
+
+/*******************************************************************************
+ * BSP engine/subdev functions
+ ******************************************************************************/
+
 static void
-nv84_bsp_destroy(struct drm_device *dev, int engine)
+nv84_bsp_intr(struct nouveau_subdev *subdev)
 {
-    struct nv84_bsp_engine *pbsp = nv_engine(dev, engine);
+}
 
-    NVOBJ_ENGINE_DEL(dev, BSP);
+static int
+nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
+{
+    struct nv84_bsp_priv *priv;
+    int ret;
+
+    ret = nouveau_bsp_create(parent, engine, oclass, &priv);
+    *pobject = nv_object(priv);
+    if (ret)
+        return ret;
 
-    kfree(pbsp);
+    nv_subdev(priv)->unit = 0x04008000;
+    nv_subdev(priv)->intr = nv84_bsp_intr;
+    nv_engine(priv)->cclass = &nv84_bsp_cclass;
+    nv_engine(priv)->sclass = nv84_bsp_sclass;
+    return 0;
 }
 
-int
-nv84_bsp_create(struct drm_device *dev)
+static void
+nv84_bsp_dtor(struct nouveau_object *object)
 {
-    struct nv84_bsp_engine *pbsp;
+    struct nv84_bsp_priv *priv = (void *)object;
+    nouveau_bsp_destroy(&priv->base);
+}
 
-    pbsp = kzalloc(sizeof(*pbsp), GFP_KERNEL);
-    if (!pbsp)
-        return -ENOMEM;
+static int
+nv84_bsp_init(struct nouveau_object *object)
+{
+    struct nv84_bsp_priv *priv = (void *)object;
+    int ret;
 
-    pbsp->base.destroy = nv84_bsp_destroy;
-    pbsp->base.init = nv84_bsp_init;
-    pbsp->base.fini = nv84_bsp_fini;
+    ret = nouveau_bsp_init(&priv->base);
+    if (ret)
+        return ret;
 
-    NVOBJ_ENGINE_ADD(dev, BSP, &pbsp->base);
     return 0;
 }
+
+static int
+nv84_bsp_fini(struct nouveau_object *object, bool suspend)
+{
+    struct nv84_bsp_priv *priv = (void *)object;
+    return nouveau_bsp_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv84_bsp_oclass = {
+    .handle = NV_ENGINE(BSP, 0x84),
+    .ofuncs = &(struct nouveau_ofuncs) {
+        .ctor = nv84_bsp_ctor,
+        .dtor = nv84_bsp_dtor,
+        .init = nv84_bsp_init,
+        .fini = nv84_bsp_fini,
+    },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index 9150c5ed16c3..debb82830b66 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,112 +22,75 @@
  * Authors: Ben Skeggs
  */
 
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include <core/ramht.h>
-#include "fuc/nva3.fuc.h"
-
-struct nva3_copy_engine {
-    struct nouveau_exec_engine base;
-};
-
-static int
-nva3_copy_context_new(struct nouveau_channel *chan, int engine)
-{
-    struct drm_device *dev = chan->dev;
-    struct nouveau_gpuobj *ramin = chan->ramin;
-    struct nouveau_gpuobj *ctx = NULL;
-    int ret;
-
-    NV_DEBUG(dev, "ch%d\n", chan->id);
-
-    ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
-                             NVOBJ_FLAG_ZERO_FREE, &ctx);
-    if (ret)
-        return ret;
-
-    nv_wo32(ramin, 0xc0, 0x00190000);
-    nv_wo32(ramin, 0xc4, ctx->addr + ctx->size - 1);
-    nv_wo32(ramin, 0xc8, ctx->addr);
-    nv_wo32(ramin, 0xcc, 0x00000000);
-    nv_wo32(ramin, 0xd0, 0x00000000);
-    nv_wo32(ramin, 0xd4, 0x00000000);
-    nvimem_flush(dev);
-
-    nvvm_engref(chan->vm, engine, 1);
-    chan->engctx[engine] = ctx;
-    return 0;
-}
-
-static int
-nva3_copy_object_new(struct nouveau_channel *chan, int engine,
-                     u32 handle, u16 class)
-{
-    struct nouveau_gpuobj *ctx = chan->engctx[engine];
-
-    /* fuc engine doesn't need an object, our ramht code does.. */
-    ctx->engine = 3;
-    ctx->class = class;
-    return nouveau_ramht_insert(chan, handle, ctx);
-}
-
-static void
-nva3_copy_context_del(struct nouveau_channel *chan, int engine)
-{
-    struct nouveau_gpuobj *ctx = chan->engctx[engine];
-    int i;
-
-    for (i = 0xc0; i <= 0xd4; i += 4)
-        nv_wo32(chan->ramin, i, 0x00000000);
-
-    nvvm_engref(chan->vm, engine, -1);
-    nouveau_gpuobj_ref(NULL, &ctx);
-    chan->engctx[engine] = ctx;
-}
-
-static void
-nva3_copy_tlb_flush(struct drm_device *dev, int engine)
-{
-    nv50_vm_flush_engine(dev, 0x0d);
-}
-
-static int
-nva3_copy_init(struct drm_device *dev, int engine)
-{
-    int i;
-
-    nv_mask(dev, 0x000200, 0x00002000, 0x00000000);
-    nv_mask(dev, 0x000200, 0x00002000, 0x00002000);
-    nv_wr32(dev, 0x104014, 0xffffffff); /* disable all interrupts */
-
-    /* upload ucode */
-    nv_wr32(dev, 0x1041c0, 0x01000000);
-    for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
-        nv_wr32(dev, 0x1041c4, nva3_pcopy_data[i]);
-
-    nv_wr32(dev, 0x104180, 0x01000000);
-    for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
-        if ((i & 0x3f) == 0)
-            nv_wr32(dev, 0x104188, i >> 6);
-        nv_wr32(dev, 0x104184, nva3_pcopy_code[i]);
-    }
-
-    /* start it running */
-    nv_wr32(dev, 0x10410c, 0x00000000);
-    nv_wr32(dev, 0x104104, 0x00000000); /* ENTRY */
-    nv_wr32(dev, 0x104100, 0x00000002); /* TRIGGER */
-    return 0;
-}
-
-static int
-nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
-{
-    nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
-    nv_wr32(dev, 0x104014, 0xffffffff);
-    return 0;
-}
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+
+#include <engine/copy.h>
+
+#include "fuc/nva3.fuc.h"
+
+struct nva3_copy_priv {
+    struct nouveau_copy base;
+};
+
+struct nva3_copy_chan {
+    struct nouveau_copy_chan base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nva3_copy_sclass[] = {
+    { 0x85b5, &nouveau_object_ofuncs },
+    {}
+};
+
+/*******************************************************************************
+ * PCOPY context
+ ******************************************************************************/
+
+static int
+nva3_copy_context_ctor(struct nouveau_object *parent,
+                       struct nouveau_object *engine,
+                       struct nouveau_oclass *oclass, void *data, u32 size,
+                       struct nouveau_object **pobject)
+{
+    struct nva3_copy_chan *priv;
+    int ret;
+
+    ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
+                                      NVOBJ_FLAG_ZERO_ALLOC, &priv);
+    *pobject = nv_object(priv);
+    if (ret)
+        return ret;
+
+    return 0;
+}
+
+static struct nouveau_oclass
+nva3_copy_cclass = {
+    .handle = NV_ENGCTX(COPY0, 0xa3),
+    .ofuncs = &(struct nouveau_ofuncs) {
+        .ctor = nva3_copy_context_ctor,
+        .dtor = _nouveau_copy_context_dtor,
+        .init = _nouveau_copy_context_init,
+        .fini = _nouveau_copy_context_fini,
+        .rd32 = _nouveau_copy_context_rd32,
+        .wr32 = _nouveau_copy_context_wr32,
+
+    },
+};
+
+/*******************************************************************************
+ * PCOPY engine/subdev functions
+ ******************************************************************************/
 
 static struct nouveau_enum nva3_copy_isr_error_name[] = {
     { 0x0001, "ILLEGAL_MTHD" },
@@ -137,65 +100,114 @@ static struct nouveau_enum nva3_copy_isr_error_name[] = {
 };
 
 static void
-nva3_copy_isr(struct drm_device *dev)
+nva3_copy_intr(struct nouveau_subdev *subdev)
 {
-    u32 dispatch = nv_rd32(dev, 0x10401c);
-    u32 stat = nv_rd32(dev, 0x104008) & dispatch & ~(dispatch >> 16);
-    u32 inst = nv_rd32(dev, 0x104050) & 0x3fffffff;
-    u32 ssta = nv_rd32(dev, 0x104040) & 0x0000ffff;
-    u32 addr = nv_rd32(dev, 0x104040) >> 16;
+    struct nva3_copy_priv *priv = (void *)subdev;
+    u32 dispatch = nv_rd32(priv, 0x10401c);
+    u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16);
+    u32 inst = nv_rd32(priv, 0x104050) & 0x3fffffff;
+    u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff;
+    u32 addr = nv_rd32(priv, 0x104040) >> 16;
     u32 mthd = (addr & 0x07ff) << 2;
     u32 subc = (addr & 0x3800) >> 11;
-    u32 data = nv_rd32(dev, 0x104044);
-    int chid = nv50_graph_isr_chid(dev, inst);
+    u32 data = nv_rd32(priv, 0x104044);
 
     if (stat & 0x00000040) {
-        NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
+        nv_error(priv, "DISPATCH_ERROR [");
         nouveau_enum_print(nva3_copy_isr_error_name, ssta);
-        printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
-               chid, inst, subc, mthd, data);
-        nv_wr32(dev, 0x104004, 0x00000040);
+        printk("] ch 0x%08x subc %d mthd 0x%04x data 0x%08x\n",
+               inst, subc, mthd, data);
+        nv_wr32(priv, 0x104004, 0x00000040);
         stat &= ~0x00000040;
     }
 
     if (stat) {
-        NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
-        nv_wr32(dev, 0x104004, stat);
+        nv_error(priv, "unhandled intr 0x%08x\n", stat);
+        nv_wr32(priv, 0x104004, stat);
     }
-    nv50_fb_vm_trap(dev, 1);
+
+    nv50_fb_trap(nouveau_fb(priv), 1);
 }
 
-static void
-nva3_copy_destroy(struct drm_device *dev, int engine)
+static int
+nva3_copy_tlb_flush(struct nouveau_engine *engine)
 {
-    struct nva3_copy_engine *pcopy = nv_engine(dev, engine);
+    nv50_vm_flush_engine(&engine->base, 0x0d);
+    return 0;
+}
 
-    nouveau_irq_unregister(dev, 22);
+static int
+nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+               struct nouveau_oclass *oclass, void *data, u32 size,
+               struct nouveau_object **pobject)
+{
+    bool enable = (nv_device(parent)->chipset != 0xaf);
+    struct nva3_copy_priv *priv;
+    int ret;
 
-    NVOBJ_ENGINE_DEL(dev, COPY0);
-    kfree(pcopy);
+    ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv);
+    *pobject = nv_object(priv);
+    if (ret)
+        return ret;
+
+    nv_subdev(priv)->unit = 0x00802000;
+    nv_subdev(priv)->intr = nva3_copy_intr;
+    nv_engine(priv)->cclass = &nva3_copy_cclass;
+    nv_engine(priv)->sclass = nva3_copy_sclass;
+    nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
+    return 0;
 }
 
-int
-nva3_copy_create(struct drm_device *dev)
+static int
+nva3_copy_init(struct nouveau_object *object)
 {
-    struct nva3_copy_engine *pcopy;
+    struct nva3_copy_priv *priv = (void *)object;
+    int ret, i;
 
-    pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
-    if (!pcopy)
-        return -ENOMEM;
+    ret = nouveau_copy_init(&priv->base);
+    if (ret)
+        return ret;
 
-    pcopy->base.destroy = nva3_copy_destroy;
-    pcopy->base.init = nva3_copy_init;
-    pcopy->base.fini = nva3_copy_fini;
-    pcopy->base.context_new = nva3_copy_context_new;
-    pcopy->base.context_del = nva3_copy_context_del;
-    pcopy->base.object_new = nva3_copy_object_new;
-    pcopy->base.tlb_flush = nva3_copy_tlb_flush;
+    /* disable all interrupts */
+    nv_wr32(priv, 0x104014, 0xffffffff);
 
-    nouveau_irq_register(dev, 22, nva3_copy_isr);
+    /* upload ucode */
+    nv_wr32(priv, 0x1041c0, 0x01000000);
+    for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
+        nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
+
+    nv_wr32(priv, 0x104180, 0x01000000);
+    for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
+        if ((i & 0x3f) == 0)
+            nv_wr32(priv, 0x104188, i >> 6);
+        nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
+    }
 
-    NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
-    NVOBJ_CLASS(dev, 0x85b5, COPY0);
+    /* start it running */
+    nv_wr32(priv, 0x10410c, 0x00000000);
+    nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
+    nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
     return 0;
 }
+
+static int
+nva3_copy_fini(struct nouveau_object *object, bool suspend)
+{
+    struct nva3_copy_priv *priv = (void *)object;
+
+    nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
+    nv_wr32(priv, 0x104014, 0xffffffff);
+
+    return nouveau_copy_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nva3_copy_oclass = {
+    .handle = NV_ENGINE(COPY0, 0xa3),
+    .ofuncs = &(struct nouveau_ofuncs) {
+        .ctor = nva3_copy_ctor,
+        .dtor = _nouveau_copy_dtor,
+        .init = nva3_copy_init,
+        .fini = nva3_copy_fini,
+    },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index f39de5a593d6..ecc8faac3a2a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,138 +22,86 @@
  * Authors: Ben Skeggs
  */
 
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include <core/ramht.h>
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/copy.h>
+
 #include "fuc/nvc0.fuc.h"
 
-struct nvc0_copy_engine {
-    struct nouveau_exec_engine base;
-    u32 irq;
-    u32 pmc;
-    u32 fuc;
-    u32 ctx;
+struct nvc0_copy_priv {
+    struct nouveau_copy base;
 };
 
 struct nvc0_copy_chan {
-    struct nouveau_gpuobj *mem;
-    struct nouveau_vma vma;
+    struct nouveau_copy_chan base;
 };
 
-static int
-nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
-{
-    struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
-    struct nvc0_copy_chan *cctx;
-    struct drm_device *dev = chan->dev;
-    struct nouveau_gpuobj *ramin = chan->ramin;
-    int ret;
-
-    cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
-    if (!cctx)
-        return -ENOMEM;
-
-    ret = nouveau_gpuobj_new(dev, NULL, 256, 256,
-                             NVOBJ_FLAG_ZERO_ALLOC, &cctx->mem);
-    if (ret)
-        return ret;
-
-    ret = nouveau_gpuobj_map_vm(cctx->mem, chan->vm, NV_MEM_ACCESS_RW,
-                                &cctx->vma);
-    if (ret)
-        return ret;
-
-    nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(cctx->vma.offset));
-    nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(cctx->vma.offset));
-    nvimem_flush(dev);
-    return 0;
-}
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_copy0_sclass[] = {
+    { 0x90b5, &nouveau_object_ofuncs },
+    {},
+};
+
+static struct nouveau_oclass
+nvc0_copy1_sclass[] = {
+    { 0x90b8, &nouveau_object_ofuncs },
+    {},
+};
+
+/*******************************************************************************
+ * PCOPY context
+ ******************************************************************************/
 
 static int
-nvc0_copy_object_new(struct nouveau_channel *chan, int engine,
-                     u32 handle, u16 class)
-{
-    return 0;
-}
-
-static void
-nvc0_copy_context_del(struct nouveau_channel *chan, int engine)
+nvc0_copy_context_ctor(struct nouveau_object *parent,
+                       struct nouveau_object *engine,
+                       struct nouveau_oclass *oclass, void *data, u32 size,
+                       struct nouveau_object **pobject)
 {
-    struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
-    struct nvc0_copy_chan *cctx = chan->engctx[engine];
-    struct drm_device *dev = chan->dev;
-    u32 inst;
-
-    inst = (chan->ramin->addr >> 12);
-    inst |= 0x40000000;
-
-    /* disable fifo access */
-    nv_wr32(dev, pcopy->fuc + 0x048, 0x00000000);
-    /* mark channel as unloaded if it's currently active */
-    if (nv_rd32(dev, pcopy->fuc + 0x050) == inst)
-        nv_mask(dev, pcopy->fuc + 0x050, 0x40000000, 0x00000000);
-    /* mark next channel as invalid if it's about to be loaded */
-    if (nv_rd32(dev, pcopy->fuc + 0x054) == inst)
-        nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
-    /* restore fifo access */
-    nv_wr32(dev, pcopy->fuc + 0x048, 0x00000003);
-
-    nv_wo32(chan->ramin, pcopy->ctx + 0, 0x00000000);
-    nv_wo32(chan->ramin, pcopy->ctx + 4, 0x00000000);
-
-    nouveau_gpuobj_unmap(&cctx->vma);
-    nouveau_gpuobj_ref(NULL, &cctx->mem);
-
-    kfree(cctx);
-    chan->engctx[engine] = NULL;
-}
-
-static int
-nvc0_copy_init(struct drm_device *dev, int engine)
-{
-    struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
-    int i;
-
-    nv_mask(dev, 0x000200, pcopy->pmc, 0x00000000);
-    nv_mask(dev, 0x000200, pcopy->pmc, pcopy->pmc);
-    nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
-
-    nv_wr32(dev, pcopy->fuc + 0x1c0, 0x01000000);
-    for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
-        nv_wr32(dev, pcopy->fuc + 0x1c4, nvc0_pcopy_data[i]);
+    struct nvc0_copy_chan *priv;
+    int ret;
 
-    nv_wr32(dev, pcopy->fuc + 0x180, 0x01000000);
-    for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
-        if ((i & 0x3f) == 0)
-            nv_wr32(dev, pcopy->fuc + 0x188, i >> 6);
-        nv_wr32(dev, pcopy->fuc + 0x184, nvc0_pcopy_code[i]);
-    }
+    ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
+                                      256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+    *pobject = nv_object(priv);
+    if (ret)
+        return ret;
 
-    nv_wr32(dev, pcopy->fuc + 0x084, engine - NVOBJ_ENGINE_COPY0);
-    nv_wr32(dev, pcopy->fuc + 0x10c, 0x00000000);
-    nv_wr32(dev, pcopy->fuc + 0x104, 0x00000000); /* ENTRY */
-    nv_wr32(dev, pcopy->fuc + 0x100, 0x00000002); /* TRIGGER */
     return 0;
 }
 
-static int
-nvc0_copy_fini(struct drm_device *dev, int engine, bool suspend)
-{
-    struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
+static struct nouveau_ofuncs
+nvc0_copy_context_ofuncs = {
+    .ctor = nvc0_copy_context_ctor,
+    .dtor = _nouveau_copy_context_dtor,
+    .init = _nouveau_copy_context_init,
+    .fini = _nouveau_copy_context_fini,
+    .rd32 = _nouveau_copy_context_rd32,
+    .wr32 = _nouveau_copy_context_wr32,
+};
 
-    nv_mask(dev, pcopy->fuc + 0x048, 0x00000003, 0x00000000);
+static struct nouveau_oclass
+nvc0_copy0_cclass = {
+    .handle = NV_ENGCTX(COPY0, 0xc0),
+    .ofuncs = &nvc0_copy_context_ofuncs,
+};
 
-    /* trigger fuc context unload */
-    nv_wait(dev, pcopy->fuc + 0x008, 0x0000000c, 0x00000000);
-    nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
-    nv_wr32(dev, pcopy->fuc + 0x000, 0x00000008);
-    nv_wait(dev, pcopy->fuc + 0x008, 0x00000008, 0x00000000);
+static struct nouveau_oclass
+nvc0_copy1_cclass = {
+    .handle = NV_ENGCTX(COPY1, 0xc0),
+    .ofuncs = &nvc0_copy_context_ofuncs,
+};
 
-    nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
-    return 0;
-}
+/*******************************************************************************
+ * PCOPY engine/subdev functions
+ ******************************************************************************/
 
 static struct nouveau_enum nvc0_copy_isr_error_name[] = {
     { 0x0001, "ILLEGAL_MTHD" },
@@ -163,93 +111,145 @@ static struct nouveau_enum nvc0_copy_isr_error_name[] = {
163}; 111};
164 112
165static void 113static void
166nvc0_copy_isr(struct drm_device *dev, int engine) 114nvc0_copy_intr(struct nouveau_subdev *subdev)
167{ 115{
168 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine); 116 int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
169 u32 disp = nv_rd32(dev, pcopy->fuc + 0x01c); 117 struct nvc0_copy_priv *priv = (void *)subdev;
170 u32 stat = nv_rd32(dev, pcopy->fuc + 0x008) & disp & ~(disp >> 16); 118 u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
171 u64 inst = (u64)(nv_rd32(dev, pcopy->fuc + 0x050) & 0x0fffffff) << 12; 119 u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
172 u32 chid = nvc0_graph_isr_chid(dev, inst); 120 u32 stat = intr & disp & ~(disp >> 16);
173 u32 ssta = nv_rd32(dev, pcopy->fuc + 0x040) & 0x0000ffff; 121 u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
174 u32 addr = nv_rd32(dev, pcopy->fuc + 0x040) >> 16; 122 u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
123 u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
175 u32 mthd = (addr & 0x07ff) << 2; 124 u32 mthd = (addr & 0x07ff) << 2;
176 u32 subc = (addr & 0x3800) >> 11; 125 u32 subc = (addr & 0x3800) >> 11;
177 u32 data = nv_rd32(dev, pcopy->fuc + 0x044); 126 u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
178 127
179 if (stat & 0x00000040) { 128 if (stat & 0x00000040) {
180 NV_INFO(dev, "PCOPY: DISPATCH_ERROR ["); 129 nv_error(priv, "DISPATCH_ERROR [");
181 nouveau_enum_print(nvc0_copy_isr_error_name, ssta); 130 nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
182 printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n", 131 printk("] ch 0x%010llx subc %d mthd 0x%04x data 0x%08x\n",
183 chid, inst, subc, mthd, data); 132 (u64)inst << 12, subc, mthd, data);
184 nv_wr32(dev, pcopy->fuc + 0x004, 0x00000040); 133 nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
185 stat &= ~0x00000040; 134 stat &= ~0x00000040;
186 } 135 }
187 136
188 if (stat) { 137 if (stat) {
189 NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat); 138 nv_error(priv, "unhandled intr 0x%08x\n", stat);
190 nv_wr32(dev, pcopy->fuc + 0x004, stat); 139 nv_wr32(priv, 0x104004 + (idx * 0x1000), stat);
191 } 140 }
192} 141}
193 142
194static void 143static int
-nvc0_copy_isr_0(struct drm_device *dev)
-{
-	nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY0);
-}
-
-static void
-nvc0_copy_isr_1(struct drm_device *dev)
-{
-	nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY1);
-}
-
-static void
-nvc0_copy_destroy(struct drm_device *dev, int engine)
-{
-	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
-
-	nouveau_irq_unregister(dev, pcopy->irq);
-
-	if (engine == NVOBJ_ENGINE_COPY0)
-		NVOBJ_ENGINE_DEL(dev, COPY0);
-	else
-		NVOBJ_ENGINE_DEL(dev, COPY1);
-	kfree(pcopy);
-}
-
-int
-nvc0_copy_create(struct drm_device *dev, int engine)
-{
-	struct nvc0_copy_engine *pcopy;
-
-	pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
-	if (!pcopy)
-		return -ENOMEM;
-
-	pcopy->base.destroy = nvc0_copy_destroy;
-	pcopy->base.init = nvc0_copy_init;
-	pcopy->base.fini = nvc0_copy_fini;
-	pcopy->base.context_new = nvc0_copy_context_new;
-	pcopy->base.context_del = nvc0_copy_context_del;
-	pcopy->base.object_new = nvc0_copy_object_new;
-
-	if (engine == 0) {
-		pcopy->irq = 5;
-		pcopy->pmc = 0x00000040;
-		pcopy->fuc = 0x104000;
-		pcopy->ctx = 0x0230;
-		nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_0);
-		NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
-		NVOBJ_CLASS(dev, 0x90b5, COPY0);
-	} else {
-		pcopy->irq = 6;
-		pcopy->pmc = 0x00000080;
-		pcopy->fuc = 0x105000;
-		pcopy->ctx = 0x0240;
-		nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_1);
-		NVOBJ_ENGINE_ADD(dev, COPY1, &pcopy->base);
-		NVOBJ_CLASS(dev, 0x90b8, COPY1);
-	}
-
-	return 0;
-}
+nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nvc0_copy_priv *priv;
+	int ret;
+
+	if (nv_rd32(parent, 0x022500) & 0x00000100)
+		return -ENODEV;
+
+	ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000040;
+	nv_subdev(priv)->intr = nvc0_copy_intr;
+	nv_engine(priv)->cclass = &nvc0_copy0_cclass;
+	nv_engine(priv)->sclass = nvc0_copy0_sclass;
+	return 0;
+}
+
+static int
+nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nvc0_copy_priv *priv;
+	int ret;
+
+	if (nv_rd32(parent, 0x022500) & 0x00000200)
+		return -ENODEV;
+
+	ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000080;
+	nv_subdev(priv)->intr = nvc0_copy_intr;
+	nv_engine(priv)->cclass = &nvc0_copy1_cclass;
+	nv_engine(priv)->sclass = nvc0_copy1_sclass;
+	return 0;
+}
+
+static int
+nvc0_copy_init(struct nouveau_object *object)
+{
+	int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
+	struct nvc0_copy_priv *priv = (void *)object;
+	int ret, i;
+
+	ret = nouveau_copy_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* disable all interrupts */
+	nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
+
+	/* upload ucode */
+	nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
+	for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
+		nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);
+
+	nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
+	for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
+		nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
+	}
+
+	/* start it running */
+	nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
+	nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
+	nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
+	nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
+	return 0;
+}
+
+static int
+nvc0_copy_fini(struct nouveau_object *object, bool suspend)
+{
+	int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
+	struct nvc0_copy_priv *priv = (void *)object;
+
+	nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
+	nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
+
+	return nouveau_copy_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nvc0_copy0_oclass = {
+	.handle = NV_ENGINE(COPY0, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_copy0_ctor,
+		.dtor = _nouveau_copy_dtor,
+		.init = nvc0_copy_init,
+		.fini = nvc0_copy_fini,
+	},
+};
+
+struct nouveau_oclass
+nvc0_copy1_oclass = {
+	.handle = NV_ENGINE(COPY1, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_copy1_ctor,
+		.dtor = _nouveau_copy_dtor,
+		.init = nvc0_copy_init,
+		.fini = nvc0_copy_fini,
+	},
+};
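A note on the init path above: the copy engines are falcon microcontrollers, and nvc0_copy_init() follows the usual falcon bring-up pattern. The code port at +0x184 auto-increments within a 256-byte page, so the page index at +0x188 has to be bumped by hand every 64 words. Below is a minimal standalone sketch of just that loop, with nv_wr32() stubbed out to a printf so it is runnable on its own; the register offsets are the PCE0 ones from the code above.

	#include <stdio.h>

	static void nv_wr32(unsigned reg, unsigned val)
	{
		printf("wr 0x%06x <- 0x%08x\n", reg, val);
	}

	static void upload_code(const unsigned *code, unsigned words)
	{
		unsigned i;
		nv_wr32(0x104180, 0x01000000);		/* enter code-upload mode */
		for (i = 0; i < words; i++) {
			if ((i & 0x3f) == 0)		/* new 64-word (256-byte) page */
				nv_wr32(0x104188, i >> 6);
			nv_wr32(0x104184, code[i]);	/* auto-incrementing port */
		}
	}

	int main(void)
	{
		unsigned dummy[130] = { 0 };		/* spans three pages */
		upload_code(dummy, 130);
		return 0;
	}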
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index 63051ab0ecca..a0e5e39638bc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,99 +22,106 @@
  * Authors: Ben Skeggs
  */
 
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include <core/ramht.h>
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/gpuobj.h>
+
+#include <subdev/fb.h>
+
+#include <engine/crypt.h>
 
-struct nv84_crypt_engine {
-	struct nouveau_exec_engine base;
+struct nv84_crypt_priv {
+	struct nouveau_crypt base;
 };
 
+struct nv84_crypt_chan {
+	struct nouveau_crypt_chan base;
+};
+
+/*******************************************************************************
+ * Crypt object classes
+ ******************************************************************************/
+
 static int
-nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
+nv84_crypt_object_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
 {
-	struct drm_device *dev = chan->dev;
-	struct nouveau_gpuobj *ramin = chan->ramin;
-	struct nouveau_gpuobj *ctx;
+	struct nouveau_gpuobj *obj;
 	int ret;
 
-	NV_DEBUG(dev, "ch%d\n", chan->id);
-
-	ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, &ctx);
+	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+				    16, 16, 0, &obj);
+	*pobject = nv_object(obj);
 	if (ret)
 		return ret;
 
-	nv_wo32(ramin, 0xa0, 0x00190000);
-	nv_wo32(ramin, 0xa4, ctx->addr + ctx->size - 1);
-	nv_wo32(ramin, 0xa8, ctx->addr);
-	nv_wo32(ramin, 0xac, 0);
-	nv_wo32(ramin, 0xb0, 0);
-	nv_wo32(ramin, 0xb4, 0);
-	nvimem_flush(dev);
-
-	nvvm_engref(chan->vm, engine, 1);
-	chan->engctx[engine] = ctx;
+	nv_wo32(obj, 0x00, nv_mclass(obj));
+	nv_wo32(obj, 0x04, 0x00000000);
+	nv_wo32(obj, 0x08, 0x00000000);
+	nv_wo32(obj, 0x0c, 0x00000000);
 	return 0;
 }
 
-static void
-nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nouveau_gpuobj *ctx = chan->engctx[engine];
-	struct drm_device *dev = chan->dev;
-	u32 inst;
-
-	inst = (chan->ramin->addr >> 12);
-	inst |= 0x80000000;
-
-	/* mark context as invalid if still on the hardware, not
-	 * doing this causes issues the next time PCRYPT is used,
-	 * unsurprisingly :)
-	 */
-	nv_wr32(dev, 0x10200c, 0x00000000);
-	if (nv_rd32(dev, 0x102188) == inst)
-		nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
-	if (nv_rd32(dev, 0x10218c) == inst)
-		nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
-	nv_wr32(dev, 0x10200c, 0x00000010);
-
-	nouveau_gpuobj_ref(NULL, &ctx);
-
-	nvvm_engref(chan->vm, engine, -1);
-	chan->engctx[engine] = NULL;
-}
+static struct nouveau_ofuncs
+nv84_crypt_ofuncs = {
+	.ctor = nv84_crypt_object_ctor,
+	.dtor = _nouveau_gpuobj_dtor,
+	.init = _nouveau_gpuobj_init,
+	.fini = _nouveau_gpuobj_fini,
+	.rd32 = _nouveau_gpuobj_rd32,
+	.wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_oclass
+nv84_crypt_sclass[] = {
+	{ 0x74c1, &nv84_crypt_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * PCRYPT context
+ ******************************************************************************/
 
 static int
-nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
-		      u32 handle, u16 class)
+nv84_crypt_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
 {
-	struct drm_device *dev = chan->dev;
-	struct nouveau_gpuobj *obj = NULL;
+	struct nv84_crypt_chan *priv;
 	int ret;
 
-	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+	ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
+					   0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
-	obj->engine = 5;
-	obj->class = class;
 
-	nv_wo32(obj, 0x00, class);
-	nvimem_flush(dev);
-
-	ret = nouveau_ramht_insert(chan, handle, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
+	return 0;
 }
 
-static void
-nv84_crypt_tlb_flush(struct drm_device *dev, int engine)
-{
-	nv50_vm_flush_engine(dev, 0x0a);
-}
+static struct nouveau_oclass
+nv84_crypt_cclass = {
+	.handle = NV_ENGCTX(CRYPT, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_crypt_context_ctor,
+		.dtor = _nouveau_crypt_context_dtor,
+		.init = _nouveau_crypt_context_init,
+		.fini = _nouveau_crypt_context_fini,
+		.rd32 = _nouveau_crypt_context_rd32,
+		.wr32 = _nouveau_crypt_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PCRYPT engine/subdev functions
+ ******************************************************************************/
 
-static struct nouveau_bitfield nv84_crypt_intr[] = {
+static struct nouveau_bitfield nv84_crypt_intr_mask[] = {
 	{ 0x00000001, "INVALID_STATE" },
 	{ 0x00000002, "ILLEGAL_MTHD" },
 	{ 0x00000004, "ILLEGAL_CLASS" },
@@ -124,79 +131,78 @@ static struct nouveau_bitfield nv84_crypt_intr[] = {
 };
 
 static void
-nv84_crypt_isr(struct drm_device *dev)
+nv84_crypt_intr(struct nouveau_subdev *subdev)
 {
-	u32 stat = nv_rd32(dev, 0x102130);
-	u32 mthd = nv_rd32(dev, 0x102190);
-	u32 data = nv_rd32(dev, 0x102194);
-	u64 inst = (u64)(nv_rd32(dev, 0x102188) & 0x7fffffff) << 12;
-	int show = nouveau_ratelimit();
-	int chid = nv50_graph_isr_chid(dev, inst);
-
-	if (show) {
-		NV_INFO(dev, "PCRYPT:");
-		nouveau_bitfield_print(nv84_crypt_intr, stat);
-		printk(KERN_CONT " ch %d (0x%010llx) mthd 0x%04x data 0x%08x\n",
-			chid, inst, mthd, data);
+	struct nv84_crypt_priv *priv = (void *)subdev;
+	u32 stat = nv_rd32(priv, 0x102130);
+	u32 mthd = nv_rd32(priv, 0x102190);
+	u32 data = nv_rd32(priv, 0x102194);
+	u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff;
+
+	if (stat) {
+		nv_error(priv, "");
+		nouveau_bitfield_print(nv84_crypt_intr_mask, stat);
+		printk(" ch 0x%010llx mthd 0x%04x data 0x%08x\n",
+		       (u64)inst << 12, mthd, data);
 	}
 
-	nv_wr32(dev, 0x102130, stat);
-	nv_wr32(dev, 0x10200c, 0x10);
+	nv_wr32(priv, 0x102130, stat);
+	nv_wr32(priv, 0x10200c, 0x10);
 
-	nv50_fb_vm_trap(dev, show);
+	nv50_fb_trap(nouveau_fb(priv), 1);
 }
 
 static int
-nv84_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+nv84_crypt_tlb_flush(struct nouveau_engine *engine)
 {
-	nv_wr32(dev, 0x102140, 0x00000000);
+	nv50_vm_flush_engine(&engine->base, 0x0a);
 	return 0;
 }
 
 static int
-nv84_crypt_init(struct drm_device *dev, int engine)
+nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
 {
-	nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+	struct nv84_crypt_priv *priv;
+	int ret;
 
-	nv_wr32(dev, 0x102130, 0xffffffff);
-	nv_wr32(dev, 0x102140, 0xffffffbf);
+	ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
 
-	nv_wr32(dev, 0x10200c, 0x00000010);
+	nv_subdev(priv)->unit = 0x00004000;
+	nv_subdev(priv)->intr = nv84_crypt_intr;
+	nv_engine(priv)->cclass = &nv84_crypt_cclass;
+	nv_engine(priv)->sclass = nv84_crypt_sclass;
+	nv_engine(priv)->tlb_flush = nv84_crypt_tlb_flush;
 	return 0;
 }
 
-static void
-nv84_crypt_destroy(struct drm_device *dev, int engine)
-{
-	struct nv84_crypt_engine *pcrypt = nv_engine(dev, engine);
-
-	NVOBJ_ENGINE_DEL(dev, CRYPT);
-
-	nouveau_irq_unregister(dev, 14);
-	kfree(pcrypt);
-}
-
-int
-nv84_crypt_create(struct drm_device *dev)
+static int
+nv84_crypt_init(struct nouveau_object *object)
 {
-	struct nv84_crypt_engine *pcrypt;
-
-	pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
-	if (!pcrypt)
-		return -ENOMEM;
-
-	pcrypt->base.destroy = nv84_crypt_destroy;
-	pcrypt->base.init = nv84_crypt_init;
-	pcrypt->base.fini = nv84_crypt_fini;
-	pcrypt->base.context_new = nv84_crypt_context_new;
-	pcrypt->base.context_del = nv84_crypt_context_del;
-	pcrypt->base.object_new = nv84_crypt_object_new;
-	pcrypt->base.tlb_flush = nv84_crypt_tlb_flush;
+	struct nv84_crypt_priv *priv = (void *)object;
+	int ret;
 
-	nouveau_irq_register(dev, 14, nv84_crypt_isr);
+	ret = nouveau_crypt_init(&priv->base);
+	if (ret)
+		return ret;
 
-	NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
-	NVOBJ_CLASS (dev, 0x74c1, CRYPT);
+	nv_wr32(priv, 0x102130, 0xffffffff);
+	nv_wr32(priv, 0x102140, 0xffffffbf);
+	nv_wr32(priv, 0x10200c, 0x00000010);
 	return 0;
 }
+
+struct nouveau_oclass
+nv84_crypt_oclass = {
+	.handle = NV_ENGINE(CRYPT, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_crypt_ctor,
+		.dtor = _nouveau_crypt_dtor,
+		.init = nv84_crypt_init,
+		.fini = _nouveau_crypt_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index c9adc1b8a7db..559a1b1d7082 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,124 +22,74 @@
  * Authors: Ben Skeggs
  */
 
-#include "drmP.h"
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
 
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include <core/ramht.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/crypt.h>
 
 #include "fuc/nv98.fuc.h"
 
 struct nv98_crypt_priv {
-	struct nouveau_exec_engine base;
+	struct nouveau_crypt base;
 };
 
 struct nv98_crypt_chan {
-	struct nouveau_gpuobj *mem;
+	struct nouveau_crypt_chan base;
 };
 
-static int
-nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct nv98_crypt_priv *priv = nv_engine(dev, engine);
-	struct nv98_crypt_chan *cctx;
-	int ret;
-
-	cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
-	if (!cctx)
-		return -ENOMEM;
-
-	nvvm_engref(chan->vm, engine, 1);
-
-	ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, &cctx->mem);
-	if (ret)
-		goto error;
-
-	nv_wo32(chan->ramin, 0xa0, 0x00190000);
-	nv_wo32(chan->ramin, 0xa4, cctx->mem->addr + cctx->mem->size - 1);
-	nv_wo32(chan->ramin, 0xa8, cctx->mem->addr);
-	nv_wo32(chan->ramin, 0xac, 0x00000000);
-	nv_wo32(chan->ramin, 0xb0, 0x00000000);
-	nv_wo32(chan->ramin, 0xb4, 0x00000000);
-	nvimem_flush(dev);
-
-error:
-	if (ret)
-		priv->base.context_del(chan, engine);
-	return ret;
-}
-
-static void
-nv98_crypt_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nv98_crypt_chan *cctx = chan->engctx[engine];
-	int i;
-
-	for (i = 0xa0; i < 0xb4; i += 4)
-		nv_wo32(chan->ramin, i, 0x00000000);
-
-	nouveau_gpuobj_ref(NULL, &cctx->mem);
-
-	nvvm_engref(chan->vm, engine, -1);
-	chan->engctx[engine] = NULL;
-	kfree(cctx);
-}
+/*******************************************************************************
+ * Crypt object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv98_crypt_sclass[] = {
+	{ 0x88b4, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PCRYPT context
+ ******************************************************************************/
 
 static int
-nv98_crypt_object_new(struct nouveau_channel *chan, int engine,
-		      u32 handle, u16 class)
+nv98_crypt_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
 {
-	struct nv98_crypt_chan *cctx = chan->engctx[engine];
-
-	/* fuc engine doesn't need an object, our ramht code does.. */
-	cctx->mem->engine = 5;
-	cctx->mem->class = class;
-	return nouveau_ramht_insert(chan, handle, cctx->mem);
-}
+	struct nv98_crypt_chan *priv;
+	int ret;
 
-static void
-nv98_crypt_tlb_flush(struct drm_device *dev, int engine)
-{
-	nv50_vm_flush_engine(dev, 0x0a);
-}
+	ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
+					   256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
 
-static int
-nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
 	return 0;
 }
 
-static int
-nv98_crypt_init(struct drm_device *dev, int engine)
-{
-	int i;
-
-	/* reset! */
-	nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
-
-	/* wait for exit interrupt to signal */
-	nv_wait(dev, 0x087008, 0x00000010, 0x00000010);
-	nv_wr32(dev, 0x087004, 0x00000010);
-
-	/* upload microcode code and data segments */
-	nv_wr32(dev, 0x087ff8, 0x00100000);
-	for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
-		nv_wr32(dev, 0x087ff4, nv98_pcrypt_code[i]);
-
-	nv_wr32(dev, 0x087ff8, 0x00000000);
-	for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
-		nv_wr32(dev, 0x087ff4, nv98_pcrypt_data[i]);
-
-	/* start it running */
-	nv_wr32(dev, 0x08710c, 0x00000000);
-	nv_wr32(dev, 0x087104, 0x00000000); /* ENTRY */
-	nv_wr32(dev, 0x087100, 0x00000002); /* TRIGGER */
-	return 0;
-}
+static struct nouveau_oclass
+nv98_crypt_cclass = {
+	.handle = NV_ENGCTX(CRYPT, 0x98),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv98_crypt_context_ctor,
+		.dtor = _nouveau_crypt_context_dtor,
+		.init = _nouveau_crypt_context_init,
+		.fini = _nouveau_crypt_context_fini,
+		.rd32 = _nouveau_crypt_context_rd32,
+		.wr32 = _nouveau_crypt_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PCRYPT engine/subdev functions
+ ******************************************************************************/
 
 static struct nouveau_enum nv98_crypt_isr_error_name[] = {
 	{ 0x0000, "ILLEGAL_MTHD" },
@@ -150,65 +100,100 @@ static struct nouveau_enum nv98_crypt_isr_error_name[] = {
 };
 
 static void
-nv98_crypt_isr(struct drm_device *dev)
+nv98_crypt_intr(struct nouveau_subdev *subdev)
 {
-	u32 disp = nv_rd32(dev, 0x08701c);
-	u32 stat = nv_rd32(dev, 0x087008) & disp & ~(disp >> 16);
-	u32 inst = nv_rd32(dev, 0x087050) & 0x3fffffff;
-	u32 ssta = nv_rd32(dev, 0x087040) & 0x0000ffff;
-	u32 addr = nv_rd32(dev, 0x087040) >> 16;
+	struct nv98_crypt_priv *priv = (void *)subdev;
+	u32 disp = nv_rd32(priv, 0x08701c);
+	u32 stat = nv_rd32(priv, 0x087008) & disp & ~(disp >> 16);
+	u32 inst = nv_rd32(priv, 0x087050) & 0x3fffffff;
+	u32 ssta = nv_rd32(priv, 0x087040) & 0x0000ffff;
+	u32 addr = nv_rd32(priv, 0x087040) >> 16;
 	u32 mthd = (addr & 0x07ff) << 2;
 	u32 subc = (addr & 0x3800) >> 11;
-	u32 data = nv_rd32(dev, 0x087044);
-	int chid = nv50_graph_isr_chid(dev, inst);
+	u32 data = nv_rd32(priv, 0x087044);
 
 	if (stat & 0x00000040) {
-		NV_INFO(dev, "PCRYPT: DISPATCH_ERROR [");
+		nv_error(priv, "DISPATCH_ERROR [");
 		nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
-		printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
-			chid, inst, subc, mthd, data);
-		nv_wr32(dev, 0x087004, 0x00000040);
+		printk("] ch 0x%08x subc %d mthd 0x%04x data 0x%08x\n",
+		       inst, subc, mthd, data);
+		nv_wr32(priv, 0x087004, 0x00000040);
 		stat &= ~0x00000040;
 	}
 
 	if (stat) {
-		NV_INFO(dev, "PCRYPT: unhandled intr 0x%08x\n", stat);
-		nv_wr32(dev, 0x087004, stat);
+		nv_error(priv, "unhandled intr 0x%08x\n", stat);
+		nv_wr32(priv, 0x087004, stat);
 	}
 
-	nv50_fb_vm_trap(dev, 1);
+	nv50_fb_trap(nouveau_fb(priv), 1);
 }
 
-static void
-nv98_crypt_destroy(struct drm_device *dev, int engine)
+static int
+nv98_crypt_tlb_flush(struct nouveau_engine *engine)
 {
-	struct nv98_crypt_priv *priv = nv_engine(dev, engine);
-
-	nouveau_irq_unregister(dev, 14);
-	NVOBJ_ENGINE_DEL(dev, CRYPT);
-	kfree(priv);
+	nv50_vm_flush_engine(&engine->base, 0x0a);
+	return 0;
 }
 
-int
-nv98_crypt_create(struct drm_device *dev)
+static int
+nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
 {
 	struct nv98_crypt_priv *priv;
+	int ret;
+
+	ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00004000;
+	nv_subdev(priv)->intr = nv98_crypt_intr;
+	nv_engine(priv)->cclass = &nv98_crypt_cclass;
+	nv_engine(priv)->sclass = nv98_crypt_sclass;
+	nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
+	return 0;
+}
+
+static int
+nv98_crypt_init(struct nouveau_object *object)
+{
+	struct nv98_crypt_priv *priv = (void *)object;
+	int ret, i;
+
+	ret = nouveau_crypt_init(&priv->base);
+	if (ret)
+		return ret;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
+	/* wait for exit interrupt to signal */
+	nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
+	nv_wr32(priv, 0x087004, 0x00000010);
 
-	priv->base.destroy = nv98_crypt_destroy;
-	priv->base.init = nv98_crypt_init;
-	priv->base.fini = nv98_crypt_fini;
-	priv->base.context_new = nv98_crypt_context_new;
-	priv->base.context_del = nv98_crypt_context_del;
-	priv->base.object_new = nv98_crypt_object_new;
-	priv->base.tlb_flush = nv98_crypt_tlb_flush;
+	/* upload microcode code and data segments */
+	nv_wr32(priv, 0x087ff8, 0x00100000);
+	for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
+		nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);
 
-	nouveau_irq_register(dev, 14, nv98_crypt_isr);
+	nv_wr32(priv, 0x087ff8, 0x00000000);
+	for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
+		nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);
 
-	NVOBJ_ENGINE_ADD(dev, CRYPT, &priv->base);
-	NVOBJ_CLASS(dev, 0x88b4, CRYPT);
+	/* start it running */
+	nv_wr32(priv, 0x08710c, 0x00000000);
+	nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
+	nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
 	return 0;
 }
+
+struct nouveau_oclass
+nv98_crypt_oclass = {
+	.handle = NV_ENGINE(CRYPT, 0x98),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv98_crypt_ctor,
+		.dtor = _nouveau_crypt_dtor,
+		.init = nv98_crypt_init,
+		.fini = _nouveau_crypt_fini,
+	},
+};
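The nv98 falcon is loaded through a slightly different window than the nvc0 copy engines: 0x087ff8 selects the upload segment (0x00100000 for code, 0 for data) and 0x087ff4 is a streaming port that the microcode words are pushed through. A minimal standalone sketch of that pattern follows, with nv_wr32() stubbed to a printf so it runs on its own; purely illustrative, the real registers are the ones in nv98_crypt_init() above.

	#include <stdio.h>

	static void nv_wr32(unsigned r, unsigned v)
	{
		printf("wr 0x%06x <- 0x%08x\n", r, v);
	}

	static void upload(unsigned window, const unsigned *seg, unsigned words)
	{
		nv_wr32(0x087ff8, window);	/* select code or data segment */
		while (words--)
			nv_wr32(0x087ff4, *seg++);	/* stream words through the port */
	}

	int main(void)
	{
		const unsigned code[4] = { 1, 2, 3, 4 }, data[2] = { 5, 6 };
		upload(0x00100000, code, 4);	/* code segment */
		upload(0x00000000, data, 2);	/* data segment */
		return 0;
	}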
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
new file mode 100644
index 000000000000..1c919f2af89f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -0,0 +1,90 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <engine/disp.h>

struct nv04_disp_priv {
	struct nouveau_disp base;
};

static struct nouveau_oclass
nv04_disp_sclass[] = {
	{},
};

static void
nv04_disp_intr_vblank(struct nv04_disp_priv *priv, int crtc)
{
	struct nouveau_disp *disp = &priv->base;
	if (disp->vblank.notify)
		disp->vblank.notify(disp->vblank.data, crtc);
}

static void
nv04_disp_intr(struct nouveau_subdev *subdev)
{
	struct nv04_disp_priv *priv = (void *)subdev;
	u32 crtc0 = nv_rd32(priv, 0x600100);
	u32 crtc1 = nv_rd32(priv, 0x602100);

	if (crtc0 & 0x00000001) {
		nv04_disp_intr_vblank(priv, 0);
		nv_wr32(priv, 0x600100, 0x00000001);
	}

	if (crtc1 & 0x00000001) {
		nv04_disp_intr_vblank(priv, 1);
		nv_wr32(priv, 0x602100, 0x00000001);
	}
}

static int
nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv04_disp_priv *priv;
	int ret;

	ret = nouveau_disp_create(parent, engine, oclass, "DISPLAY",
				  "display", &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_engine(priv)->sclass = nv04_disp_sclass;
	nv_subdev(priv)->intr = nv04_disp_intr;
	return 0;
}

struct nouveau_oclass
nv04_disp_oclass = {
	.handle = NV_ENGINE(DISP, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_disp_ctor,
		.dtor = _nouveau_disp_dtor,
		.init = _nouveau_disp_init,
		.fini = _nouveau_disp_fini,
	},
};
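The nv04 display engine does nothing with vblank itself; it only acknowledges the CRTC interrupt and forwards it to whoever registered the disp->vblank.notify hook. A hypothetical consumer is sketched below: the callback signature (void *data, int crtc) comes from the call site in nv04_disp_intr_vblank() above, while my_drm_state and the way a consumer obtains the nouveau_disp pointer are assumptions for illustration only.

	/* hypothetical consumer of the vblank.notify hook */
	static void my_vblank_notify(void *data, int crtc)
	{
		struct my_drm_state *drm = data;	/* hypothetical type */
		drm_handle_vblank(drm->ddev, crtc);	/* hand off to the DRM core */
	}

	static void my_hookup(struct nouveau_disp *disp, struct my_drm_state *drm)
	{
		disp->vblank.data = drm;
		disp->vblank.notify = my_vblank_notify;
	}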
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
new file mode 100644
index 000000000000..16a9afb1060b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -0,0 +1,125 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <engine/software.h>
#include <engine/disp.h>

struct nv50_disp_priv {
	struct nouveau_disp base;
};

static struct nouveau_oclass
nv50_disp_sclass[] = {
	{},
};

static void
nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
{
	struct nouveau_disp *disp = &priv->base;
	struct nouveau_software_chan *chan, *temp;
	unsigned long flags;

	spin_lock_irqsave(&disp->vblank.lock, flags);
	list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
		if (chan->vblank.crtc != crtc)
			continue;

		nv_wr32(priv, 0x001704, chan->vblank.channel);
		nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);

		if (nv_device(priv)->chipset == 0x50) {
			nv_wr32(priv, 0x001570, chan->vblank.offset);
			nv_wr32(priv, 0x001574, chan->vblank.value);
		} else {
			if (nv_device(priv)->chipset >= 0xc0) {
				nv_wr32(priv, 0x06000c,
					upper_32_bits(chan->vblank.offset));
			}
			nv_wr32(priv, 0x060010, chan->vblank.offset);
			nv_wr32(priv, 0x060014, chan->vblank.value);
		}

		list_del(&chan->vblank.head);
		if (disp->vblank.put)
			disp->vblank.put(disp->vblank.data, crtc);
	}
	spin_unlock_irqrestore(&disp->vblank.lock, flags);

	if (disp->vblank.notify)
		disp->vblank.notify(disp->vblank.data, crtc);
}

static void
nv50_disp_intr(struct nouveau_subdev *subdev)
{
	struct nv50_disp_priv *priv = (void *)subdev;
	u32 stat1 = nv_rd32(priv, 0x610024);

	if (stat1 & 0x00000004) {
		nv50_disp_intr_vblank(priv, 0);
		nv_wr32(priv, 0x610024, 0x00000004);
		stat1 &= ~0x00000004;
	}

	if (stat1 & 0x00000008) {
		nv50_disp_intr_vblank(priv, 1);
		nv_wr32(priv, 0x610024, 0x00000008);
		stat1 &= ~0x00000008;
	}
}

static int
nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv50_disp_priv *priv;
	int ret;

	ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
				  "display", &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_engine(priv)->sclass = nv50_disp_sclass;
	nv_subdev(priv)->intr = nv50_disp_intr;

	INIT_LIST_HEAD(&priv->base.vblank.list);
	spin_lock_init(&priv->base.vblank.lock);
	return 0;
}

struct nouveau_oclass
nv50_disp_oclass = {
	.handle = NV_ENGINE(DISP, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_disp_ctor,
		.dtor = _nouveau_disp_dtor,
		.init = _nouveau_disp_init,
		.fini = _nouveau_disp_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
new file mode 100644
index 000000000000..d93efbcf75b8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -0,0 +1,118 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <subdev/bar.h>

#include <engine/software.h>
#include <engine/disp.h>

struct nvd0_disp_priv {
	struct nouveau_disp base;
};

static struct nouveau_oclass
nvd0_disp_sclass[] = {
	{},
};

static void
nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
{
	struct nouveau_bar *bar = nouveau_bar(priv);
	struct nouveau_disp *disp = &priv->base;
	struct nouveau_software_chan *chan, *temp;
	unsigned long flags;

	spin_lock_irqsave(&disp->vblank.lock, flags);
	list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
		if (chan->vblank.crtc != crtc)
			continue;

		nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
		bar->flush(bar);
		nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
		nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
		nv_wr32(priv, 0x060014, chan->vblank.value);

		list_del(&chan->vblank.head);
		if (disp->vblank.put)
			disp->vblank.put(disp->vblank.data, crtc);
	}
	spin_unlock_irqrestore(&disp->vblank.lock, flags);

	if (disp->vblank.notify)
		disp->vblank.notify(disp->vblank.data, crtc);
}

static void
nvd0_disp_intr(struct nouveau_subdev *subdev)
{
	struct nvd0_disp_priv *priv = (void *)subdev;
	u32 intr = nv_rd32(priv, 0x610088);
	int i;

	for (i = 0; i < 4; i++) {
		u32 mask = 0x01000000 << i;
		if (mask & intr) {
			u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
			if (stat & 0x00000001)
				nvd0_disp_intr_vblank(priv, i);
			nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
			nv_rd32(priv, 0x6100c0 + (i * 0x800));
		}
	}
}

static int
nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nvd0_disp_priv *priv;
	int ret;

	ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
				  "display", &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_engine(priv)->sclass = nvd0_disp_sclass;
	nv_subdev(priv)->intr = nvd0_disp_intr;

	INIT_LIST_HEAD(&priv->base.vblank.list);
	spin_lock_init(&priv->base.vblank.lock);
	return 0;
}

struct nouveau_oclass
nvd0_disp_oclass = {
	.handle = NV_ENGINE(DISP, 0xd0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvd0_disp_ctor,
		.dtor = _nouveau_disp_dtor,
		.init = _nouveau_disp_init,
		.fini = _nouveau_disp_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
new file mode 100644
index 000000000000..e1f013d39768
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -0,0 +1,87 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/object.h>
#include <core/class.h>

#include <subdev/fb.h>
#include <engine/dmaobj.h>

int
nouveau_dmaobj_create_(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass,
		       void *data, u32 size, int len, void **pobject)
{
	struct nv_dma_class *args = data;
	struct nouveau_dmaobj *object;
	int ret;

	if (size < sizeof(*args))
		return -EINVAL;

	ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject);
	object = *pobject;
	if (ret)
		return ret;

	switch (args->flags & NV_DMA_TARGET_MASK) {
	case NV_DMA_TARGET_VM:
		object->target = NV_MEM_TARGET_VM;
		break;
	case NV_DMA_TARGET_VRAM:
		object->target = NV_MEM_TARGET_VRAM;
		break;
	case NV_DMA_TARGET_PCI:
		object->target = NV_MEM_TARGET_PCI;
		break;
	case NV_DMA_TARGET_PCI_US:
	case NV_DMA_TARGET_AGP:
		object->target = NV_MEM_TARGET_PCI_NOSNOOP;
		break;
	default:
		return -EINVAL;
	}

	switch (args->flags & NV_DMA_ACCESS_MASK) {
	case NV_DMA_ACCESS_VM:
		object->access = NV_MEM_ACCESS_VM;
		break;
	case NV_DMA_ACCESS_RD:
		object->access = NV_MEM_ACCESS_RO;
		break;
	case NV_DMA_ACCESS_WR:
		object->access = NV_MEM_ACCESS_WO;
		break;
	case NV_DMA_ACCESS_RDWR:
		object->access = NV_MEM_ACCESS_RW;
		break;
	default:
		return -EINVAL;
	}

	object->start = args->start;
	object->limit = args->limit;
	return 0;
}
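For orientation, the base ctor above only parses a caller-supplied nv_dma_class argument buffer into a target/access/start/limit tuple; the per-chipset bind() methods later turn that tuple into real hardware objects. A shape-only sketch of a well-formed argument block follows. The NV_DMA_* flag names are the ones tested above; their numeric values live in core/class.h and are not repeated here, so this is illustrative rather than a copy of any real caller.

	struct nv_dma_class args = {
		.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR,
		.start = 0x00000000,	/* first byte covered */
		.limit = 0x0000ffff,	/* last byte, inclusive */
	};
	/* passed to the ctor as (data, size); any size smaller than
	 * sizeof(args) is rejected up front with -EINVAL */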
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
new file mode 100644
index 000000000000..b0d3651fcaba
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -0,0 +1,176 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/gpuobj.h>

#include <subdev/fb.h>
#include <subdev/vm/nv04.h>

#include <engine/dmaobj.h>

struct nv04_dmaeng_priv {
	struct nouveau_dmaeng base;
};

struct nv04_dmaobj_priv {
	struct nouveau_dmaobj base;
};

static int
nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
		 struct nouveau_object *parent,
		 struct nouveau_dmaobj *dmaobj,
		 struct nouveau_gpuobj **pgpuobj)
{
	struct nouveau_gpuobj *gpuobj;
	u32 flags0 = nv_mclass(dmaobj);
	u32 flags2 = 0x00000000;
	u32 offset = (dmaobj->start & 0xfffff000);
	u32 adjust = (dmaobj->start & 0x00000fff);
	u32 length = dmaobj->limit - dmaobj->start;
	int ret;

	if (dmaobj->target == NV_MEM_TARGET_VM) {
		gpuobj = nv04_vmmgr(dmaeng)->vm->pgt[0].obj[0];
		if (dmaobj->start == 0)
			return nouveau_gpuobj_dup(parent, gpuobj, pgpuobj);

		offset = nv_ro32(gpuobj, 8 + (offset >> 10));
		offset &= 0xfffff000;
		dmaobj->target = NV_MEM_TARGET_PCI;
		dmaobj->access = NV_MEM_ACCESS_RW;
	}

	switch (dmaobj->target) {
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00003000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00023000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00033000;
		break;
	default:
		return -EINVAL;
	}

	switch (dmaobj->access) {
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		flags0 |= 0x00008000;
	case NV_MEM_ACCESS_RW:
		flags2 |= 0x00000002;
		break;
	default:
		return -EINVAL;
	}

	ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
	*pgpuobj = gpuobj;
	if (ret == 0) {
		nv_wo32(*pgpuobj, 0x00, flags0 | (adjust << 20));
		nv_wo32(*pgpuobj, 0x04, length);
		nv_wo32(*pgpuobj, 0x08, flags2 | offset);
		nv_wo32(*pgpuobj, 0x0c, flags2 | offset);
	}

	return ret;
}

static int
nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		 struct nouveau_oclass *oclass, void *data, u32 size,
		 struct nouveau_object **pobject)
{
	struct nouveau_dmaeng *dmaeng = (void *)engine;
	struct nv04_dmaobj_priv *dmaobj;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	ret = nouveau_dmaobj_create(parent, engine, oclass,
				    data, size, &dmaobj);
	*pobject = nv_object(dmaobj);
	if (ret)
		return ret;

	switch (nv_mclass(parent)) {
	case 0x006e:
		ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
		nouveau_object_ref(NULL, pobject);
		*pobject = nv_object(gpuobj);
		break;
	default:
		break;
	}

	return ret;
}

static struct nouveau_ofuncs
nv04_dmaobj_ofuncs = {
	.ctor = nv04_dmaobj_ctor,
	.dtor = _nouveau_dmaobj_dtor,
	.init = _nouveau_dmaobj_init,
	.fini = _nouveau_dmaobj_fini,
};

static struct nouveau_oclass
nv04_dmaobj_sclass[] = {
	{ 0x0002, &nv04_dmaobj_ofuncs },
	{ 0x0003, &nv04_dmaobj_ofuncs },
	{ 0x003d, &nv04_dmaobj_ofuncs },
	{}
};

static int
nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		 struct nouveau_oclass *oclass, void *data, u32 size,
		 struct nouveau_object **pobject)
{
	struct nv04_dmaeng_priv *priv;
	int ret;

	ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	priv->base.base.sclass = nv04_dmaobj_sclass;
	priv->base.bind = nv04_dmaobj_bind;
	return 0;
}

struct nouveau_oclass
nv04_dmaeng_oclass = {
	.handle = NV_ENGINE(DMAOBJ, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_dmaeng_ctor,
		.dtor = _nouveau_dmaeng_dtor,
		.init = _nouveau_dmaeng_init,
		.fini = _nouveau_dmaeng_fini,
	},
};
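A worked example of the four-word object nv04_dmaobj_bind() builds may help: for an m2mf (0x003d) view of VRAM, read/write, with start=0x1080 and limit=0x10fff. It mirrors the flag math above (note that the NV_MEM_ACCESS_WO case falls through, so write-only objects also get the 0x00000002 write bit in flags2, which appears deliberate). This is a standalone, runnable restatement of that arithmetic, not driver code.

	#include <stdio.h>

	int main(void)
	{
		unsigned start = 0x1080, limit = 0x10fff;
		unsigned flags0 = 0x003d;		/* object class */
		unsigned flags2 = 0;
		unsigned offset = start & 0xfffff000;
		unsigned adjust = start & 0x00000fff;

		flags0 |= 0x00003000;			/* target: VRAM */
		flags2 |= 0x00000002;			/* access: RW */

		printf("0x00: 0x%08x\n", flags0 | (adjust << 20));
		printf("0x04: 0x%08x\n", limit - start);	/* length */
		printf("0x08: 0x%08x\n", flags2 | offset);
		printf("0x0c: 0x%08x\n", flags2 | offset);
		return 0;
	}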
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
new file mode 100644
index 000000000000..8207ac9a0bb9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -0,0 +1,168 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/gpuobj.h>

#include <subdev/fb.h>
#include <engine/dmaobj.h>

struct nv50_dmaeng_priv {
	struct nouveau_dmaeng base;
};

struct nv50_dmaobj_priv {
	struct nouveau_dmaobj base;
};

static int
nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
		 struct nouveau_object *parent,
		 struct nouveau_dmaobj *dmaobj,
		 struct nouveau_gpuobj **pgpuobj)
{
	u32 flags = nv_mclass(dmaobj);
	int ret;

	switch (dmaobj->target) {
	case NV_MEM_TARGET_VM:
		flags |= 0x00000000;
		flags |= 0x60000000; /* COMPRESSION_USEVM */
		flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */
		break;
	case NV_MEM_TARGET_VRAM:
		flags |= 0x00010000;
		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
		break;
	case NV_MEM_TARGET_PCI:
		flags |= 0x00020000;
		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags |= 0x00030000;
		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
		break;
	default:
		return -EINVAL;
	}

	switch (dmaobj->access) {
	case NV_MEM_ACCESS_VM:
		break;
	case NV_MEM_ACCESS_RO:
		flags |= 0x00040000;
		break;
	case NV_MEM_ACCESS_WO:
	case NV_MEM_ACCESS_RW:
		flags |= 0x00080000;
		break;
	}

	ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
	if (ret == 0) {
		nv_wo32(*pgpuobj, 0x00, flags);
		nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
		nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
					upper_32_bits(dmaobj->start));
		nv_wo32(*pgpuobj, 0x10, 0x00000000);
		nv_wo32(*pgpuobj, 0x14, 0x00000000);
	}

	return ret;
}

static int
nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		 struct nouveau_oclass *oclass, void *data, u32 size,
		 struct nouveau_object **pobject)
{
	struct nouveau_dmaeng *dmaeng = (void *)engine;
	struct nv50_dmaobj_priv *dmaobj;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	ret = nouveau_dmaobj_create(parent, engine, oclass,
				    data, size, &dmaobj);
	*pobject = nv_object(dmaobj);
	if (ret)
		return ret;

	switch (nv_mclass(parent)) {
	case 0x506f:
	case 0x826f:
		ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
		nouveau_object_ref(NULL, pobject);
		*pobject = nv_object(gpuobj);
		break;
	default:
		break;
	}

	return ret;
}

static struct nouveau_ofuncs
nv50_dmaobj_ofuncs = {
	.ctor = nv50_dmaobj_ctor,
	.dtor = _nouveau_dmaobj_dtor,
	.init = _nouveau_dmaobj_init,
	.fini = _nouveau_dmaobj_fini,
};

static struct nouveau_oclass
nv50_dmaobj_sclass[] = {
	{ 0x0002, &nv50_dmaobj_ofuncs },
	{ 0x0003, &nv50_dmaobj_ofuncs },
	{ 0x003d, &nv50_dmaobj_ofuncs },
	{}
};

static int
nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		 struct nouveau_oclass *oclass, void *data, u32 size,
		 struct nouveau_object **pobject)
{
	struct nv50_dmaeng_priv *priv;
	int ret;

	ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	priv->base.base.sclass = nv50_dmaobj_sclass;
	priv->base.bind = nv50_dmaobj_bind;
	return 0;
}

struct nouveau_oclass
nv50_dmaeng_oclass = {
	.handle = NV_ENGINE(DMAOBJ, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_dmaeng_ctor,
		.dtor = _nouveau_dmaeng_dtor,
		.init = _nouveau_dmaeng_init,
		.fini = _nouveau_dmaeng_fini,
	},
};
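The nv50 object is six words rather than four, because start and limit are 40-bit addresses: their low 32 bits each get a word, and the high bytes share word 0x0c with limit in the top byte. A standalone restatement of just that packing, with arbitrary example addresses, runnable as-is:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long start = 0x123456000ULL;
		unsigned long long limit = 0x1ffffffffULL;

		printf("0x04: 0x%08x\n", (unsigned)limit);	/* limit low */
		printf("0x08: 0x%08x\n", (unsigned)start);	/* start low */
		printf("0x0c: 0x%08x\n",			/* high bits packed */
		       (unsigned)((limit >> 32) << 24 | (start >> 32)));
		return 0;
	}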
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
new file mode 100644
index 000000000000..5baa08695535
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -0,0 +1,99 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/gpuobj.h>

#include <subdev/fb.h>
#include <engine/dmaobj.h>

struct nvc0_dmaeng_priv {
	struct nouveau_dmaeng base;
};

struct nvc0_dmaobj_priv {
	struct nouveau_dmaobj base;
};

static int
nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		 struct nouveau_oclass *oclass, void *data, u32 size,
		 struct nouveau_object **pobject)
{
	struct nvc0_dmaobj_priv *dmaobj;
	int ret;

	ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj);
	*pobject = nv_object(dmaobj);
	if (ret)
		return ret;

	if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start)
		return -EINVAL;

	return 0;
}

static struct nouveau_ofuncs
nvc0_dmaobj_ofuncs = {
	.ctor = nvc0_dmaobj_ctor,
	.dtor = _nouveau_dmaobj_dtor,
	.init = _nouveau_dmaobj_init,
	.fini = _nouveau_dmaobj_fini,
};

static struct nouveau_oclass
nvc0_dmaobj_sclass[] = {
	{ 0x0002, &nvc0_dmaobj_ofuncs },
	{ 0x0003, &nvc0_dmaobj_ofuncs },
	{ 0x003d, &nvc0_dmaobj_ofuncs },
	{}
};

static int
nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		 struct nouveau_oclass *oclass, void *data, u32 size,
		 struct nouveau_object **pobject)
{
	struct nvc0_dmaeng_priv *priv;
	int ret;

	ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	priv->base.base.sclass = nvc0_dmaobj_sclass;
	return 0;
}

struct nouveau_oclass
nvc0_dmaeng_oclass = {
	.handle = NV_ENGINE(DMAOBJ, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_dmaeng_ctor,
		.dtor = _nouveau_dmaeng_dtor,
		.init = _nouveau_dmaeng_init,
		.fini = _nouveau_dmaeng_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
new file mode 100644
index 000000000000..edeb76ee648c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -0,0 +1,165 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/object.h>
#include <core/handle.h>

#include <engine/dmaobj.h>
#include <engine/fifo.h>

int
nouveau_fifo_channel_create_(struct nouveau_object *parent,
			     struct nouveau_object *engine,
			     struct nouveau_oclass *oclass,
			     int bar, u32 addr, u32 size, u32 pushbuf,
			     u32 engmask, int len, void **ptr)
{
	struct nouveau_device *device = nv_device(engine);
	struct nouveau_fifo *priv = (void *)engine;
	struct nouveau_fifo_chan *chan;
	struct nouveau_dmaeng *dmaeng;
	unsigned long flags;
	int ret;

	/* create base object class */
	ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
				     engmask, len, ptr);
	chan = *ptr;
	if (ret)
		return ret;

	/* validate dma object representing push buffer */
	chan->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
	if (!chan->pushdma)
		return -ENOENT;

	dmaeng = (void *)chan->pushdma->base.engine;
	switch (chan->pushdma->base.oclass->handle) {
	case 0x0002:
	case 0x003d:
		break;
	default:
		return -EINVAL;
	}

	if (dmaeng->bind) {
		ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
		if (ret)
			return ret;
	}

	/* find a free fifo channel */
	spin_lock_irqsave(&priv->lock, flags);
	for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) {
		if (!priv->channel[chan->chid]) {
			priv->channel[chan->chid] = nv_object(chan);
			break;
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (chan->chid == priv->max) {
		nv_error(priv, "no free channels\n");
		return -ENOSPC;
	}

	/* map fifo control registers */
	chan->user = ioremap(pci_resource_start(device->pdev, bar) + addr +
			     (chan->chid * size), size);
	if (!chan->user)
		return -EFAULT;

	chan->size = size;
	return 0;
}

void
nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan)
{
	struct nouveau_fifo *priv = (void *)nv_object(chan)->engine;
	unsigned long flags;

	iounmap(chan->user);

	spin_lock_irqsave(&priv->lock, flags);
	priv->channel[chan->chid] = NULL;
	spin_unlock_irqrestore(&priv->lock, flags);

	nouveau_gpuobj_ref(NULL, &chan->pushgpu);
	nouveau_object_ref(NULL, (struct nouveau_object **)&chan->pushdma);
	nouveau_namedb_destroy(&chan->base);
}

void
_nouveau_fifo_channel_dtor(struct nouveau_object *object)
{
	struct nouveau_fifo_chan *chan = (void *)object;
	nouveau_fifo_channel_destroy(chan);
}

u32
_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr)
{
	struct nouveau_fifo_chan *chan = (void *)object;
	return ioread32_native(chan->user + addr);
}

void
_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data)
{
	struct nouveau_fifo_chan *chan = (void *)object;
	iowrite32_native(data, chan->user + addr);
}

void
nouveau_fifo_destroy(struct nouveau_fifo *priv)
{
	kfree(priv->channel);
	nouveau_engine_destroy(&priv->base);
}

int
nouveau_fifo_create_(struct nouveau_object *parent,
		     struct nouveau_object *engine,
		     struct nouveau_oclass *oclass,
		     int min, int max, int length, void **pobject)
{
	struct nouveau_fifo *priv;
	int ret;

	ret = nouveau_engine_create_(parent, engine, oclass, true, "PFIFO",
				     "fifo", length, pobject);
	priv = *pobject;
	if (ret)
		return ret;

	priv->min = min;
	priv->max = max;
	priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL);
	if (!priv->channel)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	return 0;
}
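Channel creation above boils down to three steps: validate and bind the push-buffer DMA object, claim the first free channel ID under the fifo lock, then ioremap that channel's slice of the control-register aperture. The ID scan is simple enough to restate as a self-contained, runnable sketch (plain pointers stand in for nouveau objects, and the lock is elided since this toy is single-threaded):

	#include <stdio.h>

	#define MAX_CHAN 8

	static void *channel[MAX_CHAN + 1];

	static int alloc_chid(int min, int max, void *chan)
	{
		int chid;
		for (chid = min; chid < max; chid++) {
			if (!channel[chid]) {
				channel[chid] = chan;	/* claim the first free slot */
				return chid;
			}
		}
		return -1;	/* mirrors the chid == priv->max, -ENOSPC path */
	}

	int main(void)
	{
		int dummy;
		printf("got channel %d\n", alloc_chid(0, MAX_CHAN, &dummy));
		return 0;
	}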
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index 6ab7eb0dd9bb..8b7513f4dc8f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -1,44 +1,45 @@
 /*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
+ * Copyright 2012 Red Hat Inc.
  *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
  *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
  *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
  *
+ * Authors: Ben Skeggs
  */
 
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include <engine/fifo.h>
-#include "nouveau_util.h"
-#include <core/ramht.h>
-#include "nouveau_software.h"
-
-static struct ramfc_desc {
-	unsigned bits:6;
-	unsigned ctxs:5;
-	unsigned ctxp:8;
-	unsigned regs:5;
-	unsigned regp;
-} nv04_ramfc[] = {
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/ramht.h>
+
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
+static struct ramfc_desc
+nv04_ramfc[] = {
 	{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
 	{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
 	{ 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
@@ -50,283 +51,360 @@ static struct ramfc_desc {
 {}
 };
 
-struct nv04_fifo_priv {
- struct nouveau_fifo_priv base;
- struct ramfc_desc *ramfc_desc;
- struct nouveau_gpuobj *ramro;
- struct nouveau_gpuobj *ramfc;
-};
-
-struct nv04_fifo_chan {
- struct nouveau_fifo_chan base;
- u32 ramfc;
-};
-
-bool
-nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
-{
- int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);
-
- if (!enable) {
- /* In some cases the PFIFO puller may be left in an
- * inconsistent state if you try to stop it when it's
- * busy translating handles. Sometimes you get a
- * PFIFO_CACHE_ERROR, sometimes it just fails silently
- * sending incorrect instance offsets to PGRAPH after
- * it's started up again. To avoid the latter we
- * invalidate the most recently calculated instance.
- */
- if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
- NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
- NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
-
- if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
- NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
- nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_CACHE_ERROR);
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
- }
-
- return pull & 1;
-}
-
-static int
-nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv = nv_engine(dev, engine);
- struct nv04_fifo_chan *fctx;
- unsigned long flags;
- int ret;
-
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
-
- fctx->ramfc = chan->id * 32;
-
- /* map channel control registers */
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
- }
-
- /* initialise default fifo context */
- nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x08, chan->pushbuf->addr >> 4);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x10,
- NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-
- /* enable dma mode on the channel */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-error:
- if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
-}
-
-void
-nv04_fifo_context_del(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv = nv_engine(chan->dev, engine);
- struct nv04_fifo_chan *fctx = chan->engctx[engine];
- struct ramfc_desc *c = priv->ramfc_desc;
- unsigned long flags;
- int chid;
-
- /* prevent fifo context switches */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-
- /* if this channel is active, replace it with a null context */
- chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
- if (chid == chan->id) {
- nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
- nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
-
- do {
- u32 mask = ((1ULL << c->bits) - 1) << c->regs;
- nv_mask(dev, c->regp, mask, 0x00000000);
- nv_wo32(priv->ramfc, fctx->ramfc + c->ctxp, 0x00000000);
- } while ((++c)->bits);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
- }
-
- /* restore normal operation, after disabling dma mode */
- nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
- nv_wr32(dev, NV03_PFIFO_CACHES, 1);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /* clean up */
- nouveau_gpuobj_ref(NULL, &chan->ramfc); /*XXX: nv40 */
- if (chan->user) {
- iounmap(chan->user);
- chan->user = NULL;
- }
-}
-
-int
-nv04_fifo_init(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv = nv_engine(dev, engine);
- int i;
-
- nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
- nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
-
- nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
- nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
-
- nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((dev_priv->ramht->bits - 9) << 16) |
- (dev_priv->ramht->gpuobj->addr >> 8));
- nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
- nv_wr32(dev, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
-
- nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
- nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
- nv_wr32(dev, NV03_PFIFO_CACHES, 1);
-
- for (i = 0; i < priv->base.channels; i++) {
- if (dev_priv->channels.ptr[i])
- nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
- }
-
- return 0;
-}
-
-int
-nv04_fifo_fini(struct drm_device *dev, int engine, bool suspend)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv = nv_engine(dev, engine);
- struct nouveau_channel *chan;
- int chid;
-
- /* prevent context switches and halt fifo operation */
- nv_wr32(dev, NV03_PFIFO_CACHES, 0);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 0);
-
- /* store current fifo context in ramfc */
- chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
- chan = dev_priv->channels.ptr[chid];
- if (suspend && chid != priv->base.channels && chan) {
- struct nv04_fifo_chan *fctx = chan->engctx[engine];
- struct nouveau_gpuobj *ctx = priv->ramfc;
- struct ramfc_desc *c = priv->ramfc_desc;
- do {
- u32 rm = ((1ULL << c->bits) - 1) << c->regs;
- u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
- u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs;
- u32 cv = (nv_ro32(ctx, c->ctxp + fctx->ramfc) & ~cm);
- nv_wo32(ctx, c->ctxp + fctx->ramfc, cv | (rv << c->ctxs));
- } while ((++c)->bits);
- }
-
- nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0x00000000);
- return 0;
-}
-
-static bool
-nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
-{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = NULL;
- struct nouveau_gpuobj *obj;
- unsigned long flags;
- const int subc = (addr >> 13) & 0x7;
- const int mthd = addr & 0x1ffc;
- bool handled = false;
- u32 engine;
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (likely(chid >= 0 && chid < pfifo->channels))
- chan = dev_priv->channels.ptr[chid];
- if (unlikely(!chan))
- goto out;
-
- switch (mthd) {
- case 0x0000: /* bind object to subchannel */
- obj = nouveau_ramht_find(chan, data);
- if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
- break;
-
- engine = 0x0000000f << (subc * 4);
-
- nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
- handled = true;
- break;
- default:
- engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
- if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
- break;
-
- if (!nouveau_gpuobj_mthd_call(chan, nouveau_software_class(dev),
- mthd, data))
- handled = true;
- break;
- }
-
-out:
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- return handled;
-}
-
-static const char *nv_dma_state_err(u32 state)
-{
- static const char * const desc[] = {
- "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
- "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
- };
- return desc[(state >> 29) & 0x7];
-}
-
-void
-nv04_fifo_isr(struct drm_device *dev)
-{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t status, reassign;
- int cnt = 0;
-
- reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
- while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
- uint32_t chid, get;
-
- nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-
- chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & pfifo->channels;
- get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
-
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+int
+nv04_fifo_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 handle)
+{
+ struct nv04_fifo_priv *priv = (void *)parent->engine;
+ struct nv04_fifo_chan *chan = (void *)parent;
+ u32 context, chid = chan->base.chid;
+ int ret;
+
+ if (nv_iclass(object, NV_GPUOBJ_CLASS))
+ context = nv_gpuobj(object)->addr >> 4;
+ else
+ context = 0x00000004; /* just non-zero */
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_DMAOBJ:
+ case NVDEV_ENGINE_SW:
+ context |= 0x00000000;
+ break;
+ case NVDEV_ENGINE_GR:
+ context |= 0x00010000;
+ break;
+ case NVDEV_ENGINE_MPEG:
+ context |= 0x00020000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ context |= 0x80000000; /* valid */
+ context |= chid << 24;
+
+ mutex_lock(&nv_subdev(priv)->mutex);
+ ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
+ mutex_unlock(&nv_subdev(priv)->mutex);
+ return ret;
+}
+
+void
+nv04_fifo_object_detach(struct nouveau_object *parent, int cookie)
+{
+ struct nv04_fifo_priv *priv = (void *)parent->engine;
+ mutex_lock(&nv_subdev(priv)->mutex);
+ nouveau_ramht_remove(priv->ramht, cookie);
+ mutex_unlock(&nv_subdev(priv)->mutex);
+}
+
+static int
+nv04_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_fifo_priv *priv = (void *)engine;
+ struct nv04_fifo_chan *chan;
+ struct nv_channel_dma_class *args = data;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+ 0x10000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->object_attach = nv04_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+ chan->ramfc = chan->base.chid * 32;
+
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x10,
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ return 0;
+}
+
+void
+nv04_fifo_chan_dtor(struct nouveau_object *object)
+{
+ struct nv04_fifo_priv *priv = (void *)object->engine;
+ struct nv04_fifo_chan *chan = (void *)object;
+ struct ramfc_desc *c = priv->ramfc_desc;
+
+ do {
+ nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
+ } while ((++c)->bits);
+
+ nouveau_fifo_channel_destroy(&chan->base);
+}
+
+int
+nv04_fifo_chan_init(struct nouveau_object *object)
+{
+ struct nv04_fifo_priv *priv = (void *)object->engine;
+ struct nv04_fifo_chan *chan = (void *)object;
+ u32 mask = 1 << chan->base.chid;
+ unsigned long flags;
+ int ret;
+
+ ret = nouveau_fifo_channel_init(&chan->base);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ nv_mask(priv, NV04_PFIFO_MODE, mask, mask);
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+ return 0;
+}
+
+int
+nv04_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv04_fifo_priv *priv = (void *)object->engine;
+ struct nv04_fifo_chan *chan = (void *)object;
+ struct nouveau_gpuobj *fctx = priv->ramfc;
+ struct ramfc_desc *c;
+ unsigned long flags;
+ u32 data = chan->ramfc;
+ u32 chid;
+
+ /* prevent fifo context switches */
+ spin_lock_irqsave(&priv->base.lock, flags);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 0);
+
+ /* if this channel is active, replace it with a null context */
+ chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
+ if (chid == chan->base.chid) {
+ nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
+ nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+
+ c = priv->ramfc_desc;
+ do {
+ u32 rm = ((1ULL << c->bits) - 1) << c->regs;
+ u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
+ u32 rv = (nv_rd32(priv, c->regp) & rm) >> c->regs;
+ u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
+ nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
+ } while ((++c)->bits);
+
+ c = priv->ramfc_desc;
+ do {
+ nv_wr32(priv, c->regp, 0x00000000);
+ } while ((++c)->bits);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
+ /* restore normal operation, after disabling dma mode */
+ nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+
+ return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
+
+static struct nouveau_ofuncs
+nv04_fifo_ofuncs = {
+ .ctor = nv04_fifo_chan_ctor,
+ .dtor = nv04_fifo_chan_dtor,
+ .init = nv04_fifo_chan_init,
+ .fini = nv04_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv04_fifo_sclass[] = {
+ { 0x006e, &nv04_fifo_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+int
+nv04_fifo_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_fifo_base *base;
+ int ret;
+
+ ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+ 0x1000, NVOBJ_FLAG_HEAP, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct nouveau_oclass
+nv04_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fifo_context_ctor,
+ .dtor = _nouveau_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+void
+nv04_fifo_pause(struct nouveau_fifo *pfifo, unsigned long *pflags)
+__acquires(priv->base.lock)
+{
+ struct nv04_fifo_priv *priv = (void *)pfifo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ *pflags = flags;
+
+ nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
+ nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
+
+ /* in some cases the puller may be left in an inconsistent state
+ * if you try to stop it while it's busy translating handles.
+ * sometimes you get a CACHE_ERROR, sometimes it just fails
+ * silently; sending incorrect instance offsets to PGRAPH after
+ * it's started up again.
+ *
+ * to avoid this, we invalidate the most recently calculated
+ * instance.
+ */
+ if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
+ NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
+ nv_warn(priv, "timeout idling puller\n");
+
+ if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
+ NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
+ nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+
+ nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
+}
+
+void
+nv04_fifo_start(struct nouveau_fifo *pfifo, unsigned long *pflags)
+__releases(priv->base.lock)
+{
+ struct nv04_fifo_priv *priv = (void *)pfifo;
+ unsigned long flags = *pflags;
+
+ nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001);
+
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+}
+
+static const char *
+nv_dma_state_err(u32 state)
+{
+ static const char * const desc[] = {
+ "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
+ "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
+ };
+ return desc[(state >> 29) & 0x7];
+}
+
+static bool
+nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
+{
+ struct nv04_fifo_chan *chan = NULL;
+ struct nouveau_handle *bind;
+ const int subc = (addr >> 13) & 0x7;
+ const int mthd = addr & 0x1ffc;
+ bool handled = false;
+ unsigned long flags;
+ u32 engine;
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ if (likely(chid >= priv->base.min && chid <= priv->base.max))
+ chan = (void *)priv->base.channel[chid];
+ if (unlikely(!chan))
+ goto out;
+
+ switch (mthd) {
+ case 0x0000:
+ bind = nouveau_namedb_get(nv_namedb(chan), data);
+ if (unlikely(!bind))
+ break;
+
+ if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
+ engine = 0x0000000f << (subc * 4);
+ chan->subc[subc] = data;
+ handled = true;
+
+ nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
+ }
+
+ nouveau_namedb_put(bind);
+ break;
+ default:
+ engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
+ if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
+ break;
+
+ bind = nouveau_namedb_get(nv_namedb(chan), chan->subc[subc]);
+ if (likely(bind)) {
+ if (!nv_call(bind->object, mthd, data))
+ handled = true;
+ nouveau_namedb_put(bind);
+ }
+ break;
+ }
+
+out:
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+ return handled;
+}
+
+void
+nv04_fifo_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_device *device = nv_device(subdev);
+ struct nv04_fifo_priv *priv = (void *)subdev;
+ uint32_t status, reassign;
+ int cnt = 0;
+
+ reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
+ while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+ uint32_t chid, get;
+
+ nv_wr32(priv, NV03_PFIFO_CACHES, 0);
+
+ chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
+ get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
+
 if (status & NV_PFIFO_INTR_CACHE_ERROR) {
 uint32_t mthd, data;
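
The context save in nv04_fifo_chan_fini() above is driven entirely by the ramfc_desc table: each entry describes one field, 'bits' wide, sitting at bit 'regs' of register 'regp' and at bit 'ctxs' of the RAMFC word at byte offset 'ctxp'. A self-contained sketch of the same masking arithmetic, outside the driver (the struct and helper names are hypothetical; the math is identical to the save loop):

#include <stdint.h>

struct ramfc_desc_example {
    unsigned bits; /* field width in bits */
    unsigned ctxs; /* bit offset within the RAMFC word */
    unsigned ctxp; /* byte offset of the RAMFC word */
    unsigned regs; /* bit offset within the register */
    unsigned regp; /* register address */
};

/* Extract the live field from a register value and merge it into the
 * corresponding RAMFC context word, as the chan_fini() loop does. */
static uint32_t
ramfc_save_word(const struct ramfc_desc_example *c,
                uint32_t regval, uint32_t ctxval)
{
    uint32_t rm = ((1ULL << c->bits) - 1) << c->regs; /* register mask */
    uint32_t cm = ((1ULL << c->bits) - 1) << c->ctxs; /* context mask */
    uint32_t rv = (regval & rm) >> c->regs;
    return (ctxval & ~cm) | (rv << c->ctxs);
}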
@@ -340,86 +418,85 @@ nv04_fifo_isr(struct drm_device *dev)
  */
 ptr = (get & 0x7ff) >> 2;
 
- if (dev_priv->card_type < NV_40) {
- mthd = nv_rd32(dev,
- NV04_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(dev,
- NV04_PFIFO_CACHE1_DATA(ptr));
- } else {
- mthd = nv_rd32(dev,
- NV40_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(dev,
- NV40_PFIFO_CACHE1_DATA(ptr));
- }
-
- if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
- NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
- "Mthd 0x%04x Data 0x%08x\n",
- chid, (mthd >> 13) & 7, mthd & 0x1ffc,
- data);
- }
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
- nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_CACHE_ERROR);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
- nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-
- status &= ~NV_PFIFO_INTR_CACHE_ERROR;
- }
-
- if (status & NV_PFIFO_INTR_DMA_PUSHER) {
- u32 dma_get = nv_rd32(dev, 0x003244);
- u32 dma_put = nv_rd32(dev, 0x003240);
- u32 push = nv_rd32(dev, 0x003220);
- u32 state = nv_rd32(dev, 0x003228);
-
- if (dev_priv->card_type == NV_50) {
- u32 ho_get = nv_rd32(dev, 0x003328);
- u32 ho_put = nv_rd32(dev, 0x003320);
- u32 ib_get = nv_rd32(dev, 0x003334);
- u32 ib_put = nv_rd32(dev, 0x003330);
-
- if (nouveau_ratelimit())
- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
- "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
- "State 0x%08x (err: %s) Push 0x%08x\n",
- chid, ho_get, dma_get, ho_put,
- dma_put, ib_get, ib_put, state,
- nv_dma_state_err(state),
- push);
-
- /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
- nv_wr32(dev, 0x003364, 0x00000000);
- if (dma_get != dma_put || ho_get != ho_put) {
- nv_wr32(dev, 0x003244, dma_put);
- nv_wr32(dev, 0x003328, ho_put);
- } else
- if (ib_get != ib_put) {
- nv_wr32(dev, 0x003334, ib_put);
- }
- } else {
- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
- "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
- chid, dma_get, dma_put, state,
- nv_dma_state_err(state), push);
-
- if (dma_get != dma_put)
- nv_wr32(dev, 0x003244, dma_put);
- }
-
- nv_wr32(dev, 0x003228, 0x00000000);
- nv_wr32(dev, 0x003220, 0x00000001);
- nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
- status &= ~NV_PFIFO_INTR_DMA_PUSHER;
- }
+ if (device->card_type < NV_40) {
+ mthd = nv_rd32(priv,
+ NV04_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(priv,
+ NV04_PFIFO_CACHE1_DATA(ptr));
+ } else {
+ mthd = nv_rd32(priv,
+ NV40_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(priv,
+ NV40_PFIFO_CACHE1_DATA(ptr));
+ }
+
+ if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
+ nv_info(priv, "CACHE_ERROR - Ch %d/%d "
+ "Mthd 0x%04x Data 0x%08x\n",
+ chid, (mthd >> 13) & 7, mthd & 0x1ffc,
+ data);
+ }
+
+ nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nv_wr32(priv, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_CACHE_ERROR);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
+
+ nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
+ nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+
+ status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+ }
+
+ if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+ u32 dma_get = nv_rd32(priv, 0x003244);
+ u32 dma_put = nv_rd32(priv, 0x003240);
+ u32 push = nv_rd32(priv, 0x003220);
+ u32 state = nv_rd32(priv, 0x003228);
+
+ if (device->card_type == NV_50) {
+ u32 ho_get = nv_rd32(priv, 0x003328);
+ u32 ho_put = nv_rd32(priv, 0x003320);
+ u32 ib_get = nv_rd32(priv, 0x003334);
+ u32 ib_put = nv_rd32(priv, 0x003330);
+
+ nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
+ "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+ "State 0x%08x (err: %s) Push 0x%08x\n",
+ chid, ho_get, dma_get, ho_put,
+ dma_put, ib_get, ib_put, state,
+ nv_dma_state_err(state),
+ push);
+
+ /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+ nv_wr32(priv, 0x003364, 0x00000000);
+ if (dma_get != dma_put || ho_get != ho_put) {
+ nv_wr32(priv, 0x003244, dma_put);
+ nv_wr32(priv, 0x003328, ho_put);
+ } else
+ if (ib_get != ib_put) {
+ nv_wr32(priv, 0x003334, ib_put);
+ }
+ } else {
+ nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
+ "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
+ chid, dma_get, dma_put, state,
+ nv_dma_state_err(state), push);
+
+ if (dma_get != dma_put)
+ nv_wr32(priv, 0x003244, dma_put);
+ }
+
+ nv_wr32(priv, 0x003228, 0x00000000);
+ nv_wr32(priv, 0x003220, 0x00000001);
+ nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+ status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+ }
 
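
A note on the CACHE_ERROR path above: the method address handed to nv04_fifo_swmthd() has a fixed hardware layout, with the subchannel in bits 15:13 and the word-aligned method offset in bits 12:2. An illustrative decode helper mirroring the expressions used in the handler (the helper itself is not part of this commit):

/* Decode a PFIFO CACHE1 method address the way nv04_fifo_swmthd() does. */
static inline void
pfifo_decode_mthd(uint32_t addr, int *subc, int *mthd)
{
    *subc = (addr >> 13) & 0x7; /* subchannel, bits 15:13 */
    *mthd = addr & 0x1ffc;      /* method offset, word aligned */
}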
@@ -427,81 +504,118 @@ nv04_fifo_isr(struct drm_device *dev)
 uint32_t sem;
 
 status &= ~NV_PFIFO_INTR_SEMAPHORE;
- nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_SEMAPHORE);
-
- sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
- nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
- }
-
- if (dev_priv->card_type == NV_50) {
- if (status & 0x00000010) {
- nv50_fb_vm_trap(dev, nouveau_ratelimit());
- status &= ~0x00000010;
- nv_wr32(dev, 0x002100, 0x00000010);
- }
- }
-
- if (status) {
- if (nouveau_ratelimit())
- NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
- status, chid);
- nv_wr32(dev, NV03_PFIFO_INTR_0, status);
- status = 0;
- }
-
- nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
- }
-
- if (status) {
- NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
- nv_wr32(dev, 0x2140, 0);
- nv_wr32(dev, 0x140, 0);
- }
-
- nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
-}
-
-void
-nv04_fifo_destroy(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv = nv_engine(dev, engine);
-
- nouveau_irq_unregister(dev, 8);
-
- nouveau_gpuobj_ref(NULL, &priv->ramfc);
- nouveau_gpuobj_ref(NULL, &priv->ramro);
-
- dev_priv->eng[engine] = NULL;
- kfree(priv);
-}
-
-int
-nv04_fifo_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
- nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
-
- priv->base.base.destroy = nv04_fifo_destroy;
- priv->base.base.init = nv04_fifo_init;
- priv->base.base.fini = nv04_fifo_fini;
- priv->base.base.context_new = nv04_fifo_context_new;
- priv->base.base.context_del = nv04_fifo_context_del;
- priv->base.channels = 15;
- priv->ramfc_desc = nv04_ramfc;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- return 0;
-}
+ nv_wr32(priv, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_SEMAPHORE);
+
+ sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
+ nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
+ if (device->card_type == NV_50) {
+ if (status & 0x00000010) {
+ nv50_fb_trap(nouveau_fb(priv), 1);
+ status &= ~0x00000010;
+ nv_wr32(priv, 0x002100, 0x00000010);
+ }
+ }
+
+ if (status) {
+ nv_info(priv, "unknown intr 0x%08x, ch %d\n",
+ status, chid);
+ nv_wr32(priv, NV03_PFIFO_INTR_0, status);
+ status = 0;
+ }
+
+ nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
+ }
+
+ if (status) {
+ nv_info(priv, "still angry after %d spins, halt\n", cnt);
+ nv_wr32(priv, 0x002140, 0);
+ nv_wr32(priv, 0x000140, 0);
+ }
+
+ nv_wr32(priv, 0x000100, 0x00000100);
+}
+
+static int
+nv04_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_instmem_priv *imem = nv04_instmem(parent);
+ struct nv04_fifo_priv *priv;
+ int ret;
+
+ ret = nouveau_fifo_create(parent, engine, oclass, 0, 15, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nouveau_ramht_ref(imem->ramht, &priv->ramht);
+ nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+ nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nv04_fifo_intr;
+ nv_engine(priv)->cclass = &nv04_fifo_cclass;
+ nv_engine(priv)->sclass = nv04_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
+ priv->ramfc_desc = nv04_ramfc;
+ return 0;
+}
+
+void
+nv04_fifo_dtor(struct nouveau_object *object)
+{
+ struct nv04_fifo_priv *priv = (void *)object;
+ nouveau_gpuobj_ref(NULL, &priv->ramfc);
+ nouveau_gpuobj_ref(NULL, &priv->ramro);
+ nouveau_ramht_ref(NULL, &priv->ramht);
+ nouveau_fifo_destroy(&priv->base);
+}
+
+int
+nv04_fifo_init(struct nouveau_object *object)
+{
+ struct nv04_fifo_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fifo_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+ nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((priv->ramht->bits - 9) << 16) |
+ (priv->ramht->base.addr >> 8));
+ nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
+ nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+
+ nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+ return 0;
+}
+
+struct nouveau_oclass
+nv04_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fifo_ctor,
+ .dtor = nv04_fifo_dtor,
+ .init = nv04_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
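
nv04_fifo_object_attach() in the hunk above packs everything the puller needs into one 32-bit RAMHT context word: the instance address (already shifted right by 4) in the low bits, an engine selector from bit 16, the owning channel from bit 24, and a valid flag at bit 31. A hedged sketch of that encoding as a standalone helper; the function and parameter names are invented, but the bit layout is taken directly from the code:

/* Build a RAMHT context word as nv04_fifo_object_attach() does.
 * engsel: 0 = DMAOBJ/SW, 1 = GR, 2 = MPEG. */
static uint32_t
nv04_ramht_context_example(uint32_t inst, uint32_t engsel, uint32_t chid)
{
    uint32_t context = inst;  /* instance >> 4, or any non-zero value */
    context |= engsel << 16;  /* engine routing */
    context |= chid << 24;    /* owning channel */
    context |= 0x80000000;    /* valid */
    return context;
}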
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h
new file mode 100644
index 000000000000..496a4b4fdfaf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h
@@ -0,0 +1,178 @@
+#ifndef __NV04_FIFO_H__
+#define __NV04_FIFO_H__
+
+#include <engine/fifo.h>
+
+#define NV04_PFIFO_DELAY_0 0x00002040
+#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
+#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
+#define NV03_PFIFO_INTR_0 0x00002100
+#define NV03_PFIFO_INTR_EN_0 0x00002140
+# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
+# define NV_PFIFO_INTR_RUNOUT (1<<4)
+# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
+# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
+# define NV_PFIFO_INTR_DMA_PT (1<<16)
+# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
+# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
+#define NV03_PFIFO_RAMHT 0x00002210
+#define NV03_PFIFO_RAMFC 0x00002214
+#define NV03_PFIFO_RAMRO 0x00002218
+#define NV40_PFIFO_RAMFC 0x00002220
+#define NV03_PFIFO_CACHES 0x00002500
+#define NV04_PFIFO_MODE 0x00002504
+#define NV04_PFIFO_DMA 0x00002508
+#define NV04_PFIFO_SIZE 0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE 128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
+#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
+#define NV03_PFIFO_CACHE0_PULL0 0x00003040
+#define NV04_PFIFO_CACHE0_PULL0 0x00003050
+#define NV04_PFIFO_CACHE0_PULL1 0x00003054
+#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
+#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
+#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
+#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
+#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
+#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
+#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
+#define NV03_PFIFO_CACHE1_PUT 0x00003210
+#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
+#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
+# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
+# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
+# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
+#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
+#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
+#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
+#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
+#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
+#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
+#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
+#define NV03_PFIFO_CACHE1_PULL0 0x00003240
+#define NV04_PFIFO_CACHE1_PULL0 0x00003250
+# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010
+# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000
+#define NV03_PFIFO_CACHE1_PULL1 0x00003250
+#define NV04_PFIFO_CACHE1_PULL1 0x00003254
+#define NV04_PFIFO_CACHE1_HASH 0x00003258
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
+#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
+#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
+#define NV03_PFIFO_CACHE1_GET 0x00003270
+#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
+#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
+#define NV40_PFIFO_UNK32E4 0x000032E4
+#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
+#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
+#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
+#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
+
+struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+};
+
+struct nv04_fifo_priv {
+ struct nouveau_fifo base;
+ struct ramfc_desc *ramfc_desc;
+ struct nouveau_ramht *ramht;
+ struct nouveau_gpuobj *ramro;
+ struct nouveau_gpuobj *ramfc;
+};
+
+struct nv04_fifo_base {
+ struct nouveau_fifo_base base;
+};
+
+struct nv04_fifo_chan {
+ struct nouveau_fifo_chan base;
+ u32 subc[8];
+ u32 ramfc;
+};
+
+int nv04_fifo_object_attach(struct nouveau_object *,
+ struct nouveau_object *, u32);
+void nv04_fifo_object_detach(struct nouveau_object *, int);
+
+void nv04_fifo_chan_dtor(struct nouveau_object *);
+int nv04_fifo_chan_init(struct nouveau_object *);
+int nv04_fifo_chan_fini(struct nouveau_object *, bool suspend);
+
+int nv04_fifo_context_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+
+void nv04_fifo_dtor(struct nouveau_object *);
+int nv04_fifo_init(struct nouveau_object *);
+void nv04_fifo_pause(struct nouveau_fifo *, unsigned long *);
+void nv04_fifo_start(struct nouveau_fifo *, unsigned long *);
+
+#endif
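
Given these macros, the default RAMFC fetch word written by the nv04/nv10/nv17 channel constructors is a plain OR of three fields; numerically, 0x00000078 | 0x00006000 | 0x00080000 = 0x00086078 (with bit 31 also set on big-endian builds, giving 0x80086078). A sketch, assuming this header is included; the variable is illustrative:

/* Default CACHE1 DMA fetch setting used by the chan_ctor paths:
 * 128-byte trigger, 128-byte fetch size, 8 outstanding requests. */
static const u32 example_dma_fetch =
    NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | /* 0x00000078 */
    NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | /* 0x00006000 */
    NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8;      /* 0x00080000 */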
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 2d38fa88f9c7..391fefa7c472 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -1,43 +1,42 @@
 /*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
+ * Copyright 2012 Red Hat Inc.
  *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
  *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
  *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
  *
+ * Authors: Ben Skeggs
  */
 
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include <engine/fifo.h>
-#include "nouveau_util.h"
-#include <core/ramht.h>
-
-static struct ramfc_desc {
- unsigned bits:6;
- unsigned ctxs:5;
- unsigned ctxp:8;
- unsigned regs:5;
- unsigned regp;
-} nv10_ramfc[] = {
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
+static struct ramfc_desc
+nv10_ramfc[] = {
  { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
  { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
  { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -50,87 +49,122 @@ static struct ramfc_desc {
 {}
 };
 
-struct nv10_fifo_priv {
- struct nouveau_fifo_priv base;
- struct ramfc_desc *ramfc_desc;
- struct nouveau_gpuobj *ramro;
- struct nouveau_gpuobj *ramfc;
-};
-
-struct nv10_fifo_chan {
- struct nouveau_fifo_chan base;
- u32 ramfc;
-};
-
-static int
-nv10_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv10_fifo_priv *priv = nv_engine(dev, engine);
- struct nv10_fifo_chan *fctx;
- unsigned long flags;
- int ret;
-
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
-
- fctx->ramfc = chan->id * 32;
-
- /* map channel control registers */
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
- }
-
- /* initialise default fifo context */
- nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x14,
- NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-
- /* enable dma mode on the channel */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-error:
- if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
-}
-
-int
-nv10_fifo_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv10_fifo_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
- nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
-
- priv->base.base.destroy = nv04_fifo_destroy;
- priv->base.base.init = nv04_fifo_init;
- priv->base.base.fini = nv04_fifo_fini;
- priv->base.base.context_new = nv10_fifo_context_new;
- priv->base.base.context_del = nv04_fifo_context_del;
- priv->base.channels = 31;
- priv->ramfc_desc = nv10_ramfc;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- return 0;
-}
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+static int
+nv10_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_fifo_priv *priv = (void *)engine;
+ struct nv04_fifo_chan *chan;
+ struct nv_channel_dma_class *args = data;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+ 0x10000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->object_attach = nv04_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+ chan->ramfc = chan->base.chid * 32;
+
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x14,
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ return 0;
+}
+
+static struct nouveau_ofuncs
+nv10_fifo_ofuncs = {
+ .ctor = nv10_fifo_chan_ctor,
+ .dtor = nv04_fifo_chan_dtor,
+ .init = nv04_fifo_chan_init,
+ .fini = nv04_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv10_fifo_sclass[] = {
+ { 0x006e, &nv10_fifo_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv10_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0x10),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fifo_context_ctor,
+ .dtor = _nouveau_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv10_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_instmem_priv *imem = nv04_instmem(parent);
+ struct nv04_fifo_priv *priv;
+ int ret;
+
+ ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nouveau_ramht_ref(imem->ramht, &priv->ramht);
+ nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+ nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nv04_fifo_intr;
+ nv_engine(priv)->cclass = &nv10_fifo_cclass;
+ nv_engine(priv)->sclass = nv10_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
+ priv->ramfc_desc = nv10_ramfc;
+ return 0;
+}
+
+struct nouveau_oclass
+nv10_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0x10),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv10_fifo_ctor,
+ .dtor = nv04_fifo_dtor,
+ .init = nv04_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
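
Both ctors above install nv04_fifo_pause()/nv04_fifo_start() as the engine's pause/start pair, which other parts of the driver can use to quiesce the puller around critical updates. A hedged sketch of the calling convention; the surrounding function is invented, while the hooks and their flags cookie are as defined in nv04.c above:

/* Illustrative caller: bracket a critical section with the PFIFO
 * pause()/start() hooks.  pause() stops the puller and returns with
 * the fifo lock held; start() re-enables caches and drops it. */
static void
example_with_pfifo_paused(struct nouveau_fifo *pfifo)
{
    unsigned long flags;

    pfifo->pause(pfifo, &flags);
    /* ... safely touch RAMHT / CACHE1 state here ... */
    pfifo->start(pfifo, &flags);
}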
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index 2f700a15e286..3b9d6c97f9ba 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -1,43 +1,42 @@
 /*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
+ * Copyright 2012 Red Hat Inc.
  *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
  *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
  *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
  *
+ * Authors: Ben Skeggs
  */
 
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include <engine/fifo.h>
-#include "nouveau_util.h"
-#include <core/ramht.h>
-
-static struct ramfc_desc {
- unsigned bits:6;
- unsigned ctxs:5;
- unsigned ctxp:8;
- unsigned regs:5;
- unsigned regp;
-} nv17_ramfc[] = {
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
+static struct ramfc_desc
+nv17_ramfc[] = {
  { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
  { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
  { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -55,124 +54,154 @@ static struct ramfc_desc {
 {}
 };
 
-struct nv17_fifo_priv {
- struct nouveau_fifo_priv base;
- struct ramfc_desc *ramfc_desc;
- struct nouveau_gpuobj *ramro;
- struct nouveau_gpuobj *ramfc;
-};
-
-struct nv17_fifo_chan {
- struct nouveau_fifo_chan base;
- u32 ramfc;
-};
-
-static int
-nv17_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv17_fifo_priv *priv = nv_engine(dev, engine);
- struct nv17_fifo_chan *fctx;
- unsigned long flags;
- int ret;
-
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
-
- fctx->ramfc = chan->id * 64;
-
- /* map channel control registers */
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
- }
-
- /* initialise default fifo context */
- nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x14,
- NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-
- /* enable dma mode on the channel */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-error:
- if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
-}
-
-static int
-nv17_fifo_init(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv17_fifo_priv *priv = nv_engine(dev, engine);
- int i;
-
- nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
- nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
-
- nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
- nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
-
- nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+static int
+nv17_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_fifo_priv *priv = (void *)engine;
+ struct nv04_fifo_chan *chan;
+ struct nv_channel_dma_class *args = data;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+ 0x10000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR) |
+ (1 << NVDEV_ENGINE_MPEG), /* NV31- */
+ &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->object_attach = nv04_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+ chan->ramfc = chan->base.chid * 64;
+
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x14,
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ return 0;
+}
+
+static struct nouveau_ofuncs
+nv17_fifo_ofuncs = {
+ .ctor = nv17_fifo_chan_ctor,
+ .dtor = nv04_fifo_chan_dtor,
+ .init = nv04_fifo_chan_init,
+ .fini = nv04_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv17_fifo_sclass[] = {
+ { 0x006e, &nv17_fifo_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv17_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0x17),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fifo_context_ctor,
+ .dtor = _nouveau_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv17_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_instmem_priv *imem = nv04_instmem(parent);
+ struct nv04_fifo_priv *priv;
+ int ret;
+
+ ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nouveau_ramht_ref(imem->ramht, &priv->ramht);
+ nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+ nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nv04_fifo_intr;
+ nv_engine(priv)->cclass = &nv17_fifo_cclass;
+ nv_engine(priv)->sclass = nv17_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
+ priv->ramfc_desc = nv17_ramfc;
+ return 0;
+}
+
+static int
+nv17_fifo_init(struct nouveau_object *object)
+{
+ struct nv04_fifo_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fifo_init(&priv->base);
131 ((dev_priv->ramht->bits - 9) << 16) | 175 if (ret)
132 (dev_priv->ramht->gpuobj->addr >> 8)); 176 return ret;
133 nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
134 nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 |
135 priv->ramfc->addr >> 8);
136 177
137 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels); 178 nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
179 nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
138 180
139 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff); 181 nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
140 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff); 182 ((priv->ramht->bits - 9) << 16) |
183 (priv->ramht->base.addr >> 8));
184 nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
185 nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8 | 0x00010000);
141 186
142 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1); 187 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
143 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
144 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
145 188
146 for (i = 0; i < priv->base.channels; i++) { 189 nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
147 if (dev_priv->channels.ptr[i]) 190 nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
148 nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
149 }
150 191
192 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
193 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
194 nv_wr32(priv, NV03_PFIFO_CACHES, 1);
151 return 0; 195 return 0;
152} 196}
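
The RAMHT write in nv17_fifo_init() packs three fields into NV03_PFIFO_RAMHT: a search-depth selector, the hash-table size, and the table's base address. A minimal sketch of that encoding as read off the code above (the helper name is illustrative, not driver API):

        #include <stdint.h>

        typedef uint32_t u32;
        typedef uint64_t u64;

        static u32
        pfifo_ramht_cfg(u64 ramht_addr, unsigned ramht_bits)
        {
                return (0x03 << 24)              /* search depth: 128 */
                     | ((ramht_bits - 9) << 16)  /* table size, log2(entries) - 9 */
                     | (u32)(ramht_addr >> 8);   /* base address, 256-byte units */
        }
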
153 197
154int 198struct nouveau_oclass
155nv17_fifo_create(struct drm_device *dev) 199nv17_fifo_oclass = {
156{ 200 .handle = NV_ENGINE(FIFO, 0x17),
157 struct drm_nouveau_private *dev_priv = dev->dev_private; 201 .ofuncs = &(struct nouveau_ofuncs) {
158 struct nv17_fifo_priv *priv; 202 .ctor = nv17_fifo_ctor,
159 203 .dtor = nv04_fifo_dtor,
160 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 204 .init = nv17_fifo_init,
161 if (!priv) 205 .fini = _nouveau_fifo_fini,
162 return -ENOMEM; 206 },
163 207};
164 nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
165 nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
166
167 priv->base.base.destroy = nv04_fifo_destroy;
168 priv->base.base.init = nv17_fifo_init;
169 priv->base.base.fini = nv04_fifo_fini;
170 priv->base.base.context_new = nv17_fifo_context_new;
171 priv->base.base.context_del = nv04_fifo_context_del;
172 priv->base.channels = 31;
173 priv->ramfc_desc = nv17_ramfc;
174 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
175
176 nouveau_irq_register(dev, 8, nv04_fifo_isr);
177 return 0;
178}
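
Both nv17 and nv40 keep per-channel state in RAMFC slices described by the ramfc_desc table at the top of each file: every entry maps a bitfield of a PFIFO register (regp/regs) onto a bitfield of the channel's RAMFC words (ctxp/ctxs). A self-contained sketch of how such a table can drive a context save, assuming that field meaning; the rd32/ro32/wo32 helpers and backing arrays are stand-ins for the driver's accessors, not its API:

        #include <stdint.h>

        typedef uint32_t u32;

        struct ramfc_desc {
                unsigned bits:6;   /* width of the field to copy */
                unsigned ctxs:5;   /* destination bit offset in the RAMFC word */
                unsigned ctxp:8;   /* destination byte offset within RAMFC */
                unsigned regs:5;   /* source bit offset within the register */
                unsigned regp;     /* source PFIFO register */
        };

        /* Toy MMIO and instance-memory backing stores keep the sketch
         * self-contained; the driver goes through nv_rd32()/nv_ro32()/
         * nv_wo32() instead. */
        static u32 mmio[0x4000], imem[0x1000];
        static u32  rd32(u32 reg)          { return mmio[reg >> 2]; }
        static u32  ro32(u32 off)          { return imem[off >> 2]; }
        static void wo32(u32 off, u32 val) { imem[off >> 2] = val; }

        /* Copy each described register field into the channel's RAMFC
         * slice at 'base'; the table ends with its all-zero sentinel. */
        static void
        ramfc_save(const struct ramfc_desc *c, u32 base)
        {
                do {
                        u32 mask = (c->bits == 32) ? 0xffffffff
                                                   : (1u << c->bits) - 1;
                        u32 data = (rd32(c->regp) >> c->regs) & mask;
                        u32 word = ro32(base + c->ctxp) & ~(mask << c->ctxs);
                        wo32(base + c->ctxp, word | (data << c->ctxs));
                } while ((++c)->bits);
        }
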
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 65a670f92a07..43d5c9eea865 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -1,43 +1,42 @@
1/* 1/*
2 * Copyright (C) 2012 Ben Skeggs. 2 * Copyright 2012 Red Hat Inc.
3 * All Rights Reserved.
4 * 3 *
5 * Permission is hereby granted, free of charge, to any person obtaining 4 * Permission is hereby granted, free of charge, to any person obtaining a
6 * a copy of this software and associated documentation files (the 5 * copy of this software and associated documentation files (the "Software"),
7 * "Software"), to deal in the Software without restriction, including 6 * to deal in the Software without restriction, including without limitation
8 * without limitation the rights to use, copy, modify, merge, publish, 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * distribute, sublicense, and/or sell copies of the Software, and to 8 * and/or sell copies of the Software, and to permit persons to whom the
10 * permit persons to whom the Software is furnished to do so, subject to 9 * Software is furnished to do so, subject to the following conditions:
11 * the following conditions:
12 * 10 *
13 * The above copyright notice and this permission notice (including the 11 * The above copyright notice and this permission notice shall be included in
14 * next paragraph) shall be included in all copies or substantial 12 * all copies or substantial portions of the Software.
15 * portions of the Software.
16 * 13 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
24 * 21 *
22 * Authors: Ben Skeggs
25 */ 23 */
26 24
27#include "drmP.h" 25#include <core/os.h>
28#include "drm.h" 26#include <core/class.h>
29#include "nouveau_drv.h" 27#include <core/engctx.h>
30#include <engine/fifo.h>
31#include "nouveau_util.h"
32#include <core/ramht.h> 28#include <core/ramht.h>
33 29
34static struct ramfc_desc { 30#include <subdev/instmem.h>
35 unsigned bits:6; 31#include <subdev/instmem/nv04.h>
36 unsigned ctxs:5; 32#include <subdev/fb.h>
37 unsigned ctxp:8; 33
38 unsigned regs:5; 34#include <engine/fifo.h>
39 unsigned regp; 35
40} nv40_ramfc[] = { 36#include "nv04.h"
37
38static struct ramfc_desc
39nv40_ramfc[] = {
41 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT }, 40 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
42 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET }, 41 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
43 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT }, 42 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -63,148 +62,287 @@ static struct ramfc_desc {
63 {} 62 {}
64}; 63};
65 64
66struct nv40_fifo_priv { 65/*******************************************************************************
67 struct nouveau_fifo_priv base; 66 * FIFO channel objects
68 struct ramfc_desc *ramfc_desc; 67 ******************************************************************************/
69 struct nouveau_gpuobj *ramro;
70 struct nouveau_gpuobj *ramfc;
71};
72 68
73struct nv40_fifo_chan { 69static int
74 struct nouveau_fifo_chan base; 70nv40_fifo_object_attach(struct nouveau_object *parent,
75 u32 ramfc; 71 struct nouveau_object *object, u32 handle)
76}; 72{
73 struct nv04_fifo_priv *priv = (void *)parent->engine;
74 struct nv04_fifo_chan *chan = (void *)parent;
75 u32 context, chid = chan->base.chid;
76 int ret;
77
78 if (nv_iclass(object, NV_GPUOBJ_CLASS))
79 context = nv_gpuobj(object)->addr >> 4;
80 else
81 context = 0x00000004; /* just non-zero */
82
83 switch (nv_engidx(object->engine)) {
84 case NVDEV_ENGINE_DMAOBJ:
85 case NVDEV_ENGINE_SW:
86 context |= 0x00000000;
87 break;
88 case NVDEV_ENGINE_GR:
89 context |= 0x00100000;
90 break;
91 case NVDEV_ENGINE_MPEG:
92 context |= 0x00200000;
93 break;
94 default:
95 return -EINVAL;
96 }
97
98 context |= chid << 23;
99
100 mutex_lock(&nv_subdev(priv)->mutex);
101 ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
102 mutex_unlock(&nv_subdev(priv)->mutex);
103 return ret;
104}
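
nv40_fifo_object_attach() above folds three things into the 32-bit RAMHT context word: the object's instance address in 16-byte units, an engine selector, and the owning channel id. The packing, restated as a sketch (the enum names are illustrative; the values are the constants from the switch):

        #include <stdint.h>

        typedef uint32_t u32;

        enum { ENG_SW_DMA = 0, ENG_GR = 1, ENG_MPEG = 2 }; /* bits 22:20 above */

        static u32
        nv40_ramht_context(u32 inst_addr, unsigned eng, unsigned chid)
        {
                return (inst_addr >> 4)  /* instance pointer, 16-byte units */
                     | (eng  << 20)      /* engine selector */
                     | (chid << 23);     /* owning channel */
        }
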
77 105
78static int 106static int
79nv40_fifo_context_new(struct nouveau_channel *chan, int engine) 107nv40_fifo_context_attach(struct nouveau_object *parent,
108 struct nouveau_object *engctx)
80{ 109{
81 struct drm_device *dev = chan->dev; 110 struct nv04_fifo_priv *priv = (void *)parent->engine;
82 struct drm_nouveau_private *dev_priv = dev->dev_private; 111 struct nv04_fifo_chan *chan = (void *)parent;
83 struct nv40_fifo_priv *priv = nv_engine(dev, engine);
84 struct nv40_fifo_chan *fctx;
85 unsigned long flags; 112 unsigned long flags;
86 int ret; 113 u32 reg, ctx;
114
115 switch (nv_engidx(engctx->engine)) {
116 case NVDEV_ENGINE_SW:
117 return 0;
118 case NVDEV_ENGINE_GR:
119 reg = 0x32e0;
120 ctx = 0x38;
121 break;
122 case NVDEV_ENGINE_MPEG:
123 reg = 0x330c;
124 ctx = 0x54;
125 break;
126 default:
127 return -EINVAL;
128 }
129
130 spin_lock_irqsave(&priv->base.lock, flags);
131 nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
87 132
88 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); 133 if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
89 if (!fctx) 134 nv_wr32(priv, reg, nv_gpuobj(engctx)->addr >> 4);
90 return -ENOMEM; 135 nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_gpuobj(engctx)->addr >> 4);
136
137 nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
138 spin_unlock_irqrestore(&priv->base.lock, flags);
139 return 0;
140}
91 141
92 fctx->ramfc = chan->id * 128; 142static int
143nv40_fifo_context_detach(struct nouveau_object *parent, bool suspend,
144 struct nouveau_object *engctx)
145{
146 struct nv04_fifo_priv *priv = (void *)parent->engine;
147 struct nv04_fifo_chan *chan = (void *)parent;
148 unsigned long flags;
149 u32 reg, ctx;
93 150
94 /* map channel control registers */ 151 switch (nv_engidx(engctx->engine)) {
95 chan->user = ioremap(pci_resource_start(dev->pdev, 0) + 152 case NVDEV_ENGINE_SW:
96 NV03_USER(chan->id), PAGE_SIZE); 153 return 0;
97 if (!chan->user) { 154 case NVDEV_ENGINE_GR:
98 ret = -ENOMEM; 155 reg = 0x32e0;
99 goto error; 156 ctx = 0x38;
157 break;
158 case NVDEV_ENGINE_MPEG:
159 reg = 0x330c;
160 ctx = 0x54;
161 break;
162 default:
163 return -EINVAL;
100 } 164 }
101 165
102 /* initialise default fifo context */ 166 spin_lock_irqsave(&priv->base.lock, flags);
103 nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base); 167 nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
104 nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base); 168
105 nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4); 169 if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
106 nv_wo32(priv->ramfc, fctx->ramfc + 0x18, 0x30000000 | 170 nv_wr32(priv, reg, 0x00000000);
171 nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000);
172
173 nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
174 spin_unlock_irqrestore(&priv->base.lock, flags);
175 return 0;
176}
177
178static int
179nv40_fifo_chan_ctor(struct nouveau_object *parent,
180 struct nouveau_object *engine,
181 struct nouveau_oclass *oclass, void *data, u32 size,
182 struct nouveau_object **pobject)
183{
184 struct nv04_fifo_priv *priv = (void *)engine;
185 struct nv04_fifo_chan *chan;
186 struct nv_channel_dma_class *args = data;
187 int ret;
188
189 if (size < sizeof(*args))
190 return -EINVAL;
191
192 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
193 0x1000, args->pushbuf,
194 (1 << NVDEV_ENGINE_DMAOBJ) |
195 (1 << NVDEV_ENGINE_SW) |
196 (1 << NVDEV_ENGINE_GR) |
197 (1 << NVDEV_ENGINE_MPEG), &chan);
198 *pobject = nv_object(chan);
199 if (ret)
200 return ret;
201
202 nv_parent(chan)->context_attach = nv40_fifo_context_attach;
203 nv_parent(chan)->context_detach = nv40_fifo_context_detach;
204 nv_parent(chan)->object_attach = nv40_fifo_object_attach;
205 nv_parent(chan)->object_detach = nv04_fifo_object_detach;
206 chan->ramfc = chan->base.chid * 128;
207
208 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
209 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
210 nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
211 nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
107 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 212 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
108 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | 213 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
109#ifdef __BIG_ENDIAN 214#ifdef __BIG_ENDIAN
110 NV_PFIFO_CACHE1_BIG_ENDIAN | 215 NV_PFIFO_CACHE1_BIG_ENDIAN |
111#endif 216#endif
112 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8); 217 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
113 nv_wo32(priv->ramfc, fctx->ramfc + 0x3c, 0x0001ffff); 218 nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
219 return 0;
220}
221
222static struct nouveau_ofuncs
223nv40_fifo_ofuncs = {
224 .ctor = nv40_fifo_chan_ctor,
225 .dtor = nv04_fifo_chan_dtor,
226 .init = nv04_fifo_chan_init,
227 .fini = nv04_fifo_chan_fini,
228 .rd32 = _nouveau_fifo_channel_rd32,
229 .wr32 = _nouveau_fifo_channel_wr32,
230};
231
232static struct nouveau_oclass
233nv40_fifo_sclass[] = {
234 { 0x006e, &nv40_fifo_ofuncs },
235 {}
236};
114 237
115 /* enable dma mode on the channel */ 238/*******************************************************************************
116 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 239 * FIFO context - basically just the instmem reserved for the channel
117 nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id)); 240 ******************************************************************************/
118 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
119 241
120 /*XXX: remove this later, need fifo engine context commit hook */ 242static struct nouveau_oclass
121 nouveau_gpuobj_ref(priv->ramfc, &chan->ramfc); 243nv40_fifo_cclass = {
244 .handle = NV_ENGCTX(FIFO, 0x40),
245 .ofuncs = &(struct nouveau_ofuncs) {
246 .ctor = nv04_fifo_context_ctor,
247 .dtor = _nouveau_fifo_context_dtor,
248 .init = _nouveau_fifo_context_init,
249 .fini = _nouveau_fifo_context_fini,
250 .rd32 = _nouveau_fifo_context_rd32,
251 .wr32 = _nouveau_fifo_context_wr32,
252 },
253};
122 254
123error: 255/*******************************************************************************
256 * PFIFO engine
257 ******************************************************************************/
258
259static int
260nv40_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
261 struct nouveau_oclass *oclass, void *data, u32 size,
262 struct nouveau_object **pobject)
263{
264 struct nv04_instmem_priv *imem = nv04_instmem(parent);
265 struct nv04_fifo_priv *priv;
266 int ret;
267
268 ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
269 *pobject = nv_object(priv);
124 if (ret) 270 if (ret)
125 priv->base.base.context_del(chan, engine); 271 return ret;
126 return ret; 272
273 nouveau_ramht_ref(imem->ramht, &priv->ramht);
274 nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
275 nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
276
277 nv_subdev(priv)->unit = 0x00000100;
278 nv_subdev(priv)->intr = nv04_fifo_intr;
279 nv_engine(priv)->cclass = &nv40_fifo_cclass;
280 nv_engine(priv)->sclass = nv40_fifo_sclass;
281 priv->base.pause = nv04_fifo_pause;
282 priv->base.start = nv04_fifo_start;
283 priv->ramfc_desc = nv40_ramfc;
284 return 0;
127} 285}
128 286
129static int 287static int
130nv40_fifo_init(struct drm_device *dev, int engine) 288nv40_fifo_init(struct nouveau_object *object)
131{ 289{
132 struct drm_nouveau_private *dev_priv = dev->dev_private; 290 struct nv04_fifo_priv *priv = (void *)object;
133 struct nv40_fifo_priv *priv = nv_engine(dev, engine); 291 struct nouveau_fb *pfb = nouveau_fb(object);
134 int i; 292 int ret;
135 293
136 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0); 294 ret = nouveau_fifo_init(&priv->base);
137 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO); 295 if (ret)
296 return ret;
138 297
139 nv_wr32(dev, 0x002040, 0x000000ff); 298 nv_wr32(priv, 0x002040, 0x000000ff);
140 nv_wr32(dev, 0x002044, 0x2101ffff); 299 nv_wr32(priv, 0x002044, 0x2101ffff);
141 nv_wr32(dev, 0x002058, 0x00000001); 300 nv_wr32(priv, 0x002058, 0x00000001);
142 301
143 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | 302 nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
144 ((dev_priv->ramht->bits - 9) << 16) | 303 ((priv->ramht->bits - 9) << 16) |
145 (dev_priv->ramht->gpuobj->addr >> 8)); 304 (priv->ramht->base.addr >> 8));
146 nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8); 305 nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
147 306
148 switch (dev_priv->chipset) { 307 switch (nv_device(priv)->chipset) {
149 case 0x47: 308 case 0x47:
150 case 0x49: 309 case 0x49:
151 case 0x4b: 310 case 0x4b:
152 nv_wr32(dev, 0x002230, 0x00000001); 311 nv_wr32(priv, 0x002230, 0x00000001);
153 case 0x40: 312 case 0x40:
154 case 0x41: 313 case 0x41:
155 case 0x42: 314 case 0x42:
156 case 0x43: 315 case 0x43:
157 case 0x45: 316 case 0x45:
158 case 0x48: 317 case 0x48:
159 nv_wr32(dev, 0x002220, 0x00030002); 318 nv_wr32(priv, 0x002220, 0x00030002);
160 break; 319 break;
161 default: 320 default:
162 nv_wr32(dev, 0x002230, 0x00000000); 321 nv_wr32(priv, 0x002230, 0x00000000);
163 nv_wr32(dev, 0x002220, ((nvfb_vram_size(dev) - 512 * 1024 + 322 nv_wr32(priv, 0x002220, ((pfb->ram.size - 512 * 1024 +
164 priv->ramfc->addr) >> 16) | 323 priv->ramfc->addr) >> 16) |
165 0x00030000); 324 0x00030000);
166 break; 325 break;
167 } 326 }
168 327
169 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels); 328 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
170 329
171 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff); 330 nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
172 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff); 331 nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
173
174 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
175 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
176 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
177
178 for (i = 0; i < priv->base.channels; i++) {
179 if (dev_priv->channels.ptr[i])
180 nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
181 }
182 332
333 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
334 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
335 nv_wr32(priv, NV03_PFIFO_CACHES, 1);
183 return 0; 336 return 0;
184} 337}
185 338
186int 339struct nouveau_oclass
187nv40_fifo_create(struct drm_device *dev) 340nv40_fifo_oclass = {
188{ 341 .handle = NV_ENGINE(FIFO, 0x40),
189 struct drm_nouveau_private *dev_priv = dev->dev_private; 342 .ofuncs = &(struct nouveau_ofuncs) {
190 struct nv40_fifo_priv *priv; 343 .ctor = nv40_fifo_ctor,
191 344 .dtor = nv04_fifo_dtor,
192 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 345 .init = nv40_fifo_init,
193 if (!priv) 346 .fini = _nouveau_fifo_fini,
194 return -ENOMEM; 347 },
195 348};
196 nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
197 nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
198
199 priv->base.base.destroy = nv04_fifo_destroy;
200 priv->base.base.init = nv40_fifo_init;
201 priv->base.base.fini = nv04_fifo_fini;
202 priv->base.base.context_new = nv40_fifo_context_new;
203 priv->base.base.context_del = nv04_fifo_context_del;
204 priv->base.channels = 31;
205 priv->ramfc_desc = nv40_ramfc;
206 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
207
208 nouveau_irq_register(dev, 8, nv04_fifo_isr);
209 return 0;
210}
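
Two details of nv40_fifo_init() are easy to misread: the chipset switch falls through on purpose (0x47/0x49/0x4b write 0x002230 and then also take the 0x40-family write to 0x002220), and the default case points the hardware at a RAMFC window in the last 512 KiB of VRAM. A sketch of that window computation, assuming the shift by 16 means 64 KiB units and 0x00030000 carries mode bits:

        #include <stdint.h>

        typedef uint32_t u32;
        typedef uint64_t u64;

        static u32
        nv40_ramfc_window(u64 vram_size, u32 ramfc_addr)
        {
                u64 base = vram_size - 512 * 1024 + ramfc_addr;
                return (u32)(base >> 16) | 0x00030000;
        }
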
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 7b5b1592bf61..4914c3b94413 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -1,126 +1,123 @@
1/* 1/*
2 * Copyright (C) 2012 Ben Skeggs. 2 * Copyright 2012 Red Hat Inc.
3 * All Rights Reserved.
4 * 3 *
5 * Permission is hereby granted, free of charge, to any person obtaining 4 * Permission is hereby granted, free of charge, to any person obtaining a
6 * a copy of this software and associated documentation files (the 5 * copy of this software and associated documentation files (the "Software"),
7 * "Software"), to deal in the Software without restriction, including 6 * to deal in the Software without restriction, including without limitation
8 * without limitation the rights to use, copy, modify, merge, publish, 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * distribute, sublicense, and/or sell copies of the Software, and to 8 * and/or sell copies of the Software, and to permit persons to whom the
10 * permit persons to whom the Software is furnished to do so, subject to 9 * Software is furnished to do so, subject to the following conditions:
11 * the following conditions:
12 * 10 *
13 * The above copyright notice and this permission notice (including the 11 * The above copyright notice and this permission notice shall be included in
14 * next paragraph) shall be included in all copies or substantial 12 * all copies or substantial portions of the Software.
15 * portions of the Software.
16 * 13 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
24 * 21 *
22 * Authors: Ben Skeggs
25 */ 23 */
26 24
27#include "drmP.h" 25#include <core/client.h>
28#include "drm.h" 26#include <core/engctx.h>
29#include "nouveau_drv.h"
30#include <engine/fifo.h>
31#include <core/ramht.h> 27#include <core/ramht.h>
28#include <core/class.h>
29#include <core/math.h>
32 30
33struct nv50_fifo_priv { 31#include <subdev/timer.h>
34 struct nouveau_fifo_priv base; 32#include <subdev/bar.h>
35 struct nouveau_gpuobj *playlist[2];
36 int cur_playlist;
37};
38 33
39struct nv50_fifo_chan { 34#include <engine/dmaobj.h>
40 struct nouveau_fifo_chan base; 35#include <engine/fifo.h>
41}; 36
37#include "nv50.h"
38
39/*******************************************************************************
40 * FIFO channel objects
41 ******************************************************************************/
42 42
43void 43void
44nv50_fifo_playlist_update(struct drm_device *dev) 44nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
45{ 45{
46 struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO); 46 struct nouveau_bar *bar = nouveau_bar(priv);
47 struct nouveau_gpuobj *cur; 47 struct nouveau_gpuobj *cur;
48 int i, p; 48 int i, p;
49 49
50 cur = priv->playlist[priv->cur_playlist]; 50 cur = priv->playlist[priv->cur_playlist];
51 priv->cur_playlist = !priv->cur_playlist; 51 priv->cur_playlist = !priv->cur_playlist;
52 52
53 for (i = 0, p = 0; i < priv->base.channels; i++) { 53 for (i = priv->base.min, p = 0; i < priv->base.max; i++) {
54 if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000) 54 if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000)
55 nv_wo32(cur, p++ * 4, i); 55 nv_wo32(cur, p++ * 4, i);
56 } 56 }
57 57
58 nvimem_flush(dev); 58 bar->flush(bar);
59 59
60 nv_wr32(dev, 0x0032f4, cur->addr >> 12); 60 nv_wr32(priv, 0x0032f4, cur->addr >> 12);
61 nv_wr32(dev, 0x0032ec, p); 61 nv_wr32(priv, 0x0032ec, p);
62 nv_wr32(dev, 0x002500, 0x00000101); 62 nv_wr32(priv, 0x002500, 0x00000101);
63} 63}
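
nv50_fifo_playlist_update() double-buffers the runlist: it rebuilds the list in the buffer the hardware is not currently reading, flips cur_playlist for the next rebuild, then submits the fresh buffer. A self-contained sketch of the scheme; chan_ctrl[] and submit() stand in for the 0x002600 reads and the 0x0032f4/0x0032ec/0x002500 writes:

        #include <stdint.h>

        typedef uint32_t u32;

        #define MAX_CHID 128

        static u32 chan_ctrl[MAX_CHID];    /* stand-in for 0x002600 + i*4 */
        static u32 playlist[2][MAX_CHID];  /* the two runlist buffers */
        static int cur_playlist;

        static void submit(u32 *list, int nr) { (void)list; (void)nr; }

        static void
        playlist_update(int min, int max)
        {
                u32 *next = playlist[cur_playlist];
                int i, p = 0;

                cur_playlist = !cur_playlist;  /* flip for the next rebuild */
                for (i = min; i < max; i++) {
                        if (chan_ctrl[i] & 0x80000000) /* channel resident? */
                                next[p++] = i;
                }
                submit(next, p);
        }
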
64 64
65static int 65static int
66nv50_fifo_context_new(struct nouveau_channel *chan, int engine) 66nv50_fifo_context_attach(struct nouveau_object *parent,
67 struct nouveau_object *object)
67{ 68{
68 struct nv50_fifo_priv *priv = nv_engine(chan->dev, engine); 69 struct nouveau_bar *bar = nouveau_bar(parent);
69 struct nv50_fifo_chan *fctx; 70 struct nv50_fifo_base *base = (void *)parent->parent;
70 struct drm_device *dev = chan->dev; 71 struct nouveau_gpuobj *ectx = (void *)object;
71 struct drm_nouveau_private *dev_priv = dev->dev_private; 72 u64 limit = ectx->addr + ectx->size - 1;
72 u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4; 73 u64 start = ectx->addr;
73 u64 instance = chan->ramin->addr >> 12; 74 u32 addr;
74 unsigned long flags; 75
75 int ret = 0, i; 76 switch (nv_engidx(object->engine)) {
76 77 case NVDEV_ENGINE_SW : return 0;
77 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); 78 case NVDEV_ENGINE_GR : addr = 0x0000; break;
78 if (!fctx) 79 case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
79 return -ENOMEM; 80 default:
80 nvvm_engref(chan->vm, engine, 1); 81 return -EINVAL;
81
82 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
83 NV50_USER(chan->id), PAGE_SIZE);
84 if (!chan->user) {
85 ret = -ENOMEM;
86 goto error;
87 } 82 }
88 83
89 for (i = 0; i < 0x100; i += 4) 84 nv_wo32(base->eng, addr + 0x00, 0x00190000);
90 nv_wo32(chan->ramin, i, 0x00000000); 85 nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
91 nv_wo32(chan->ramin, 0x3c, 0x403f6078); 86 nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
92 nv_wo32(chan->ramin, 0x40, 0x00000000); 87 nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
93 nv_wo32(chan->ramin, 0x44, 0x01003fff); 88 upper_32_bits(start));
94 nv_wo32(chan->ramin, 0x48, chan->pushbuf->node->offset >> 4); 89 nv_wo32(base->eng, addr + 0x10, 0x00000000);
95 nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset)); 90 nv_wo32(base->eng, addr + 0x14, 0x00000000);
96 nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) | 91 bar->flush(bar);
97 drm_order(chan->dma.ib_max + 1) << 16); 92 return 0;
98 nv_wo32(chan->ramin, 0x60, 0x7fffffff);
99 nv_wo32(chan->ramin, 0x78, 0x00000000);
100 nv_wo32(chan->ramin, 0x7c, 0x30000001);
101 nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
102 (4 << 24) /* SEARCH_FULL */ |
103 (chan->ramht->gpuobj->node->offset >> 4));
104
105 nvimem_flush(dev);
106
107 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
108 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
109 nv50_fifo_playlist_update(dev);
110 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
111
112error:
113 if (ret)
114 priv->base.base.context_del(chan, engine);
115 return ret;
116} 93}
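
The attach path writes a six-word descriptor into the channel's engine block: a flags word, then the context's start and limit addresses split across 32-bit words, with the upper bits of both packed into the fourth word. Restated as a sketch over a plain u32 array (the driver writes through nv_wo32() into instance memory; addresses are assumed to fit in 40 bits):

        #include <stdint.h>

        typedef uint32_t u32;
        typedef uint64_t u64;

        static void
        write_eng_ctx(u32 *eng, u64 start, u64 size)
        {
                u64 limit = start + size - 1;

                eng[0] = 0x00190000;                  /* valid + flags */
                eng[1] = (u32)limit;                  /* limit[31:0] */
                eng[2] = (u32)start;                  /* start[31:0] */
                eng[3] = ((u32)(limit >> 32) << 24)   /* limit[39:32] */
                       |  (u32)(start >> 32);         /* start[39:32] */
                eng[4] = 0x00000000;
                eng[5] = 0x00000000;
        }
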
117 94
118static bool 95static int
119nv50_fifo_kickoff(struct nouveau_channel *chan) 96nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
97 struct nouveau_object *object)
120{ 98{
121 struct drm_device *dev = chan->dev; 99 struct nouveau_bar *bar = nouveau_bar(parent);
122 bool done = true; 100 struct nv50_fifo_priv *priv = (void *)parent->engine;
123 u32 me; 101 struct nv50_fifo_base *base = (void *)parent->parent;
102 struct nv50_fifo_chan *chan = (void *)parent;
103 u32 addr, me;
104 int ret = 0;
105
106 switch (nv_engidx(object->engine)) {
107 case NVDEV_ENGINE_SW : return 0;
108 case NVDEV_ENGINE_GR : addr = 0x0000; break;
109 case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
110 default:
111 return -EINVAL;
112 }
113
114 nv_wo32(base->eng, addr + 0x00, 0x00000000);
115 nv_wo32(base->eng, addr + 0x04, 0x00000000);
116 nv_wo32(base->eng, addr + 0x08, 0x00000000);
117 nv_wo32(base->eng, addr + 0x0c, 0x00000000);
118 nv_wo32(base->eng, addr + 0x10, 0x00000000);
119 nv_wo32(base->eng, addr + 0x14, 0x00000000);
120 bar->flush(bar);
124 121
125 /* HW bug workaround: 122 /* HW bug workaround:
126 * 123 *
@@ -134,159 +131,308 @@ nv50_fifo_kickoff(struct nouveau_channel *chan)
134 * there's also an "ignore these engines" bitmask reg we can use 131 * there's also an "ignore these engines" bitmask reg we can use
135 * if we hit the issue there.. 132 * if we hit the issue there..
136 */ 133 */
137 134 me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001);
138 /* PME: make sure engine is enabled */
139 me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
140 135
141 /* do the kickoff... */ 136 /* do the kickoff... */
142 nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12); 137 nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
143 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) { 138 if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
144 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id); 139 nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
145 done = false; 140 if (suspend)
141 ret = -EBUSY;
146 } 142 }
147 143
148 /* restore any engine states we changed, and exit */ 144 nv_wr32(priv, 0x00b860, me);
149 nv_wr32(dev, 0x00b860, me); 145 return ret;
150 return done;
151} 146}
152 147
153static void 148static int
154nv50_fifo_context_del(struct nouveau_channel *chan, int engine) 149nv50_fifo_object_attach(struct nouveau_object *parent,
150 struct nouveau_object *object, u32 handle)
155{ 151{
156 struct nv50_fifo_chan *fctx = chan->engctx[engine]; 152 struct nv50_fifo_chan *chan = (void *)parent;
157 struct drm_device *dev = chan->dev; 153 u32 context;
158 struct drm_nouveau_private *dev_priv = dev->dev_private; 154
159 unsigned long flags; 155 if (nv_iclass(object, NV_GPUOBJ_CLASS))
160 156 context = nv_gpuobj(object)->node->offset >> 4;
161 /* remove channel from playlist, will context switch if active */ 157 else
162 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 158 context = 0x00000004; /* just non-zero */
163 nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000); 159
164 nv50_fifo_playlist_update(dev); 160 switch (nv_engidx(object->engine)) {
165 161 case NVDEV_ENGINE_DMAOBJ:
166 /* tell any engines on this channel to unload their contexts */ 162 case NVDEV_ENGINE_SW : context |= 0x00000000; break;
167 nv50_fifo_kickoff(chan); 163 case NVDEV_ENGINE_GR : context |= 0x00100000; break;
168 164 case NVDEV_ENGINE_MPEG : context |= 0x00200000; break;
169 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000); 165 default:
170 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 166 return -EINVAL;
171
172 /* clean up */
173 if (chan->user) {
174 iounmap(chan->user);
175 chan->user = NULL;
176 } 167 }
177 168
178 nvvm_engref(chan->vm, engine, -1); 169 return nouveau_ramht_insert(chan->ramht, 0, handle, context);
179 chan->engctx[engine] = NULL; 170}
180 kfree(fctx); 171
172void
173nv50_fifo_object_detach(struct nouveau_object *parent, int cookie)
174{
175 struct nv50_fifo_chan *chan = (void *)parent;
176 nouveau_ramht_remove(chan->ramht, cookie);
181} 177}
182 178
183static int 179static int
184nv50_fifo_init(struct drm_device *dev, int engine) 180nv50_fifo_chan_ctor(struct nouveau_object *parent,
181 struct nouveau_object *engine,
182 struct nouveau_oclass *oclass, void *data, u32 size,
183 struct nouveau_object **pobject)
185{ 184{
186 struct drm_nouveau_private *dev_priv = dev->dev_private; 185 struct nv_channel_ind_class *args = data;
187 u32 instance; 186 struct nouveau_bar *bar = nouveau_bar(parent);
188 int i; 187 struct nv50_fifo_base *base = (void *)parent;
189 188 struct nv50_fifo_chan *chan;
190 nv_mask(dev, 0x000200, 0x00000100, 0x00000000); 189 u64 ioffset, ilength;
191 nv_mask(dev, 0x000200, 0x00000100, 0x00000100); 190 int ret;
192 nv_wr32(dev, 0x00250c, 0x6f3cfc34); 191
193 nv_wr32(dev, 0x002044, 0x01003fff); 192 if (size < sizeof(*args))
194 193 return -EINVAL;
195 nv_wr32(dev, 0x002100, 0xffffffff); 194
196 nv_wr32(dev, 0x002140, 0xffffffff); 195 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
197 196 0x2000, args->pushbuf,
198 for (i = 0; i < 128; i++) { 197 (1 << NVDEV_ENGINE_DMAOBJ) |
199 struct nouveau_channel *chan = dev_priv->channels.ptr[i]; 198 (1 << NVDEV_ENGINE_SW) |
200 if (chan && chan->engctx[engine]) 199 (1 << NVDEV_ENGINE_GR) |
201 instance = 0x80000000 | chan->ramin->addr >> 12; 200 (1 << NVDEV_ENGINE_MPEG), &chan);
202 else 201 *pobject = nv_object(chan);
203 instance = 0x00000000; 202 if (ret)
204 nv_wr32(dev, 0x002600 + (i * 4), instance); 203 return ret;
205 }
206 204
207 nv50_fifo_playlist_update(dev); 205 nv_parent(chan)->context_attach = nv50_fifo_context_attach;
206 nv_parent(chan)->context_detach = nv50_fifo_context_detach;
207 nv_parent(chan)->object_attach = nv50_fifo_object_attach;
208 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
208 209
209 nv_wr32(dev, 0x003200, 1); 210 ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
210 nv_wr32(dev, 0x003250, 1); 211 if (ret)
211 nv_wr32(dev, 0x002500, 1); 212 return ret;
213
214 ioffset = args->ioffset;
215 ilength = log2i(args->ilength / 8);
216
217 nv_wo32(base->ramfc, 0x3c, 0x403f6078);
218 nv_wo32(base->ramfc, 0x44, 0x01003fff);
219 nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
220 nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
221 nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
222 nv_wo32(base->ramfc, 0x60, 0x7fffffff);
223 nv_wo32(base->ramfc, 0x78, 0x00000000);
224 nv_wo32(base->ramfc, 0x7c, 0x30000001);
225 nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
226 (4 << 24) /* SEARCH_FULL */ |
227 (chan->ramht->base.node->offset >> 4));
228 bar->flush(bar);
212 return 0; 229 return 0;
213} 230}
214 231
232void
233nv50_fifo_chan_dtor(struct nouveau_object *object)
234{
235 struct nv50_fifo_chan *chan = (void *)object;
236 nouveau_ramht_ref(NULL, &chan->ramht);
237 nouveau_fifo_channel_destroy(&chan->base);
238}
239
215static int 240static int
216nv50_fifo_fini(struct drm_device *dev, int engine, bool suspend) 241nv50_fifo_chan_init(struct nouveau_object *object)
217{ 242{
218 struct drm_nouveau_private *dev_priv = dev->dev_private; 243 struct nv50_fifo_priv *priv = (void *)object->engine;
219 struct nv50_fifo_priv *priv = nv_engine(dev, engine); 244 struct nv50_fifo_base *base = (void *)object->parent;
220 int i; 245 struct nv50_fifo_chan *chan = (void *)object;
221 246 struct nouveau_gpuobj *ramfc = base->ramfc;
222 /* set playlist length to zero, fifo will unload context */ 247 u32 chid = chan->base.chid;
223 nv_wr32(dev, 0x0032ec, 0); 248 int ret;
224
225 /* tell all connected engines to unload their contexts */
226 for (i = 0; i < priv->base.channels; i++) {
227 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
228 if (chan && !nv50_fifo_kickoff(chan))
229 return -EBUSY;
230 }
231 249
232 nv_wr32(dev, 0x002140, 0); 250 ret = nouveau_fifo_channel_init(&chan->base);
251 if (ret)
252 return ret;
253
254 nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
255 nv50_fifo_playlist_update(priv);
233 return 0; 256 return 0;
234} 257}
235 258
236void 259int
237nv50_fifo_tlb_flush(struct drm_device *dev, int engine) 260nv50_fifo_chan_fini(struct nouveau_object *object, bool suspend)
238{ 261{
239 nv50_vm_flush_engine(dev, 5); 262 struct nv50_fifo_priv *priv = (void *)object->engine;
263 struct nv50_fifo_chan *chan = (void *)object;
264 u32 chid = chan->base.chid;
265
266 /* remove channel from playlist, fifo will unload context */
267 nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
268 nv50_fifo_playlist_update(priv);
269 nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000);
270
271 return nouveau_fifo_channel_fini(&chan->base, suspend);
240} 272}
241 273
242void 274static struct nouveau_ofuncs
243nv50_fifo_destroy(struct drm_device *dev, int engine) 275nv50_fifo_ofuncs = {
276 .ctor = nv50_fifo_chan_ctor,
277 .dtor = nv50_fifo_chan_dtor,
278 .init = nv50_fifo_chan_init,
279 .fini = nv50_fifo_chan_fini,
280 .rd32 = _nouveau_fifo_channel_rd32,
281 .wr32 = _nouveau_fifo_channel_wr32,
282};
283
284static struct nouveau_oclass
285nv50_fifo_sclass[] = {
286 { 0x506f, &nv50_fifo_ofuncs },
287 {}
288};
289
290/*******************************************************************************
291 * FIFO context - basically just the instmem reserved for the channel
292 ******************************************************************************/
293
294static int
295nv50_fifo_context_ctor(struct nouveau_object *parent,
296 struct nouveau_object *engine,
297 struct nouveau_oclass *oclass, void *data, u32 size,
298 struct nouveau_object **pobject)
244{ 299{
245 struct drm_nouveau_private *dev_priv = dev->dev_private; 300 struct nv50_fifo_base *base;
246 struct nv50_fifo_priv *priv = nv_engine(dev, engine); 301 int ret;
247 302
248 nouveau_irq_unregister(dev, 8); 303 ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
304 0x1000, NVOBJ_FLAG_HEAP, &base);
305 *pobject = nv_object(base);
306 if (ret)
307 return ret;
249 308
250 nouveau_gpuobj_ref(NULL, &priv->playlist[0]); 309 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0x1000,
251 nouveau_gpuobj_ref(NULL, &priv->playlist[1]); 310 NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
311 if (ret)
312 return ret;
313
314 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1200, 0,
315 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
316 if (ret)
317 return ret;
318
319 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0, 0,
320 &base->pgd);
321 if (ret)
322 return ret;
323
324 ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
325 if (ret)
326 return ret;
252 327
253 dev_priv->eng[engine] = NULL; 328 return 0;
254 kfree(priv);
255} 329}
256 330
257int 331void
258nv50_fifo_create(struct drm_device *dev) 332nv50_fifo_context_dtor(struct nouveau_object *object)
333{
334 struct nv50_fifo_base *base = (void *)object;
335 nouveau_vm_ref(NULL, &base->vm, base->pgd);
336 nouveau_gpuobj_ref(NULL, &base->pgd);
337 nouveau_gpuobj_ref(NULL, &base->eng);
338 nouveau_gpuobj_ref(NULL, &base->ramfc);
339 nouveau_gpuobj_ref(NULL, &base->cache);
340 nouveau_fifo_context_destroy(&base->base);
341}
342
343static struct nouveau_oclass
344nv50_fifo_cclass = {
345 .handle = NV_ENGCTX(FIFO, 0x50),
346 .ofuncs = &(struct nouveau_ofuncs) {
347 .ctor = nv50_fifo_context_ctor,
348 .dtor = nv50_fifo_context_dtor,
349 .init = _nouveau_fifo_context_init,
350 .fini = _nouveau_fifo_context_fini,
351 .rd32 = _nouveau_fifo_context_rd32,
352 .wr32 = _nouveau_fifo_context_wr32,
353 },
354};
355
356/*******************************************************************************
357 * PFIFO engine
358 ******************************************************************************/
359
360static int
361nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
362 struct nouveau_oclass *oclass, void *data, u32 size,
363 struct nouveau_object **pobject)
259{ 364{
260 struct drm_nouveau_private *dev_priv = dev->dev_private;
261 struct nv50_fifo_priv *priv; 365 struct nv50_fifo_priv *priv;
262 int ret; 366 int ret;
263 367
264 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 368 ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
265 if (!priv) 369 *pobject = nv_object(priv);
266 return -ENOMEM;
267
268 priv->base.base.destroy = nv50_fifo_destroy;
269 priv->base.base.init = nv50_fifo_init;
270 priv->base.base.fini = nv50_fifo_fini;
271 priv->base.base.context_new = nv50_fifo_context_new;
272 priv->base.base.context_del = nv50_fifo_context_del;
273 priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
274 priv->base.channels = 127;
275 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
276
277 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
278 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
279 if (ret) 370 if (ret)
280 goto error; 371 return ret;
281 372
282 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000, 373 ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
283 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]); 374 &priv->playlist[0]);
284 if (ret) 375 if (ret)
285 goto error; 376 return ret;
286 377
287 nouveau_irq_register(dev, 8, nv04_fifo_isr); 378 ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
288error: 379 &priv->playlist[1]);
289 if (ret) 380 if (ret)
290 priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO); 381 return ret;
291 return ret; 382
383 nv_subdev(priv)->unit = 0x00000100;
384 nv_subdev(priv)->intr = nv04_fifo_intr;
385 nv_engine(priv)->cclass = &nv50_fifo_cclass;
386 nv_engine(priv)->sclass = nv50_fifo_sclass;
387 return 0;
388}
389
390void
391nv50_fifo_dtor(struct nouveau_object *object)
392{
393 struct nv50_fifo_priv *priv = (void *)object;
394
395 nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
396 nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
397
398 nouveau_fifo_destroy(&priv->base);
292} 399}
400
401int
402nv50_fifo_init(struct nouveau_object *object)
403{
404 struct nv50_fifo_priv *priv = (void *)object;
405 int ret, i;
406
407 ret = nouveau_fifo_init(&priv->base);
408 if (ret)
409 return ret;
410
411 nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
412 nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
413 nv_wr32(priv, 0x00250c, 0x6f3cfc34);
414 nv_wr32(priv, 0x002044, 0x01003fff);
415
416 nv_wr32(priv, 0x002100, 0xffffffff);
417 nv_wr32(priv, 0x002140, 0xffffffff);
418
419 for (i = 0; i < 128; i++)
420 nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
421 nv50_fifo_playlist_update(priv);
422
423 nv_wr32(priv, 0x003200, 0x00000001);
424 nv_wr32(priv, 0x003250, 0x00000001);
425 nv_wr32(priv, 0x002500, 0x00000001);
426 return 0;
427}
428
429struct nouveau_oclass
430nv50_fifo_oclass = {
431 .handle = NV_ENGINE(FIFO, 0x50),
432 .ofuncs = &(struct nouveau_ofuncs) {
433 .ctor = nv50_fifo_ctor,
434 .dtor = nv50_fifo_dtor,
435 .init = nv50_fifo_init,
436 .fini = _nouveau_fifo_fini,
437 },
438};
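
Every file in this commit now ends the same way: a nouveau_oclass binding a class handle to a vtable of lifecycle hooks, which the core drives generically. A compact sketch of that shape, with the hook signatures as they appear at the call sites in this diff; the object_cycle() driver at the bottom is illustrative, not core code:

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdint.h>

        typedef uint32_t u32;

        struct nouveau_object;
        struct nouveau_oclass;

        struct nouveau_ofuncs {
                int  (*ctor)(struct nouveau_object *parent,
                             struct nouveau_object *engine,
                             struct nouveau_oclass *oclass, void *data,
                             u32 size, struct nouveau_object **pobject);
                void (*dtor)(struct nouveau_object *);
                int  (*init)(struct nouveau_object *);
                int  (*fini)(struct nouveau_object *, bool suspend);
                u32  (*rd32)(struct nouveau_object *, u32 addr);
                void (*wr32)(struct nouveau_object *, u32 addr, u32 data);
        };

        struct nouveau_oclass {
                u32 handle;                    /* e.g. NV_ENGINE(FIFO, 0x50) */
                struct nouveau_ofuncs *ofuncs;
        };

        /* The core brings any object up and down through the same hooks. */
        static int
        object_cycle(struct nouveau_oclass *oclass,
                     struct nouveau_object *parent,
                     struct nouveau_object *engine, void *args, u32 argc)
        {
                struct nouveau_object *object = NULL;
                int ret;

                ret = oclass->ofuncs->ctor(parent, engine, oclass,
                                           args, argc, &object);
                if (ret)
                        return ret;

                ret = oclass->ofuncs->init(object);
                if (ret == 0)
                        oclass->ofuncs->fini(object, false);
                oclass->ofuncs->dtor(object);
                return ret;
        }
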
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h
new file mode 100644
index 000000000000..3a9ceb315c20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h
@@ -0,0 +1,36 @@
1#ifndef __NV50_FIFO_H__
2#define __NV50_FIFO_H__
3
4struct nv50_fifo_priv {
5 struct nouveau_fifo base;
6 struct nouveau_gpuobj *playlist[2];
7 int cur_playlist;
8};
9
10struct nv50_fifo_base {
11 struct nouveau_fifo_base base;
12 struct nouveau_gpuobj *ramfc;
13 struct nouveau_gpuobj *cache;
14 struct nouveau_gpuobj *eng;
15 struct nouveau_gpuobj *pgd;
16 struct nouveau_vm *vm;
17};
18
19struct nv50_fifo_chan {
20 struct nouveau_fifo_chan base;
21 u32 subc[8];
22 struct nouveau_ramht *ramht;
23};
24
25void nv50_fifo_playlist_update(struct nv50_fifo_priv *);
26
27void nv50_fifo_object_detach(struct nouveau_object *, int);
28void nv50_fifo_chan_dtor(struct nouveau_object *);
29int nv50_fifo_chan_fini(struct nouveau_object *, bool);
30
31void nv50_fifo_context_dtor(struct nouveau_object *);
32
33void nv50_fifo_dtor(struct nouveau_object *);
34int nv50_fifo_init(struct nouveau_object *);
35
36#endif
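
The casts used throughout these files, such as (void *)parent->parent for the nv50_fifo_base or (void *)object->engine for the priv, work because each derived struct embeds its base type as its first member, so base and derived pointers coincide. A compile-and-run sketch of the idiom with a stubbed base type (the demo struct name is illustrative):

        #include <assert.h>
        #include <stddef.h>

        struct nouveau_fifo_base { int stub; };  /* stand-in for the real base */

        struct fifo_base_demo {
                struct nouveau_fifo_base base;   /* must remain the first member */
                int extra_state;                 /* ramfc, eng, pgd, vm, ... */
        };

        int main(void)
        {
                struct fifo_base_demo obj;
                struct nouveau_fifo_base *b = &obj.base;
                struct fifo_base_demo *d = (void *)b;   /* the downcast pattern */

                assert(d == &obj);
                assert(offsetof(struct fifo_base_demo, base) == 0);
                return 0;
        }
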
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 63a4941e285c..765affb12666 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -1,249 +1,343 @@
1/* 1/*
2 * Copyright (C) 2012 Ben Skeggs. 2 * Copyright 2012 Red Hat Inc.
3 * All Rights Reserved.
4 * 3 *
5 * Permission is hereby granted, free of charge, to any person obtaining 4 * Permission is hereby granted, free of charge, to any person obtaining a
6 * a copy of this software and associated documentation files (the 5 * copy of this software and associated documentation files (the "Software"),
7 * "Software"), to deal in the Software without restriction, including 6 * to deal in the Software without restriction, including without limitation
8 * without limitation the rights to use, copy, modify, merge, publish, 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * distribute, sublicense, and/or sell copies of the Software, and to 8 * and/or sell copies of the Software, and to permit persons to whom the
10 * permit persons to whom the Software is furnished to do so, subject to 9 * Software is furnished to do so, subject to the following conditions:
11 * the following conditions:
12 * 10 *
13 * The above copyright notice and this permission notice (including the 11 * The above copyright notice and this permission notice shall be included in
14 * next paragraph) shall be included in all copies or substantial 12 * all copies or substantial portions of the Software.
15 * portions of the Software.
16 * 13 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
24 * 21 *
22 * Authors: Ben Skeggs
25 */ 23 */
26 24
27#include "drmP.h" 25#include <core/os.h>
28#include "drm.h" 26#include <core/client.h>
29#include "nouveau_drv.h" 27#include <core/engctx.h>
30#include <engine/fifo.h>
31#include <core/ramht.h> 28#include <core/ramht.h>
29#include <core/class.h>
30#include <core/math.h>
32 31
33struct nv84_fifo_priv { 32#include <subdev/timer.h>
34 struct nouveau_fifo_priv base; 33#include <subdev/bar.h>
35 struct nouveau_gpuobj *playlist[2];
36 int cur_playlist;
37};
38 34
39struct nv84_fifo_chan { 35#include <engine/dmaobj.h>
40 struct nouveau_fifo_chan base; 36#include <engine/fifo.h>
41 struct nouveau_gpuobj *ramfc; 37
42 struct nouveau_gpuobj *cache; 38#include "nv50.h"
43}; 39
40/*******************************************************************************
41 * FIFO channel objects
42 ******************************************************************************/
44 43
45static int 44static int
46nv84_fifo_context_new(struct nouveau_channel *chan, int engine) 45nv84_fifo_context_attach(struct nouveau_object *parent,
46 struct nouveau_object *object)
47{ 47{
48 struct nv84_fifo_priv *priv = nv_engine(chan->dev, engine); 48 struct nouveau_bar *bar = nouveau_bar(parent);
49 struct nv84_fifo_chan *fctx; 49 struct nv50_fifo_base *base = (void *)parent->parent;
50 struct drm_device *dev = chan->dev; 50 struct nouveau_gpuobj *ectx = (void *)object;
51 struct drm_nouveau_private *dev_priv = dev->dev_private; 51 u64 limit = ectx->addr + ectx->size - 1;
52 u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4; 52 u64 start = ectx->addr;
53 u64 instance; 53 u32 addr;
54 unsigned long flags; 54
55 int ret; 55 switch (nv_engidx(object->engine)) {
56 case NVDEV_ENGINE_SW : return 0;
57 case NVDEV_ENGINE_GR : addr = 0x0020; break;
58 case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
59 case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break;
60 case NVDEV_ENGINE_COPY0: addr = 0x00c0; break;
61 default:
62 return -EINVAL;
63 }
56 64
57 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); 65 nv_wo32(base->eng, addr + 0x00, 0x00190000);
58 if (!fctx) 66 nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
59 return -ENOMEM; 67 nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
60 nvvm_engref(chan->vm, engine, 1); 68 nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
69 upper_32_bits(start));
70 nv_wo32(base->eng, addr + 0x10, 0x00000000);
71 nv_wo32(base->eng, addr + 0x14, 0x00000000);
72 bar->flush(bar);
73 return 0;
74}
61 75
62 chan->user = ioremap(pci_resource_start(dev->pdev, 0) + 76static int
63 NV50_USER(chan->id), PAGE_SIZE); 77nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
64 if (!chan->user) { 78 struct nouveau_object *object)
65 ret = -ENOMEM; 79{
66 goto error; 80 struct nouveau_bar *bar = nouveau_bar(parent);
81 struct nv50_fifo_priv *priv = (void *)parent->engine;
82 struct nv50_fifo_base *base = (void *)parent->parent;
83 struct nv50_fifo_chan *chan = (void *)parent;
84 u32 addr;
85
86 switch (nv_engidx(object->engine)) {
87 case NVDEV_ENGINE_SW : return 0;
88 case NVDEV_ENGINE_GR : addr = 0x0020; break;
89 case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
90 case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break;
91 case NVDEV_ENGINE_COPY0: addr = 0x00c0; break;
92 default:
93 return -EINVAL;
67 } 94 }
68 95
69 ret = nouveau_gpuobj_new(dev, chan, 256, 256, NVOBJ_FLAG_ZERO_ALLOC | 96 nv_wo32(base->eng, addr + 0x00, 0x00000000);
70 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc); 97 nv_wo32(base->eng, addr + 0x04, 0x00000000);
71 if (ret) 98 nv_wo32(base->eng, addr + 0x08, 0x00000000);
72 goto error; 99 nv_wo32(base->eng, addr + 0x0c, 0x00000000);
73 100 nv_wo32(base->eng, addr + 0x10, 0x00000000);
74 instance = fctx->ramfc->addr >> 8; 101 nv_wo32(base->eng, addr + 0x14, 0x00000000);
102 bar->flush(bar);
103
104 nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
105 if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
106 nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
107 if (suspend)
108 return -EBUSY;
109 }
110 return 0;
111}
75 112
76 ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache); 113static int
77 if (ret) 114nv84_fifo_object_attach(struct nouveau_object *parent,
78 goto error; 115 struct nouveau_object *object, u32 handle)
79 116{
80 nv_wo32(fctx->ramfc, 0x3c, 0x403f6078); 117 struct nv50_fifo_chan *chan = (void *)parent;
81 nv_wo32(fctx->ramfc, 0x40, 0x00000000); 118 u32 context;
82 nv_wo32(fctx->ramfc, 0x44, 0x01003fff); 119
83 nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->node->offset >> 4); 120 if (nv_iclass(object, NV_GPUOBJ_CLASS))
84 nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset)); 121 context = nv_gpuobj(object)->node->offset >> 4;
-        nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) |
-                           drm_order(chan->dma.ib_max + 1) << 16);
-        nv_wo32(fctx->ramfc, 0x60, 0x7fffffff);
-        nv_wo32(fctx->ramfc, 0x78, 0x00000000);
-        nv_wo32(fctx->ramfc, 0x7c, 0x30000001);
-        nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-                           (4 << 24) /* SEARCH_FULL */ |
-                           (chan->ramht->gpuobj->node->offset >> 4));
-        nv_wo32(fctx->ramfc, 0x88, fctx->cache->addr >> 10);
-        nv_wo32(fctx->ramfc, 0x98, chan->ramin->addr >> 12);
+        else
+                context = 0x00000004; /* just non-zero */
+
+        switch (nv_engidx(object->engine)) {
+        case NVDEV_ENGINE_DMAOBJ:
+        case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
+        case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
+        case NVDEV_ENGINE_MPEG  :
+        case NVDEV_ENGINE_PPP   : context |= 0x00200000; break;
+        case NVDEV_ENGINE_ME    :
+        case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break;
+        case NVDEV_ENGINE_VP    : context |= 0x00400000; break;
+        case NVDEV_ENGINE_CRYPT :
+        case NVDEV_ENGINE_UNK1C1: context |= 0x00500000; break;
+        case NVDEV_ENGINE_BSP   : context |= 0x00600000; break;
+        default:
+                return -EINVAL;
+        }
 
-        nv_wo32(chan->ramin, 0x00, chan->id);
-        nv_wo32(chan->ramin, 0x04, fctx->ramfc->addr >> 8);
+        return nouveau_ramht_insert(chan->ramht, 0, handle, context);
+}
 
-        nvimem_flush(dev);
+static int
+nv84_fifo_chan_ctor(struct nouveau_object *parent,
+                    struct nouveau_object *engine,
+                    struct nouveau_oclass *oclass, void *data, u32 size,
+                    struct nouveau_object **pobject)
+{
+        struct nouveau_bar *bar = nouveau_bar(parent);
+        struct nv50_fifo_base *base = (void *)parent;
+        struct nv50_fifo_chan *chan;
+        struct nv_channel_ind_class *args = data;
+        u64 ioffset, ilength;
+        int ret;
 
-        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-        nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
-        nv50_fifo_playlist_update(dev);
-        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+        if (size < sizeof(*args))
+                return -EINVAL;
+
+        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+                                          0x2000, args->pushbuf,
+                                          (1 << NVDEV_ENGINE_DMAOBJ) |
+                                          (1 << NVDEV_ENGINE_SW) |
+                                          (1 << NVDEV_ENGINE_GR) |
+                                          (1 << NVDEV_ENGINE_MPEG) |
+                                          (1 << NVDEV_ENGINE_ME) |
+                                          (1 << NVDEV_ENGINE_VP) |
+                                          (1 << NVDEV_ENGINE_CRYPT) |
+                                          (1 << NVDEV_ENGINE_BSP) |
+                                          (1 << NVDEV_ENGINE_PPP) |
+                                          (1 << NVDEV_ENGINE_COPY0) |
+                                          (1 << NVDEV_ENGINE_UNK1C1), &chan);
+        *pobject = nv_object(chan);
+        if (ret)
+                return ret;
 
-error:
+        ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
         if (ret)
-                priv->base.base.context_del(chan, engine);
-        return ret;
+                return ret;
+
+        nv_parent(chan)->context_attach = nv84_fifo_context_attach;
+        nv_parent(chan)->context_detach = nv84_fifo_context_detach;
+        nv_parent(chan)->object_attach = nv84_fifo_object_attach;
+        nv_parent(chan)->object_detach = nv50_fifo_object_detach;
+
+        ioffset = args->ioffset;
+        ilength = log2i(args->ilength / 8);
+
+        nv_wo32(base->ramfc, 0x3c, 0x403f6078);
+        nv_wo32(base->ramfc, 0x44, 0x01003fff);
+        nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+        nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
+        nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+        nv_wo32(base->ramfc, 0x60, 0x7fffffff);
+        nv_wo32(base->ramfc, 0x78, 0x00000000);
+        nv_wo32(base->ramfc, 0x7c, 0x30000001);
+        nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+                                   (4 << 24) /* SEARCH_FULL */ |
+                                   (chan->ramht->base.node->offset >> 4));
+        nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
+        nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
+        bar->flush(bar);
+        return 0;
 }
 
-static void
-nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
+static int
+nv84_fifo_chan_init(struct nouveau_object *object)
 {
-        struct nv84_fifo_chan *fctx = chan->engctx[engine];
-        struct drm_device *dev = chan->dev;
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        unsigned long flags;
-        u32 save;
+        struct nv50_fifo_priv *priv = (void *)object->engine;
+        struct nv50_fifo_base *base = (void *)object->parent;
+        struct nv50_fifo_chan *chan = (void *)object;
+        struct nouveau_gpuobj *ramfc = base->ramfc;
+        u32 chid = chan->base.chid;
+        int ret;
+
+        ret = nouveau_fifo_channel_init(&chan->base);
+        if (ret)
+                return ret;
 
-        /* remove channel from playlist, will context switch if active */
-        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-        nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
-        nv50_fifo_playlist_update(dev);
+        nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
+        nv50_fifo_playlist_update(priv);
+        return 0;
+}
 
-        save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
+static struct nouveau_ofuncs
+nv84_fifo_ofuncs = {
+        .ctor = nv84_fifo_chan_ctor,
+        .dtor = nv50_fifo_chan_dtor,
+        .init = nv84_fifo_chan_init,
+        .fini = nv50_fifo_chan_fini,
+        .rd32 = _nouveau_fifo_channel_rd32,
+        .wr32 = _nouveau_fifo_channel_wr32,
+};
 
-        /* tell any engines on this channel to unload their contexts */
-        nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
-        if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
-                NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
+static struct nouveau_oclass
+nv84_fifo_sclass[] = {
+        { 0x826f, &nv84_fifo_ofuncs },
+        {}
+};
 
-        nv_wr32(dev, 0x002520, save);
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
 
-        nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
-        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+int
+nv84_fifo_context_ctor(struct nouveau_object *parent,
+                       struct nouveau_object *engine,
+                       struct nouveau_oclass *oclass, void *data, u32 size,
+                       struct nouveau_object **pobject)
+{
+        struct nv50_fifo_base *base;
+        int ret;
 
-        /* clean up */
-        if (chan->user) {
-                iounmap(chan->user);
-                chan->user = NULL;
-        }
+        ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
+                                          0x1000, NVOBJ_FLAG_HEAP, &base);
+        *pobject = nv_object(base);
+        if (ret)
+                return ret;
 
-        nouveau_gpuobj_ref(NULL, &fctx->ramfc);
-        nouveau_gpuobj_ref(NULL, &fctx->cache);
+        ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0,
+                                 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
+        if (ret)
+                return ret;
 
-        nvvm_engref(chan->vm, engine, -1);
-        chan->engctx[engine] = NULL;
-        kfree(fctx);
-}
+        ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0,
+                                 0, &base->pgd);
+        if (ret)
+                return ret;
 
-static int
-nv84_fifo_init(struct drm_device *dev, int engine)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nv84_fifo_chan *fctx;
-        u32 instance;
-        int i;
-
-        nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
-        nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
-        nv_wr32(dev, 0x00250c, 0x6f3cfc34);
-        nv_wr32(dev, 0x002044, 0x01003fff);
-
-        nv_wr32(dev, 0x002100, 0xffffffff);
-        nv_wr32(dev, 0x002140, 0xffffffff);
-
-        for (i = 0; i < 128; i++) {
-                struct nouveau_channel *chan = dev_priv->channels.ptr[i];
-                if (chan && (fctx = chan->engctx[engine]))
-                        instance = 0x80000000 | fctx->ramfc->addr >> 8;
-                else
-                        instance = 0x00000000;
-                nv_wr32(dev, 0x002600 + (i * 4), instance);
-        }
+        ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+        if (ret)
+                return ret;
 
-        nv50_fifo_playlist_update(dev);
+        ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1000, 0x400,
+                                 NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
+        if (ret)
+                return ret;
+
+        ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0100, 0x100,
+                                 NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+        if (ret)
+                return ret;
 
-        nv_wr32(dev, 0x003200, 1);
-        nv_wr32(dev, 0x003250, 1);
-        nv_wr32(dev, 0x002500, 1);
         return 0;
 }
 
-static int
-nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nv84_fifo_priv *priv = nv_engine(dev, engine);
-        int i;
-        u32 save;
-
-        /* set playlist length to zero, fifo will unload context */
-        nv_wr32(dev, 0x0032ec, 0);
-
-        save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
-
-        /* tell all connected engines to unload their contexts */
-        for (i = 0; i < priv->base.channels; i++) {
-                struct nouveau_channel *chan = dev_priv->channels.ptr[i];
-                if (chan)
-                        nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
-                if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
-                        NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
-                        return -EBUSY;
-                }
-        }
+static struct nouveau_oclass
+nv84_fifo_cclass = {
+        .handle = NV_ENGCTX(FIFO, 0x84),
+        .ofuncs = &(struct nouveau_ofuncs) {
+                .ctor = nv84_fifo_context_ctor,
+                .dtor = nv50_fifo_context_dtor,
+                .init = _nouveau_fifo_context_init,
+                .fini = _nouveau_fifo_context_fini,
+                .rd32 = _nouveau_fifo_context_rd32,
+                .wr32 = _nouveau_fifo_context_wr32,
+        },
+};
 
-        nv_wr32(dev, 0x002520, save);
-        nv_wr32(dev, 0x002140, 0);
-        return 0;
-}
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
 
-int
-nv84_fifo_create(struct drm_device *dev)
+static int
+nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+               struct nouveau_oclass *oclass, void *data, u32 size,
+               struct nouveau_object **pobject)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nv84_fifo_priv *priv;
+        struct nv50_fifo_priv *priv;
         int ret;
 
-        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-        if (!priv)
-                return -ENOMEM;
-
-        priv->base.base.destroy = nv50_fifo_destroy;
-        priv->base.base.init = nv84_fifo_init;
-        priv->base.base.fini = nv84_fifo_fini;
-        priv->base.base.context_new = nv84_fifo_context_new;
-        priv->base.base.context_del = nv84_fifo_context_del;
-        priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
-        priv->base.channels = 127;
-        dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
-        ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
-                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
+        ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
+        *pobject = nv_object(priv);
         if (ret)
-                goto error;
+                return ret;
 
-        ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
-                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
+        ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+                                 &priv->playlist[0]);
         if (ret)
-                goto error;
+                return ret;
 
-        nouveau_irq_register(dev, 8, nv04_fifo_isr);
-error:
+        ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+                                 &priv->playlist[1]);
         if (ret)
-                priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
-        return ret;
+                return ret;
+
+        nv_subdev(priv)->unit = 0x00000100;
+        nv_subdev(priv)->intr = nv04_fifo_intr;
+        nv_engine(priv)->cclass = &nv84_fifo_cclass;
+        nv_engine(priv)->sclass = nv84_fifo_sclass;
+        return 0;
 }
+
+struct nouveau_oclass
+nv84_fifo_oclass = {
+        .handle = NV_ENGINE(FIFO, 0x84),
+        .ofuncs = &(struct nouveau_ofuncs) {
+                .ctor = nv84_fifo_ctor,
+                .dtor = nv50_fifo_dtor,
+                .init = nv50_fifo_init,
+                .fini = _nouveau_fifo_fini,
+        },
+};
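
[Editor's note: the nv84 hunks above show the encoding this commit uses when binding an object into a channel's RAMHT: nv84_fifo_object_attach packs an engine selector into bits 20 and up of the context word before calling nouveau_ramht_insert(). The standalone sketch below restates that encoding in plain C so it can be compiled and inspected outside the kernel; the enum names, values, and the main() harness are illustrative stand-ins, not part of the patch.]

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the NVDEV_ENGINE_* indices used above. */
enum engine {
        ENG_DMAOBJ, ENG_SW, ENG_GR, ENG_MPEG, ENG_PPP,
        ENG_ME, ENG_COPY0, ENG_VP, ENG_CRYPT, ENG_UNK1C1, ENG_BSP
};

/* Mirrors the switch in nv84_fifo_object_attach(): each engine (or
 * group of engines sharing a context switcher) gets its own selector
 * in bits 20+ of the RAMHT context word. */
static int engine_context(enum engine e, uint32_t *context)
{
        switch (e) {
        case ENG_DMAOBJ:
        case ENG_SW    : *context |= 0x00000000; break;
        case ENG_GR    : *context |= 0x00100000; break;
        case ENG_MPEG  :
        case ENG_PPP   : *context |= 0x00200000; break;
        case ENG_ME    :
        case ENG_COPY0 : *context |= 0x00300000; break;
        case ENG_VP    : *context |= 0x00400000; break;
        case ENG_CRYPT :
        case ENG_UNK1C1: *context |= 0x00500000; break;
        case ENG_BSP   : *context |= 0x00600000; break;
        default        : return -1;
        }
        return 0;
}

int main(void)
{
        uint32_t ctx = 0x00000004; /* "just non-zero", as in the patch */

        if (engine_context(ENG_GR, &ctx) == 0)
                printf("RAMHT context word: 0x%08x\n", ctx);
        return 0;
}
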
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index b99d976011d1..ef403fe66ce0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -22,17 +22,24 @@
 * Authors: Ben Skeggs
 */

-#include "drmP.h"
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/namedb.h>
+#include <core/gpuobj.h>
+#include <core/engctx.h>
+#include <core/class.h>
+#include <core/math.h>
+#include <core/enum.h>
 
-#include "nouveau_drv.h"
-#include <core/mm.h>
-#include <engine/fifo.h>
-#include "nouveau_software.h"
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/vm.h>
 
-static void nvc0_fifo_isr(struct drm_device *);
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
 
 struct nvc0_fifo_priv {
-        struct nouveau_fifo_priv base;
+        struct nouveau_fifo base;
         struct nouveau_gpuobj *playlist[2];
         int cur_playlist;
         struct {
@@ -42,14 +49,24 @@ struct nvc0_fifo_priv {
         int spoon_nr;
 };
 
+struct nvc0_fifo_base {
+        struct nouveau_fifo_base base;
+        struct nouveau_gpuobj *pgd;
+        struct nouveau_vm *vm;
+};
+
 struct nvc0_fifo_chan {
         struct nouveau_fifo_chan base;
 };
 
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
 static void
-nvc0_fifo_playlist_update(struct drm_device *dev)
+nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv)
 {
-        struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+        struct nouveau_bar *bar = nouveau_bar(priv);
         struct nouveau_gpuobj *cur;
         int i, p;
 
@@ -57,174 +74,253 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
         priv->cur_playlist = !priv->cur_playlist;
 
         for (i = 0, p = 0; i < 128; i++) {
-                if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
+                if (!(nv_rd32(priv, 0x003004 + (i * 8)) & 1))
                         continue;
                 nv_wo32(cur, p + 0, i);
                 nv_wo32(cur, p + 4, 0x00000004);
                 p += 8;
         }
-        nvimem_flush(dev);
+        bar->flush(bar);
 
-        nv_wr32(dev, 0x002270, cur->addr >> 12);
-        nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
-        if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
-                NV_ERROR(dev, "PFIFO - playlist update failed\n");
+        nv_wr32(priv, 0x002270, cur->addr >> 12);
+        nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
+        if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000))
+                nv_error(priv, "playlist update failed\n");
 }
 
 static int
-nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
+nvc0_fifo_context_attach(struct nouveau_object *parent,
+                         struct nouveau_object *object)
 {
-        struct drm_device *dev = chan->dev;
-        struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
-        struct nvc0_fifo_chan *fctx;
-        u64 usermem = priv->user.mem->addr + chan->id * 0x1000;
-        u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
-        int ret, i;
+        struct nouveau_bar *bar = nouveau_bar(parent);
+        struct nvc0_fifo_base *base = (void *)parent->parent;
+        struct nouveau_engctx *ectx = (void *)object;
+        u32 addr;
+        int ret;
 
-        fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
-        if (!fctx)
-                return -ENOMEM;
+        switch (nv_engidx(object->engine)) {
+        case NVDEV_ENGINE_SW   : return 0;
+        case NVDEV_ENGINE_GR   : addr = 0x0210; break;
+        case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
+        case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+        default:
+                return -EINVAL;
+        }
 
-        chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
-                                priv->user.bar.offset + (chan->id * 0x1000),
-                                PAGE_SIZE);
-        if (!chan->user) {
-                ret = -ENOMEM;
-                goto error;
+        if (!ectx->vma.node) {
+                ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
+                                            NV_MEM_ACCESS_RW, &ectx->vma);
+                if (ret)
+                        return ret;
         }
 
-        for (i = 0; i < 0x100; i += 4)
-                nv_wo32(chan->ramin, i, 0x00000000);
-        nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
-        nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
-        nv_wo32(chan->ramin, 0x10, 0x0000face);
-        nv_wo32(chan->ramin, 0x30, 0xfffff902);
-        nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
-        nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
-                                   upper_32_bits(ib_virt));
-        nv_wo32(chan->ramin, 0x54, 0x00000002);
-        nv_wo32(chan->ramin, 0x84, 0x20400000);
-        nv_wo32(chan->ramin, 0x94, 0x30000001);
-        nv_wo32(chan->ramin, 0x9c, 0x00000100);
-        nv_wo32(chan->ramin, 0xa4, 0x1f1f1f1f);
-        nv_wo32(chan->ramin, 0xa8, 0x1f1f1f1f);
-        nv_wo32(chan->ramin, 0xac, 0x0000001f);
-        nv_wo32(chan->ramin, 0xb8, 0xf8000000);
-        nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
-        nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
-        nvimem_flush(dev);
-
-        nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
-                                (chan->ramin->addr >> 12));
-        nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
-        nvc0_fifo_playlist_update(dev);
-
-error:
-        if (ret)
-                priv->base.base.context_del(chan, engine);
-        return ret;
+        nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
+        nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
+        bar->flush(bar);
+        return 0;
 }
 
-static void
-nvc0_fifo_context_del(struct nouveau_channel *chan, int engine)
+static int
+nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+                         struct nouveau_object *object)
 {
-        struct nvc0_fifo_chan *fctx = chan->engctx[engine];
-        struct drm_device *dev = chan->dev;
-
-        nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
-        nv_wr32(dev, 0x002634, chan->id);
-        if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
-                NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
-        nvc0_fifo_playlist_update(dev);
-        nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
-
-        if (chan->user) {
-                iounmap(chan->user);
-                chan->user = NULL;
+        struct nouveau_bar *bar = nouveau_bar(parent);
+        struct nvc0_fifo_priv *priv = (void *)parent->engine;
+        struct nvc0_fifo_base *base = (void *)parent->parent;
+        struct nvc0_fifo_chan *chan = (void *)parent;
+        u32 addr;
+
+        switch (nv_engidx(object->engine)) {
+        case NVDEV_ENGINE_SW   : return 0;
+        case NVDEV_ENGINE_GR   : addr = 0x0210; break;
+        case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
+        case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+        default:
+                return -EINVAL;
         }
 
-        chan->engctx[engine] = NULL;
-        kfree(fctx);
+        nv_wo32(base, addr + 0x00, 0x00000000);
+        nv_wo32(base, addr + 0x04, 0x00000000);
+        bar->flush(bar);
+
+        nv_wr32(priv, 0x002634, chan->base.chid);
+        if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+                nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
+                if (suspend)
+                        return -EBUSY;
+        }
+
+        return 0;
 }
 
 static int
-nvc0_fifo_init(struct drm_device *dev, int engine)
+nvc0_fifo_chan_ctor(struct nouveau_object *parent,
+                    struct nouveau_object *engine,
+                    struct nouveau_oclass *oclass, void *data, u32 size,
+                    struct nouveau_object **pobject)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
-        struct nouveau_channel *chan;
-        int i;
+        struct nouveau_bar *bar = nouveau_bar(parent);
+        struct nvc0_fifo_priv *priv = (void *)engine;
+        struct nvc0_fifo_base *base = (void *)parent;
+        struct nvc0_fifo_chan *chan;
+        struct nv_channel_ind_class *args = data;
+        u64 usermem, ioffset, ilength;
+        int ret, i;
 
-        /* reset PFIFO, enable all available PSUBFIFO areas */
-        nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
-        nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
-        nv_wr32(dev, 0x000204, 0xffffffff);
-        nv_wr32(dev, 0x002204, 0xffffffff);
+        if (size < sizeof(*args))
+                return -EINVAL;
+
+        ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
+                                          priv->user.bar.offset, 0x1000,
+                                          args->pushbuf,
+                                          (1 << NVDEV_ENGINE_SW) |
+                                          (1 << NVDEV_ENGINE_GR) |
+                                          (1 << NVDEV_ENGINE_COPY0) |
+                                          (1 << NVDEV_ENGINE_COPY1), &chan);
+        *pobject = nv_object(chan);
+        if (ret)
+                return ret;
+
+        nv_parent(chan)->context_attach = nvc0_fifo_context_attach;
+        nv_parent(chan)->context_detach = nvc0_fifo_context_detach;
+
+        usermem = chan->base.chid * 0x1000;
+        ioffset = args->ioffset;
+        ilength = log2i(args->ilength / 8);
+
+        for (i = 0; i < 0x1000; i += 4)
+                nv_wo32(priv->user.mem, usermem + i, 0x00000000);
+
+        nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
+        nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
+        nv_wo32(base, 0x10, 0x0000face);
+        nv_wo32(base, 0x30, 0xfffff902);
+        nv_wo32(base, 0x48, lower_32_bits(ioffset));
+        nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
+        nv_wo32(base, 0x54, 0x00000002);
+        nv_wo32(base, 0x84, 0x20400000);
+        nv_wo32(base, 0x94, 0x30000001);
+        nv_wo32(base, 0x9c, 0x00000100);
+        nv_wo32(base, 0xa4, 0x1f1f1f1f);
+        nv_wo32(base, 0xa8, 0x1f1f1f1f);
+        nv_wo32(base, 0xac, 0x0000001f);
+        nv_wo32(base, 0xb8, 0xf8000000);
+        nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
+        nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
+        bar->flush(bar);
+        return 0;
+}
 
-        priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
-        NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
+static int
+nvc0_fifo_chan_init(struct nouveau_object *object)
+{
+        struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
+        struct nvc0_fifo_priv *priv = (void *)object->engine;
+        struct nvc0_fifo_chan *chan = (void *)object;
+        u32 chid = chan->base.chid;
+        int ret;
 
-        /* assign engines to subfifos */
-        if (priv->spoon_nr >= 3) {
-                nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
-                nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
-                nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
-                nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
-                nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
-                nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
-        }
+        ret = nouveau_fifo_channel_init(&chan->base);
+        if (ret)
+                return ret;
 
-        /* PSUBFIFO[n] */
-        for (i = 0; i < priv->spoon_nr; i++) {
-                nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
-                nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
-                nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
-        }
+        nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
+        nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
+        nvc0_fifo_playlist_update(priv);
+        return 0;
+}
 
-        nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
-        nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+static int
+nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+        struct nvc0_fifo_priv *priv = (void *)object->engine;
+        struct nvc0_fifo_chan *chan = (void *)object;
+        u32 chid = chan->base.chid;
 
-        nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
-        nv_wr32(dev, 0x002100, 0xffffffff);
-        nv_wr32(dev, 0x002140, 0xbfffffff);
+        nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
+        nvc0_fifo_playlist_update(priv);
+        nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
 
-        /* restore PFIFO context table */
-        for (i = 0; i < 128; i++) {
-                chan = dev_priv->channels.ptr[i];
-                if (!chan || !chan->engctx[engine])
-                        continue;
+        return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
 
-                nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
-                                (chan->ramin->addr >> 12));
-                nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001);
-        }
-        nvc0_fifo_playlist_update(dev);
+static struct nouveau_ofuncs
+nvc0_fifo_ofuncs = {
+        .ctor = nvc0_fifo_chan_ctor,
+        .dtor = _nouveau_fifo_channel_dtor,
+        .init = nvc0_fifo_chan_init,
+        .fini = nvc0_fifo_chan_fini,
+        .rd32 = _nouveau_fifo_channel_rd32,
+        .wr32 = _nouveau_fifo_channel_wr32,
+};
 
-        return 0;
-}
+static struct nouveau_oclass
+nvc0_fifo_sclass[] = {
+        { 0x906f, &nvc0_fifo_ofuncs },
+        {}
+};
+
+/*******************************************************************************
+ * FIFO context - instmem heap and vm setup
+ ******************************************************************************/
 
 static int
-nvc0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+nvc0_fifo_context_ctor(struct nouveau_object *parent,
+                       struct nouveau_object *engine,
+                       struct nouveau_oclass *oclass, void *data, u32 size,
+                       struct nouveau_object **pobject)
 {
-        int i;
+        struct nvc0_fifo_base *base;
+        int ret;
 
-        for (i = 0; i < 128; i++) {
-                if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
-                        continue;
+        ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+                                          0x1000, NVOBJ_FLAG_ZERO_ALLOC |
+                                          NVOBJ_FLAG_HEAP, &base);
+        *pobject = nv_object(base);
+        if (ret)
+                return ret;
 
-                nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
-                nv_wr32(dev, 0x002634, i);
-                if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
-                        NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
-                                i, nv_rd32(dev, 0x002634));
-                        return -EBUSY;
-                }
-        }
+        ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
+        if (ret)
+                return ret;
+
+        nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
+        nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
+        nv_wo32(base, 0x0208, 0xffffffff);
+        nv_wo32(base, 0x020c, 0x000000ff);
+
+        ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+        if (ret)
+                return ret;
 
-        nv_wr32(dev, 0x002140, 0x00000000);
         return 0;
 }
 
+static void
+nvc0_fifo_context_dtor(struct nouveau_object *object)
+{
+        struct nvc0_fifo_base *base = (void *)object;
+        nouveau_vm_ref(NULL, &base->vm, base->pgd);
+        nouveau_gpuobj_ref(NULL, &base->pgd);
+        nouveau_fifo_context_destroy(&base->base);
+}
+
+static struct nouveau_oclass
+nvc0_fifo_cclass = {
+        .handle = NV_ENGCTX(FIFO, 0xc0),
+        .ofuncs = &(struct nouveau_ofuncs) {
+                .ctor = nvc0_fifo_context_ctor,
+                .dtor = nvc0_fifo_context_dtor,
+                .init = _nouveau_fifo_context_init,
+                .fini = _nouveau_fifo_context_fini,
+                .rd32 = _nouveau_fifo_context_rd32,
+                .wr32 = _nouveau_fifo_context_wr32,
+        },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
 
 struct nouveau_enum nvc0_fifo_fault_unit[] = {
         { 0x00, "PGRAPH" },
@@ -289,16 +385,16 @@ struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
 };
 
 static void
-nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
+nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
 {
-        u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
-        u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
-        u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
-        u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
+        u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
+        u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
+        u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
+        u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
         u32 client = (stat & 0x00001f00) >> 8;
 
-        NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
-                (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
+        nv_error(priv, "%s fault at 0x%010llx [", (stat & 0x00000080) ?
+                       "write" : "read", (u64)vahi << 32 | valo);
         nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
         printk("] from ");
         nouveau_enum_print(nvc0_fifo_fault_unit, unit);
@@ -313,165 +409,223 @@ nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
 }
 
 static int
-nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
+nvc0_fifo_swmthd(struct nvc0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
 {
-        struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_channel *chan = NULL;
+        struct nvc0_fifo_chan *chan = NULL;
+        struct nouveau_handle *bind;
         unsigned long flags;
         int ret = -EINVAL;
 
-        spin_lock_irqsave(&dev_priv->channels.lock, flags);
-        if (likely(chid >= 0 && chid < priv->base.channels)) {
-                chan = dev_priv->channels.ptr[chid];
-                if (likely(chan)) {
-                        struct nouveau_software_chan *swch =
-                                chan->engctx[NVOBJ_ENGINE_SW];
-                        ret = swch->flip(swch->flip_data);
-                }
+        spin_lock_irqsave(&priv->base.lock, flags);
+        if (likely(chid >= priv->base.min && chid <= priv->base.max))
+                chan = (void *)priv->base.channel[chid];
+        if (unlikely(!chan))
+                goto out;
+
+        bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
+        if (likely(bind)) {
+                if (!mthd || !nv_call(bind->object, mthd, data))
+                        ret = 0;
+                nouveau_namedb_put(bind);
         }
-        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+out:
+        spin_unlock_irqrestore(&priv->base.lock, flags);
         return ret;
 }
 
 static void
-nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
+nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
 {
-        u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
-        u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
-        u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
-        u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
-        u32 subc = (addr & 0x00070000);
+        u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
+        u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
+        u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
+        u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f;
+        u32 subc = (addr & 0x00070000) >> 16;
         u32 mthd = (addr & 0x00003ffc);
         u32 show = stat;
 
         if (stat & 0x00200000) {
                 if (mthd == 0x0054) {
-                        if (!nvc0_fifo_page_flip(dev, chid))
+                        if (!nvc0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
                                 show &= ~0x00200000;
                 }
         }
 
+        if (stat & 0x00800000) {
+                if (!nvc0_fifo_swmthd(priv, chid, mthd, data))
+                        show &= ~0x00800000;
+        }
+
         if (show) {
-                NV_INFO(dev, "PFIFO%d:", unit);
+                nv_error(priv, "SUBFIFO%d:", unit);
                 nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
-                NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
-                        unit, chid, subc, mthd, data);
+                printk("\n");
+                nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
+                               "data 0x%08x\n",
+                         unit, chid, subc, mthd, data);
         }
 
-        nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
-        nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
+        nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
+        nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
 }
 
 static void
-nvc0_fifo_isr(struct drm_device *dev)
+nvc0_fifo_intr(struct nouveau_subdev *subdev)
 {
-        u32 mask = nv_rd32(dev, 0x002140);
-        u32 stat = nv_rd32(dev, 0x002100) & mask;
+        struct nvc0_fifo_priv *priv = (void *)subdev;
+        u32 mask = nv_rd32(priv, 0x002140);
+        u32 stat = nv_rd32(priv, 0x002100) & mask;
 
         if (stat & 0x00000100) {
-                NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
-                nv_wr32(dev, 0x002100, 0x00000100);
+                nv_info(priv, "unknown status 0x00000100\n");
+                nv_wr32(priv, 0x002100, 0x00000100);
                 stat &= ~0x00000100;
         }
 
         if (stat & 0x10000000) {
-                u32 units = nv_rd32(dev, 0x00259c);
+                u32 units = nv_rd32(priv, 0x00259c);
                 u32 u = units;
 
                 while (u) {
                         int i = ffs(u) - 1;
-                        nvc0_fifo_isr_vm_fault(dev, i);
+                        nvc0_fifo_isr_vm_fault(priv, i);
                         u &= ~(1 << i);
                 }
 
-                nv_wr32(dev, 0x00259c, units);
+                nv_wr32(priv, 0x00259c, units);
                 stat &= ~0x10000000;
         }
 
         if (stat & 0x20000000) {
-                u32 units = nv_rd32(dev, 0x0025a0);
+                u32 units = nv_rd32(priv, 0x0025a0);
                 u32 u = units;
 
                 while (u) {
                         int i = ffs(u) - 1;
-                        nvc0_fifo_isr_subfifo_intr(dev, i);
+                        nvc0_fifo_isr_subfifo_intr(priv, i);
                         u &= ~(1 << i);
                 }
 
-                nv_wr32(dev, 0x0025a0, units);
+                nv_wr32(priv, 0x0025a0, units);
                 stat &= ~0x20000000;
         }
 
         if (stat & 0x40000000) {
-                NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
-                nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
+                nv_warn(priv, "unknown status 0x40000000\n");
+                nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
                 stat &= ~0x40000000;
         }
 
         if (stat) {
-                NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
-                nv_wr32(dev, 0x002100, stat);
-                nv_wr32(dev, 0x002140, 0);
+                nv_fatal(priv, "unhandled status 0x%08x\n", stat);
+                nv_wr32(priv, 0x002100, stat);
+                nv_wr32(priv, 0x002140, 0);
         }
 }
 
+static int
+nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+               struct nouveau_oclass *oclass, void *data, u32 size,
+               struct nouveau_object **pobject)
+{
+        struct nvc0_fifo_priv *priv;
+        int ret;
+
+        ret = nouveau_fifo_create(parent, engine, oclass, 0, 127, &priv);
+        *pobject = nv_object(priv);
+        if (ret)
+                return ret;
+
+        ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
+                                 &priv->playlist[0]);
+        if (ret)
+                return ret;
+
+        ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
+                                 &priv->playlist[1]);
+        if (ret)
+                return ret;
+
+        ret = nouveau_gpuobj_new(parent, NULL, 128 * 0x1000, 0x1000, 0,
+                                 &priv->user.mem);
+        if (ret)
+                return ret;
+
+        ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
+                                 &priv->user.bar);
+        if (ret)
+                return ret;
+
+        nv_subdev(priv)->unit = 0x00000100;
+        nv_subdev(priv)->intr = nvc0_fifo_intr;
+        nv_engine(priv)->cclass = &nvc0_fifo_cclass;
+        nv_engine(priv)->sclass = nvc0_fifo_sclass;
+        return 0;
+}
+
 static void
-nvc0_fifo_destroy(struct drm_device *dev, int engine)
+nvc0_fifo_dtor(struct nouveau_object *object)
 {
-        struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nvc0_fifo_priv *priv = (void *)object;
 
         nouveau_gpuobj_unmap(&priv->user.bar);
         nouveau_gpuobj_ref(NULL, &priv->user.mem);
-
         nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
         nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
 
-        dev_priv->eng[engine] = NULL;
-        kfree(priv);
+        nouveau_fifo_destroy(&priv->base);
 }
 
-int
-nvc0_fifo_create(struct drm_device *dev)
+static int
+nvc0_fifo_init(struct nouveau_object *object)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nvc0_fifo_priv *priv;
-        int ret;
+        struct nvc0_fifo_priv *priv = (void *)object;
+        int ret, i;
 
-        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-        if (!priv)
-                return -ENOMEM;
+        ret = nouveau_fifo_init(&priv->base);
+        if (ret)
+                return ret;
 
-        priv->base.base.destroy = nvc0_fifo_destroy;
-        priv->base.base.init = nvc0_fifo_init;
-        priv->base.base.fini = nvc0_fifo_fini;
-        priv->base.base.context_new = nvc0_fifo_context_new;
-        priv->base.base.context_del = nvc0_fifo_context_del;
-        priv->base.channels = 128;
-        dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+        nv_wr32(priv, 0x000204, 0xffffffff);
+        nv_wr32(priv, 0x002204, 0xffffffff);
 
-        ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[0]);
-        if (ret)
-                goto error;
+        priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204));
+        nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
 
-        ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[1]);
-        if (ret)
-                goto error;
+        /* assign engines to subfifos */
+        if (priv->spoon_nr >= 3) {
+                nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
+                nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
+                nv_wr32(priv, 0x002210, ~(1 << 1)); /* PPP */
+                nv_wr32(priv, 0x002214, ~(1 << 1)); /* PBSP */
+                nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */
+                nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
+        }
 
-        ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4096, 0x1000,
-                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
-        if (ret)
-                goto error;
+        /* PSUBFIFO[n] */
+        for (i = 0; i < priv->spoon_nr; i++) {
+                nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+                nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+                nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+        }
 
-        ret = nouveau_gpuobj_map_bar(priv->user.mem, NV_MEM_ACCESS_RW,
-                                     &priv->user.bar);
-        if (ret)
-                goto error;
+        nv_mask(priv, 0x002200, 0x00000001, 0x00000001);
+        nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
 
-        nouveau_irq_register(dev, 8, nvc0_fifo_isr);
-error:
-        if (ret)
-                priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
-        return ret;
+        nv_wr32(priv, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
+        nv_wr32(priv, 0x002100, 0xffffffff);
+        nv_wr32(priv, 0x002140, 0xbfffffff);
+        return 0;
 }
+
+struct nouveau_oclass
+nvc0_fifo_oclass = {
+        .handle = NV_ENGINE(FIFO, 0xc0),
+        .ofuncs = &(struct nouveau_ofuncs) {
+                .ctor = nvc0_fifo_ctor,
+                .dtor = nvc0_fifo_dtor,
+                .init = nvc0_fifo_init,
+                .fini = _nouveau_fifo_fini,
+        },
+};
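
[Editor's note: the nvc0 file above, and the nve0 file that follows, rebuild the channel playlist the same way: flip to the idle copy of a double-buffered object, fill it with the currently-enabled channels, flush, then point PFIFO at it (registers 0x002270/0x002274) and wait for completion. The sketch below models only that double-buffering discipline as plain, runnable C; the struct, helper names, and the printf standing in for the register writes are illustrative assumptions, not the driver's API.]

#include <stdint.h>
#include <stdio.h>

#define MAX_CHAN 128

/* Hypothetical model of one playlist gpuobj: (chid, flags) pairs. */
struct playlist {
        uint32_t entry[MAX_CHAN][2];
        int nr;
};

static struct playlist buf[2]; /* like priv->playlist[0..1] */
static int cur;                /* like priv->cur_playlist   */

/* Models nvc0_fifo_playlist_update(): flip to the idle copy, rebuild
 * it from the enabled channels, then "submit" the list in one step. */
static void playlist_update(const int *enabled, int nchan)
{
        struct playlist *next;
        int i, p = 0;

        cur ^= 1;
        next = &buf[cur];

        for (i = 0; i < nchan && i < MAX_CHAN; i++) {
                if (!enabled[i])
                        continue;
                next->entry[p][0] = i;          /* nv_wo32(cur, p + 0, i) */
                next->entry[p][1] = 0x00000004; /* nv_wo32(cur, p + 4, 4) */
                p++;
        }
        next->nr = p;

        /* the driver would now flush and write 0x002270/0x002274 */
        printf("submitted playlist %d with %d entries\n", cur, p);
}

int main(void)
{
        int enabled[8] = { 1, 0, 1, 1, 0, 0, 1, 0 };

        playlist_update(enabled, 8);
        playlist_update(enabled, 8);
        return 0;
}
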
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 0b356f1b6864..aaff086dfd2a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -22,25 +22,30 @@
 * Authors: Ben Skeggs
 */

-#include "drmP.h"
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/namedb.h>
+#include <core/gpuobj.h>
+#include <core/engctx.h>
+#include <core/class.h>
+#include <core/math.h>
+#include <core/enum.h>
 
-#include "nouveau_drv.h"
-#include <core/mm.h>
-#include <engine/fifo.h>
-#include "nouveau_software.h"
-
-#define NVE0_FIFO_ENGINE_NUM 32
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/vm.h>
 
-static void nve0_fifo_isr(struct drm_device *);
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
 
-struct nve0_fifo_engine {
+struct nve0_fifo_engn {
         struct nouveau_gpuobj *playlist[2];
         int cur_playlist;
 };
 
 struct nve0_fifo_priv {
-        struct nouveau_fifo_priv base;
-        struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
+        struct nouveau_fifo base;
+        struct nve0_fifo_engn engine[16];
         struct {
                 struct nouveau_gpuobj *mem;
                 struct nouveau_vma bar;
@@ -48,194 +53,286 @@ struct nve0_fifo_priv {
         int spoon_nr;
 };
 
+struct nve0_fifo_base {
+        struct nouveau_fifo_base base;
+        struct nouveau_gpuobj *pgd;
+        struct nouveau_vm *vm;
+};
+
 struct nve0_fifo_chan {
         struct nouveau_fifo_chan base;
         u32 engine;
 };
 
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
 static void
-nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
+nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
 {
-        struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-        struct nve0_fifo_engine *peng = &priv->engine[engine];
+        struct nouveau_bar *bar = nouveau_bar(priv);
+        struct nve0_fifo_engn *engn = &priv->engine[engine];
         struct nouveau_gpuobj *cur;
         u32 match = (engine << 16) | 0x00000001;
-        int ret, i, p;
+        int i, p;
 
-        cur = peng->playlist[peng->cur_playlist];
+        cur = engn->playlist[engn->cur_playlist];
         if (unlikely(cur == NULL)) {
-                ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
+                int ret = nouveau_gpuobj_new(nv_object(priv)->parent, NULL,
+                                             0x8000, 0x1000, 0, &cur);
                 if (ret) {
-                        NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
+                        nv_error(priv, "playlist alloc failed\n");
                         return;
                 }
 
-                peng->playlist[peng->cur_playlist] = cur;
+                engn->playlist[engn->cur_playlist] = cur;
         }
 
-        peng->cur_playlist = !peng->cur_playlist;
+        engn->cur_playlist = !engn->cur_playlist;
 
-        for (i = 0, p = 0; i < priv->base.channels; i++) {
-                u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
+        for (i = 0, p = 0; i < priv->base.max; i++) {
+                u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001;
                 if (ctrl != match)
                         continue;
                 nv_wo32(cur, p + 0, i);
                 nv_wo32(cur, p + 4, 0x00000000);
                 p += 8;
         }
-        nvimem_flush(dev);
+        bar->flush(bar);
 
-        nv_wr32(dev, 0x002270, cur->addr >> 12);
-        nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
-        if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
-                NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
+        nv_wr32(priv, 0x002270, cur->addr >> 12);
+        nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
+        if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
+                nv_error(priv, "playlist %d update timeout\n", engine);
 }
 
 static int
-nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
+nve0_fifo_context_attach(struct nouveau_object *parent,
+                         struct nouveau_object *object)
 {
-        struct drm_device *dev = chan->dev;
-        struct nve0_fifo_priv *priv = nv_engine(dev, engine);
-        struct nve0_fifo_chan *fctx;
-        u64 usermem = priv->user.mem->addr + chan->id * 512;
-        u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
-        int ret = 0, i;
-
-        fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
-        if (!fctx)
-                return -ENOMEM;
-
-        fctx->engine = 0; /* PGRAPH */
-
-        /* allocate vram for control regs, map into polling area */
-        chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
-                                priv->user.bar.offset + (chan->id * 512), 512);
-        if (!chan->user) {
-                ret = -ENOMEM;
-                goto error;
+        struct nouveau_bar *bar = nouveau_bar(parent);
+        struct nve0_fifo_base *base = (void *)parent->parent;
+        struct nouveau_engctx *ectx = (void *)object;
+        u32 addr;
+        int ret;
+
+        switch (nv_engidx(object->engine)) {
+        case NVDEV_ENGINE_SW : return 0;
+        case NVDEV_ENGINE_GR : addr = 0x0210; break;
+        default:
+                return -EINVAL;
         }
 
-        for (i = 0; i < 0x100; i += 4)
-                nv_wo32(chan->ramin, i, 0x00000000);
-        nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
-        nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
-        nv_wo32(chan->ramin, 0x10, 0x0000face);
-        nv_wo32(chan->ramin, 0x30, 0xfffff902);
-        nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
-        nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
-                                   upper_32_bits(ib_virt));
-        nv_wo32(chan->ramin, 0x84, 0x20400000);
-        nv_wo32(chan->ramin, 0x94, 0x30000001);
-        nv_wo32(chan->ramin, 0x9c, 0x00000100);
-        nv_wo32(chan->ramin, 0xac, 0x0000001f);
-        nv_wo32(chan->ramin, 0xe4, 0x00000000);
-        nv_wo32(chan->ramin, 0xe8, chan->id);
-        nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
-        nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
-        nvimem_flush(dev);
-
-        nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
-                                (chan->ramin->addr >> 12));
-        nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
-        nve0_fifo_playlist_update(dev, fctx->engine);
-        nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
-
-error:
-        if (ret)
-                priv->base.base.context_del(chan, engine);
-        return ret;
+        if (!ectx->vma.node) {
+                ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
+                                            NV_MEM_ACCESS_RW, &ectx->vma);
+                if (ret)
+                        return ret;
+        }
+
+        nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
+        nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
+        bar->flush(bar);
+        return 0;
 }
 
-static void
-nve0_fifo_context_del(struct nouveau_channel *chan, int engine)
+static int
+nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+                         struct nouveau_object *object)
 {
-        struct nve0_fifo_chan *fctx = chan->engctx[engine];
-        struct drm_device *dev = chan->dev;
-
-        nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
-        nv_wr32(dev, 0x002634, chan->id);
-        if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
-                NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
-        nve0_fifo_playlist_update(dev, fctx->engine);
-        nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);
-
-        if (chan->user) {
-                iounmap(chan->user);
-                chan->user = NULL;
+        struct nouveau_bar *bar = nouveau_bar(parent);
+        struct nve0_fifo_priv *priv = (void *)parent->engine;
+        struct nve0_fifo_base *base = (void *)parent->parent;
+        struct nve0_fifo_chan *chan = (void *)parent;
+        u32 addr;
+
+        switch (nv_engidx(object->engine)) {
+        case NVDEV_ENGINE_SW : return 0;
+        case NVDEV_ENGINE_GR : addr = 0x0210; break;
+        default:
+                return -EINVAL;
+        }
+
+        nv_wo32(base, addr + 0x00, 0x00000000);
+        nv_wo32(base, addr + 0x04, 0x00000000);
+        bar->flush(bar);
+
+        nv_wr32(priv, 0x002634, chan->base.chid);
+        if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+                nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
+                if (suspend)
+                        return -EBUSY;
         }
 
-        chan->engctx[NVOBJ_ENGINE_FIFO] = NULL;
-        kfree(fctx);
+        return 0;
 }
 
 static int
-nve0_fifo_init(struct drm_device *dev, int engine)
+nve0_fifo_chan_ctor(struct nouveau_object *parent,
+                    struct nouveau_object *engine,
+                    struct nouveau_oclass *oclass, void *data, u32 size,
+                    struct nouveau_object **pobject)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nve0_fifo_priv *priv = nv_engine(dev, engine);
-        struct nve0_fifo_chan *fctx;
-        int i;
+        struct nouveau_bar *bar = nouveau_bar(parent);
+        struct nve0_fifo_priv *priv = (void *)engine;
+        struct nve0_fifo_base *base = (void *)parent;
+        struct nve0_fifo_chan *chan;
+        struct nv_channel_ind_class *args = data;
+        u64 usermem, ioffset, ilength;
+        int ret, i;
+
+        if (size < sizeof(*args))
+                return -EINVAL;
+
+        ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
+                                          priv->user.bar.offset, 0x200,
+                                          args->pushbuf,
+                                          (1 << NVDEV_ENGINE_SW) |
+                                          (1 << NVDEV_ENGINE_GR), &chan);
+        *pobject = nv_object(chan);
+        if (ret)
+                return ret;
+
+        nv_parent(chan)->context_attach = nve0_fifo_context_attach;
+        nv_parent(chan)->context_detach = nve0_fifo_context_detach;
+
+        usermem = chan->base.chid * 0x200;
+        ioffset = args->ioffset;
+        ilength = log2i(args->ilength / 8);
+
+        for (i = 0; i < 0x200; i += 4)
+                nv_wo32(priv->user.mem, usermem + i, 0x00000000);
+
+        nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
+        nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
+        nv_wo32(base, 0x10, 0x0000face);
+        nv_wo32(base, 0x30, 0xfffff902);
+        nv_wo32(base, 0x48, lower_32_bits(ioffset));
+        nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
+        nv_wo32(base, 0x84, 0x20400000);
+        nv_wo32(base, 0x94, 0x30000001);
+        nv_wo32(base, 0x9c, 0x00000100);
+        nv_wo32(base, 0xac, 0x0000001f);
+        nv_wo32(base, 0xe8, chan->base.chid);
+        nv_wo32(base, 0xb8, 0xf8000000);
+        nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
+        nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
+        bar->flush(bar);
+        return 0;
+}
 
-        /* reset PFIFO, enable all available PSUBFIFO areas */
-        nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
-        nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
-        nv_wr32(dev, 0x000204, 0xffffffff);
+static int
+nve0_fifo_chan_init(struct nouveau_object *object)
+{
+        struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
+        struct nve0_fifo_priv *priv = (void *)object->engine;
+        struct nve0_fifo_chan *chan = (void *)object;
+        u32 chid = chan->base.chid;
+        int ret;
 
-        priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
-        NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
+        ret = nouveau_fifo_channel_init(&chan->base);
+        if (ret)
+                return ret;
 
-        /* PSUBFIFO[n] */
-        for (i = 0; i < priv->spoon_nr; i++) {
-                nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
-                nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
-                nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
-        }
+        nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
+        nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+        nve0_fifo_playlist_update(priv, chan->engine);
+        nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+        return 0;
+}
 
-        nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+static int
+nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+        struct nve0_fifo_priv *priv = (void *)object->engine;
+        struct nve0_fifo_chan *chan = (void *)object;
+        u32 chid = chan->base.chid;
 
-        nv_wr32(dev, 0x002a00, 0xffffffff);
-        nv_wr32(dev, 0x002100, 0xffffffff);
-        nv_wr32(dev, 0x002140, 0xbfffffff);
+        nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
+        nve0_fifo_playlist_update(priv, chan->engine);
+        nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
 
-        /* restore PFIFO context table */
-        for (i = 0; i < priv->base.channels; i++) {
-                struct nouveau_channel *chan = dev_priv->channels.ptr[i];
-                if (!chan || !(fctx = chan->engctx[engine]))
-                        continue;
+        return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
 
-                nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
-                                (chan->ramin->addr >> 12));
-                nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
-                nve0_fifo_playlist_update(dev, fctx->engine);
-                nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
-        }
+static struct nouveau_ofuncs
+nve0_fifo_ofuncs = {
+        .ctor = nve0_fifo_chan_ctor,
+        .dtor = _nouveau_fifo_channel_dtor,
+        .init = nve0_fifo_chan_init,
+        .fini = nve0_fifo_chan_fini,
+        .rd32 = _nouveau_fifo_channel_rd32,
+        .wr32 = _nouveau_fifo_channel_wr32,
+};
 
-        return 0;
-}
+static struct nouveau_oclass
+nve0_fifo_sclass[] = {
+        { 0xa06f, &nve0_fifo_ofuncs },
+        {}
+};
+
+/*******************************************************************************
+ * FIFO context - instmem heap and vm setup
+ ******************************************************************************/
 
 static int
-nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+nve0_fifo_context_ctor(struct nouveau_object *parent,
+                       struct nouveau_object *engine,
+                       struct nouveau_oclass *oclass, void *data, u32 size,
+                       struct nouveau_object **pobject)
 {
-        struct nve0_fifo_priv *priv = nv_engine(dev, engine);
-        int i;
+        struct nve0_fifo_base *base;
+        int ret;
 
-        for (i = 0; i < priv->base.channels; i++) {
-                if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
-                        continue;
+        ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+                                          0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
+        *pobject = nv_object(base);
+        if (ret)
+                return ret;
 
-                nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
-                nv_wr32(dev, 0x002634, i);
-                if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
-                        NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
-                                i, nv_rd32(dev, 0x002634));
-                        return -EBUSY;
-                }
-        }
+        ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
+        if (ret)
+                return ret;
+
+        nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
+        nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
+        nv_wo32(base, 0x0208, 0xffffffff);
+        nv_wo32(base, 0x020c, 0x000000ff);
+
+        ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+        if (ret)
+                return ret;
 
-        nv_wr32(dev, 0x002140, 0x00000000);
         return 0;
 }
 
+static void
+nve0_fifo_context_dtor(struct nouveau_object *object)
+{
+        struct nve0_fifo_base *base = (void *)object;
+        nouveau_vm_ref(NULL, &base->vm, base->pgd);
+        nouveau_gpuobj_ref(NULL, &base->pgd);
+        nouveau_fifo_context_destroy(&base->base);
+}
+
+static struct nouveau_oclass
+nve0_fifo_cclass = {
+        .handle = NV_ENGCTX(FIFO, 0xe0),
+        .ofuncs = &(struct nouveau_ofuncs) {
+                .ctor = nve0_fifo_context_ctor,
+                .dtor = nve0_fifo_context_dtor,
+                .init = _nouveau_fifo_context_init,
+                .fini = _nouveau_fifo_context_fini,
+                .rd32 = _nouveau_fifo_context_rd32,
+                .wr32 = _nouveau_fifo_context_wr32,
+        },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
 struct nouveau_enum nve0_fifo_fault_unit[] = {
         {}
 };
@@ -268,16 +365,16 @@ struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
 };
 
 static void
-nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
+nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
 {
-        u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
-        u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
-        u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
-        u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
+        u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
+        u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
+        u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
+        u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
         u32 client = (stat & 0x00001f00) >> 8;
 
-        NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
-                (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
+        nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
+                       "write" : "read", (u64)vahi << 32 | valo);
         nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
         printk("] from ");
         nouveau_enum_print(nve0_fifo_fault_unit, unit);
@@ -292,160 +389,205 @@ nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
292} 389}
293 390
294static int 391static int
295nve0_fifo_page_flip(struct drm_device *dev, u32 chid) 392nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
296{ 393{
297 struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO); 394 struct nve0_fifo_chan *chan = NULL;
298 struct drm_nouveau_private *dev_priv = dev->dev_private; 395 struct nouveau_handle *bind;
299 struct nouveau_channel *chan = NULL;
300 unsigned long flags; 396 unsigned long flags;
301 int ret = -EINVAL; 397 int ret = -EINVAL;
302 398
303 spin_lock_irqsave(&dev_priv->channels.lock, flags); 399 spin_lock_irqsave(&priv->base.lock, flags);
304 if (likely(chid >= 0 && chid < priv->base.channels)) { 400 if (likely(chid >= priv->base.min && chid <= priv->base.max))
305 chan = dev_priv->channels.ptr[chid]; 401 chan = (void *)priv->base.channel[chid];
306 if (likely(chan)) { 402 if (unlikely(!chan))
307 struct nouveau_software_chan *swch = 403 goto out;
308 chan->engctx[NVOBJ_ENGINE_SW]; 404
309 ret = swch->flip(swch->flip_data); 405 bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
310 } 406 if (likely(bind)) {
407 if (!mthd || !nv_call(bind->object, mthd, data))
408 ret = 0;
409 nouveau_namedb_put(bind);
311 } 410 }
312 spin_unlock_irqrestore(&dev_priv->channels.lock, flags); 411
412out:
413 spin_unlock_irqrestore(&priv->base.lock, flags);
313 return ret; 414 return ret;
314} 415}
315 416
316static void 417static void
317nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit) 418nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
318{ 419{
319 u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000)); 420 u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
320 u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000)); 421 u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
321 u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000)); 422 u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
322 u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0xfff; 423 u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
323 u32 subc = (addr & 0x00070000); 424 u32 subc = (addr & 0x00070000) >> 16;
324 u32 mthd = (addr & 0x00003ffc); 425 u32 mthd = (addr & 0x00003ffc);
325 u32 show = stat; 426 u32 show = stat;
326 427
327 if (stat & 0x00200000) { 428 if (stat & 0x00200000) {
328 if (mthd == 0x0054) { 429 if (mthd == 0x0054) {
329 if (!nve0_fifo_page_flip(dev, chid)) 430 if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
330 show &= ~0x00200000; 431 show &= ~0x00200000;
331 } 432 }
332 } 433 }
333 434
435 if (stat & 0x00800000) {
436 if (!nve0_fifo_swmthd(priv, chid, mthd, data))
437 show &= ~0x00800000;
438 }
439
334 if (show) { 440 if (show) {
335 NV_INFO(dev, "PFIFO%d:", unit); 441 nv_error(priv, "SUBFIFO%d:", unit);
336 nouveau_bitfield_print(nve0_fifo_subfifo_intr, show); 442 nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
337 NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n", 443 printk("\n");
338 unit, chid, subc, mthd, data); 444 nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
445 "data 0x%08x\n",
446 unit, chid, subc, mthd, data);
339 } 447 }
340 448
341 nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008); 449 nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
342 nv_wr32(dev, 0x040108 + (unit * 0x2000), stat); 450 nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
343} 451}
344 452
345static void 453static void
346nve0_fifo_isr(struct drm_device *dev) 454nve0_fifo_intr(struct nouveau_subdev *subdev)
347{ 455{
348 u32 mask = nv_rd32(dev, 0x002140); 456 struct nve0_fifo_priv *priv = (void *)subdev;
349 u32 stat = nv_rd32(dev, 0x002100) & mask; 457 u32 mask = nv_rd32(priv, 0x002140);
458 u32 stat = nv_rd32(priv, 0x002100) & mask;
350 459
351 if (stat & 0x00000100) { 460 if (stat & 0x00000100) {
352 NV_INFO(dev, "PFIFO: unknown status 0x00000100\n"); 461 nv_warn(priv, "unknown status 0x00000100\n");
353 nv_wr32(dev, 0x002100, 0x00000100); 462 nv_wr32(priv, 0x002100, 0x00000100);
354 stat &= ~0x00000100; 463 stat &= ~0x00000100;
355 } 464 }
356 465
357 if (stat & 0x10000000) { 466 if (stat & 0x10000000) {
358 u32 units = nv_rd32(dev, 0x00259c); 467 u32 units = nv_rd32(priv, 0x00259c);
359 u32 u = units; 468 u32 u = units;
360 469
361 while (u) { 470 while (u) {
362 int i = ffs(u) - 1; 471 int i = ffs(u) - 1;
363 nve0_fifo_isr_vm_fault(dev, i); 472 nve0_fifo_isr_vm_fault(priv, i);
364 u &= ~(1 << i); 473 u &= ~(1 << i);
365 } 474 }
366 475
367 nv_wr32(dev, 0x00259c, units); 476 nv_wr32(priv, 0x00259c, units);
368 stat &= ~0x10000000; 477 stat &= ~0x10000000;
369 } 478 }
370 479
371 if (stat & 0x20000000) { 480 if (stat & 0x20000000) {
372 u32 units = nv_rd32(dev, 0x0025a0); 481 u32 units = nv_rd32(priv, 0x0025a0);
373 u32 u = units; 482 u32 u = units;
374 483
375 while (u) { 484 while (u) {
376 int i = ffs(u) - 1; 485 int i = ffs(u) - 1;
377 nve0_fifo_isr_subfifo_intr(dev, i); 486 nve0_fifo_isr_subfifo_intr(priv, i);
378 u &= ~(1 << i); 487 u &= ~(1 << i);
379 } 488 }
380 489
381 nv_wr32(dev, 0x0025a0, units); 490 nv_wr32(priv, 0x0025a0, units);
382 stat &= ~0x20000000; 491 stat &= ~0x20000000;
383 } 492 }
384 493
385 if (stat & 0x40000000) { 494 if (stat & 0x40000000) {
386 NV_INFO(dev, "PFIFO: unknown status 0x40000000\n"); 495 nv_warn(priv, "unknown status 0x40000000\n");
387 nv_mask(dev, 0x002a00, 0x00000000, 0x00000000); 496 nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
388 stat &= ~0x40000000; 497 stat &= ~0x40000000;
389 } 498 }
390 499
391 if (stat) { 500 if (stat) {
392 NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat); 501 nv_fatal(priv, "unhandled status 0x%08x\n", stat);
393 nv_wr32(dev, 0x002100, stat); 502 nv_wr32(priv, 0x002100, stat);
394 nv_wr32(dev, 0x002140, 0); 503 nv_wr32(priv, 0x002140, 0);
395 } 504 }
396} 505}
397 506
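Both fault paths in nve0_fifo_intr() above drain their per-unit status words with the same lowest-set-bit walk: ffs() picks the next pending unit, the handler runs, the bit is cleared, and the accumulated word is written back to acknowledge. The loop in isolation, as a standalone sketch (plain C; ffs() is the POSIX one from <strings.h>, and the status value is made up for illustration):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int
main(void)
{
	unsigned int u = 0x00000022;	/* pretend units 1 and 5 faulted */

	while (u) {
		int i = ffs(u) - 1;	/* index of lowest set bit */
		printf("servicing unit %d\n", i);
		u &= ~(1u << i);	/* done with this unit */
	}
	return 0;
}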
507static int
508nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
509 struct nouveau_oclass *oclass, void *data, u32 size,
510 struct nouveau_object **pobject)
511{
512 struct nve0_fifo_priv *priv;
513 int ret;
514
515 ret = nouveau_fifo_create(parent, engine, oclass, 0, 4095, &priv);
516 *pobject = nv_object(priv);
517 if (ret)
518 return ret;
519
520 ret = nouveau_gpuobj_new(parent, NULL, 4096 * 0x200, 0x1000,
521 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
522 if (ret)
523 return ret;
524
525 ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
526 &priv->user.bar);
527 if (ret)
528 return ret;
529
530 nv_subdev(priv)->unit = 0x00000100;
531 nv_subdev(priv)->intr = nve0_fifo_intr;
532 nv_engine(priv)->cclass = &nve0_fifo_cclass;
533 nv_engine(priv)->sclass = nve0_fifo_sclass;
534 return 0;
535}
536
398static void 537static void
399nve0_fifo_destroy(struct drm_device *dev, int engine) 538nve0_fifo_dtor(struct nouveau_object *object)
400{ 539{
401 struct drm_nouveau_private *dev_priv = dev->dev_private; 540 struct nve0_fifo_priv *priv = (void *)object;
402 struct nve0_fifo_priv *priv = nv_engine(dev, engine);
403 int i; 541 int i;
404 542
405 nouveau_gpuobj_unmap(&priv->user.bar); 543 nouveau_gpuobj_unmap(&priv->user.bar);
406 nouveau_gpuobj_ref(NULL, &priv->user.mem); 544 nouveau_gpuobj_ref(NULL, &priv->user.mem);
407 545
408 for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) { 546 for (i = 0; i < ARRAY_SIZE(priv->engine); i++) {
409 nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
410 nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]); 547 nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
548 nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
411 } 549 }
412 550
413 dev_priv->eng[engine] = NULL; 551 nouveau_fifo_destroy(&priv->base);
414 kfree(priv);
415} 552}
416 553
417int 554static int
418nve0_fifo_create(struct drm_device *dev) 555nve0_fifo_init(struct nouveau_object *object)
419{ 556{
420 struct drm_nouveau_private *dev_priv = dev->dev_private; 557 struct nve0_fifo_priv *priv = (void *)object;
421 struct nve0_fifo_priv *priv; 558 int ret, i;
422 int ret;
423 559
424 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 560 ret = nouveau_fifo_init(&priv->base);
425 if (!priv) 561 if (ret)
426 return -ENOMEM; 562 return ret;
427 563
428 priv->base.base.destroy = nve0_fifo_destroy; 564 /* enable all available PSUBFIFOs */
429 priv->base.base.init = nve0_fifo_init; 565 nv_wr32(priv, 0x000204, 0xffffffff);
430 priv->base.base.fini = nve0_fifo_fini; 566 priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
431 priv->base.base.context_new = nve0_fifo_context_new; 567 nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
432 priv->base.base.context_del = nve0_fifo_context_del;
433 priv->base.channels = 4096;
434 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
435 568
436 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000, 569 /* PSUBFIFO[n] */
437 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem); 570 for (i = 0; i < priv->spoon_nr; i++) {
438 if (ret) 571 nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
439 goto error; 572 nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
573 nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
574 }
440 575
441 ret = nouveau_gpuobj_map_bar(priv->user.mem, NV_MEM_ACCESS_RW, 576 nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
442 &priv->user.bar);
443 if (ret)
444 goto error;
445 577
446 nouveau_irq_register(dev, 8, nve0_fifo_isr); 578 nv_wr32(priv, 0x002a00, 0xffffffff);
447error: 579 nv_wr32(priv, 0x002100, 0xffffffff);
448 if (ret) 580 nv_wr32(priv, 0x002140, 0xbfffffff);
449 priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO); 581 return 0;
450 return ret;
451} 582}
583
584struct nouveau_oclass
585nve0_fifo_oclass = {
586 .handle = NV_ENGINE(FIFO, 0xe0),
587 .ofuncs = &(struct nouveau_ofuncs) {
588 .ctor = nve0_fifo_ctor,
589 .dtor = nve0_fifo_dtor,
590 .init = nve0_fifo_init,
591 .fini = _nouveau_fifo_fini,
592 },
593};
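nve0_fifo_oclass above is the shape every engine ported by this commit ends up with: a nouveau_oclass whose ofuncs bundle ctor/dtor/init/fini, replacing the create/destroy/context hooks the old code parked in dev_priv->eng[]. A hedged skeleton of that boilerplate, assuming the generic engine helpers (nouveau_engine_create(), _nouveau_engine_dtor/init/fini) that the simpler engines in this series use; all "nvxx_example" names and the priv layout are placeholders, not real code:

struct nvxx_example_priv {
	struct nouveau_engine base;
};

static void
nvxx_example_intr(struct nouveau_subdev *subdev)
{
	/* read, report and acknowledge hardware status here */
}

static int
nvxx_example_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	struct nvxx_example_priv *priv;
	int ret;

	ret = nouveau_engine_create(parent, engine, oclass, true,
				    "EXAMPLE", "example", &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00000100;	/* PMC interrupt bit */
	nv_subdev(priv)->intr = nvxx_example_intr;
	return 0;
}

struct nouveau_oclass
nvxx_example_oclass = {
	.handle = NV_ENGINE(FIFO, 0xff),	/* engine type + chipset */
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvxx_example_ctor,
		.dtor = _nouveau_engine_dtor,
		.init = _nouveau_engine_init,
		.fini = _nouveau_engine_fini,
	},
};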
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
index b0795ececbda..e1947013d3bc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
@@ -2,7 +2,7 @@
2#define __NOUVEAU_GRCTX_H__ 2#define __NOUVEAU_GRCTX_H__
3 3
4struct nouveau_grctx { 4struct nouveau_grctx {
5 struct drm_device *dev; 5 struct nouveau_device *device;
6 6
7 enum { 7 enum {
8 NOUVEAU_GRCTX_PROG, 8 NOUVEAU_GRCTX_PROG,
@@ -10,18 +10,18 @@ struct nouveau_grctx {
10 } mode; 10 } mode;
11 void *data; 11 void *data;
12 12
13 uint32_t ctxprog_max; 13 u32 ctxprog_max;
14 uint32_t ctxprog_len; 14 u32 ctxprog_len;
15 uint32_t ctxprog_reg; 15 u32 ctxprog_reg;
16 int ctxprog_label[32]; 16 int ctxprog_label[32];
17 uint32_t ctxvals_pos; 17 u32 ctxvals_pos;
18 uint32_t ctxvals_base; 18 u32 ctxvals_base;
19}; 19};
20 20
21static inline void 21static inline void
22cp_out(struct nouveau_grctx *ctx, uint32_t inst) 22cp_out(struct nouveau_grctx *ctx, u32 inst)
23{ 23{
24 uint32_t *ctxprog = ctx->data; 24 u32 *ctxprog = ctx->data;
25 25
26 if (ctx->mode != NOUVEAU_GRCTX_PROG) 26 if (ctx->mode != NOUVEAU_GRCTX_PROG)
27 return; 27 return;
@@ -31,13 +31,13 @@ cp_out(struct nouveau_grctx *ctx, uint32_t inst)
31} 31}
32 32
33static inline void 33static inline void
34cp_lsr(struct nouveau_grctx *ctx, uint32_t val) 34cp_lsr(struct nouveau_grctx *ctx, u32 val)
35{ 35{
36 cp_out(ctx, CP_LOAD_SR | val); 36 cp_out(ctx, CP_LOAD_SR | val);
37} 37}
38 38
39static inline void 39static inline void
40cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length) 40cp_ctx(struct nouveau_grctx *ctx, u32 reg, u32 length)
41{ 41{
42 ctx->ctxprog_reg = (reg - 0x00400000) >> 2; 42 ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
43 43
@@ -55,7 +55,7 @@ cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
55static inline void 55static inline void
56cp_name(struct nouveau_grctx *ctx, int name) 56cp_name(struct nouveau_grctx *ctx, int name)
57{ 57{
58 uint32_t *ctxprog = ctx->data; 58 u32 *ctxprog = ctx->data;
59 int i; 59 int i;
60 60
61 if (ctx->mode != NOUVEAU_GRCTX_PROG) 61 if (ctx->mode != NOUVEAU_GRCTX_PROG)
@@ -115,7 +115,7 @@ cp_pos(struct nouveau_grctx *ctx, int offset)
115} 115}
116 116
117static inline void 117static inline void
118gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val) 118gr_def(struct nouveau_grctx *ctx, u32 reg, u32 val)
119{ 119{
120 if (ctx->mode != NOUVEAU_GRCTX_VALS) 120 if (ctx->mode != NOUVEAU_GRCTX_VALS)
121 return; 121 return;
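For the grctx diffs that follow, it helps to remember that ctx.h implements a two-mode generator: the same nv40/nv50_grctx_generate() walk either emits ctxprog microcode words (cp_out() fires only in NOUVEAU_GRCTX_PROG mode) or pokes default register values into the context image (gr_def() fires only in NOUVEAU_GRCTX_VALS mode), so one description of the context state serves both jobs. A toy user-space model of the trick (all names and values illustrative, not nouveau code):

#include <stdio.h>

enum mode { PROG, VALS };

struct gen {
	enum mode mode;
	unsigned int prog[16], nprog;	/* PROG: microcode words */
	unsigned int vals[16];		/* VALS: context image */
};

static void
cp_out(struct gen *g, unsigned int inst)
{
	if (g->mode == PROG)
		g->prog[g->nprog++] = inst;
}

static void
gr_def(struct gen *g, unsigned int slot, unsigned int val)
{
	if (g->mode == VALS)
		g->vals[slot] = val;
}

static void
generate(struct gen *g)			/* one walk, two outputs */
{
	cp_out(g, 0x00c00000);		/* lands only in PROG mode */
	gr_def(g, 3, 0x02008821);	/* lands only in VALS mode */
}

int
main(void)
{
	struct gen p = { .mode = PROG }, v = { .mode = VALS };

	generate(&p);
	generate(&v);
	printf("%u prog words, vals[3]=0x%08x\n", p.nprog, v.vals[3]);
	return 0;
}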
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
index b17506d7eb60..e45035efb8ca 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
@@ -22,6 +22,8 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/gpuobj.h>
26
25/* NVIDIA context programs handle a number of other conditions which are 27/* NVIDIA context programs handle a number of other conditions which are
26 * not implemented in our versions. It's not clear why NVIDIA context 28 * not implemented in our versions. It's not clear why NVIDIA context
27 * programs have this code, nor whether it's strictly necessary for 29 * programs have this code, nor whether it's strictly necessary for
@@ -109,8 +111,7 @@
109#define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */ 111#define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */
110#define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */ 112#define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */
111 113
112#include "drmP.h" 114#include "nv40.h"
113#include "nouveau_drv.h"
114#include "ctx.h" 115#include "ctx.h"
115 116
116/* TODO: 117/* TODO:
@@ -118,11 +119,10 @@
118 */ 119 */
119 120
120static int 121static int
121nv40_graph_vs_count(struct drm_device *dev) 122nv40_graph_vs_count(struct nouveau_device *device)
122{ 123{
123 struct drm_nouveau_private *dev_priv = dev->dev_private;
124 124
125 switch (dev_priv->chipset) { 125 switch (device->chipset) {
126 case 0x47: 126 case 0x47:
127 case 0x49: 127 case 0x49:
128 case 0x4b: 128 case 0x4b:
@@ -160,7 +160,7 @@ enum cp_label {
160static void 160static void
161nv40_graph_construct_general(struct nouveau_grctx *ctx) 161nv40_graph_construct_general(struct nouveau_grctx *ctx)
162{ 162{
163 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 163 struct nouveau_device *device = ctx->device;
164 int i; 164 int i;
165 165
166 cp_ctx(ctx, 0x4000a4, 1); 166 cp_ctx(ctx, 0x4000a4, 1);
@@ -187,7 +187,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
187 cp_ctx(ctx, 0x400724, 1); 187 cp_ctx(ctx, 0x400724, 1);
188 gr_def(ctx, 0x400724, 0x02008821); 188 gr_def(ctx, 0x400724, 0x02008821);
189 cp_ctx(ctx, 0x400770, 3); 189 cp_ctx(ctx, 0x400770, 3);
190 if (dev_priv->chipset == 0x40) { 190 if (device->chipset == 0x40) {
191 cp_ctx(ctx, 0x400814, 4); 191 cp_ctx(ctx, 0x400814, 4);
192 cp_ctx(ctx, 0x400828, 5); 192 cp_ctx(ctx, 0x400828, 5);
193 cp_ctx(ctx, 0x400840, 5); 193 cp_ctx(ctx, 0x400840, 5);
@@ -208,7 +208,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
208 gr_def(ctx, 0x4009dc, 0x80000000); 208 gr_def(ctx, 0x4009dc, 0x80000000);
209 } else { 209 } else {
210 cp_ctx(ctx, 0x400840, 20); 210 cp_ctx(ctx, 0x400840, 20);
211 if (nv44_graph_class(ctx->dev)) { 211 if (nv44_graph_class(ctx->device)) {
212 for (i = 0; i < 8; i++) 212 for (i = 0; i < 8; i++)
213 gr_def(ctx, 0x400860 + (i * 4), 0x00000001); 213 gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
214 } 214 }
@@ -217,21 +217,21 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
217 gr_def(ctx, 0x400888, 0x00000040); 217 gr_def(ctx, 0x400888, 0x00000040);
218 cp_ctx(ctx, 0x400894, 11); 218 cp_ctx(ctx, 0x400894, 11);
219 gr_def(ctx, 0x400894, 0x00000040); 219 gr_def(ctx, 0x400894, 0x00000040);
220 if (!nv44_graph_class(ctx->dev)) { 220 if (!nv44_graph_class(ctx->device)) {
221 for (i = 0; i < 8; i++) 221 for (i = 0; i < 8; i++)
222 gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000); 222 gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
223 } 223 }
224 cp_ctx(ctx, 0x4008e0, 2); 224 cp_ctx(ctx, 0x4008e0, 2);
225 cp_ctx(ctx, 0x4008f8, 2); 225 cp_ctx(ctx, 0x4008f8, 2);
226 if (dev_priv->chipset == 0x4c || 226 if (device->chipset == 0x4c ||
227 (dev_priv->chipset & 0xf0) == 0x60) 227 (device->chipset & 0xf0) == 0x60)
228 cp_ctx(ctx, 0x4009f8, 1); 228 cp_ctx(ctx, 0x4009f8, 1);
229 } 229 }
230 cp_ctx(ctx, 0x400a00, 73); 230 cp_ctx(ctx, 0x400a00, 73);
231 gr_def(ctx, 0x400b0c, 0x0b0b0b0c); 231 gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
232 cp_ctx(ctx, 0x401000, 4); 232 cp_ctx(ctx, 0x401000, 4);
233 cp_ctx(ctx, 0x405004, 1); 233 cp_ctx(ctx, 0x405004, 1);
234 switch (dev_priv->chipset) { 234 switch (device->chipset) {
235 case 0x47: 235 case 0x47:
236 case 0x49: 236 case 0x49:
237 case 0x4b: 237 case 0x4b:
@@ -240,7 +240,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
240 break; 240 break;
241 default: 241 default:
242 cp_ctx(ctx, 0x403440, 1); 242 cp_ctx(ctx, 0x403440, 1);
243 switch (dev_priv->chipset) { 243 switch (device->chipset) {
244 case 0x40: 244 case 0x40:
245 gr_def(ctx, 0x403440, 0x00000010); 245 gr_def(ctx, 0x403440, 0x00000010);
246 break; 246 break;
@@ -266,19 +266,19 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
266static void 266static void
267nv40_graph_construct_state3d(struct nouveau_grctx *ctx) 267nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
268{ 268{
269 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 269 struct nouveau_device *device = ctx->device;
270 int i; 270 int i;
271 271
272 if (dev_priv->chipset == 0x40) { 272 if (device->chipset == 0x40) {
273 cp_ctx(ctx, 0x401880, 51); 273 cp_ctx(ctx, 0x401880, 51);
274 gr_def(ctx, 0x401940, 0x00000100); 274 gr_def(ctx, 0x401940, 0x00000100);
275 } else 275 } else
276 if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 || 276 if (device->chipset == 0x46 || device->chipset == 0x47 ||
277 dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) { 277 device->chipset == 0x49 || device->chipset == 0x4b) {
278 cp_ctx(ctx, 0x401880, 32); 278 cp_ctx(ctx, 0x401880, 32);
279 for (i = 0; i < 16; i++) 279 for (i = 0; i < 16; i++)
280 gr_def(ctx, 0x401880 + (i * 4), 0x00000111); 280 gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
281 if (dev_priv->chipset == 0x46) 281 if (device->chipset == 0x46)
282 cp_ctx(ctx, 0x401900, 16); 282 cp_ctx(ctx, 0x401900, 16);
283 cp_ctx(ctx, 0x401940, 3); 283 cp_ctx(ctx, 0x401940, 3);
284 } 284 }
@@ -289,7 +289,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
289 gr_def(ctx, 0x401978, 0xffff0000); 289 gr_def(ctx, 0x401978, 0xffff0000);
290 gr_def(ctx, 0x40197c, 0x00000001); 290 gr_def(ctx, 0x40197c, 0x00000001);
291 gr_def(ctx, 0x401990, 0x46400000); 291 gr_def(ctx, 0x401990, 0x46400000);
292 if (dev_priv->chipset == 0x40) { 292 if (device->chipset == 0x40) {
293 cp_ctx(ctx, 0x4019a0, 2); 293 cp_ctx(ctx, 0x4019a0, 2);
294 cp_ctx(ctx, 0x4019ac, 5); 294 cp_ctx(ctx, 0x4019ac, 5);
295 } else { 295 } else {
@@ -297,7 +297,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
297 cp_ctx(ctx, 0x4019b4, 3); 297 cp_ctx(ctx, 0x4019b4, 3);
298 } 298 }
299 gr_def(ctx, 0x4019bc, 0xffff0000); 299 gr_def(ctx, 0x4019bc, 0xffff0000);
300 switch (dev_priv->chipset) { 300 switch (device->chipset) {
301 case 0x46: 301 case 0x46:
302 case 0x47: 302 case 0x47:
303 case 0x49: 303 case 0x49:
@@ -316,7 +316,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
316 for (i = 0; i < 16; i++) 316 for (i = 0; i < 16; i++)
317 gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000); 317 gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
318 gr_def(ctx, 0x401a8c, 0x4b7fffff); 318 gr_def(ctx, 0x401a8c, 0x4b7fffff);
319 if (dev_priv->chipset == 0x40) { 319 if (device->chipset == 0x40) {
320 cp_ctx(ctx, 0x401ab8, 3); 320 cp_ctx(ctx, 0x401ab8, 3);
321 } else { 321 } else {
322 cp_ctx(ctx, 0x401ab8, 1); 322 cp_ctx(ctx, 0x401ab8, 1);
@@ -327,10 +327,10 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
327 gr_def(ctx, 0x401ad4, 0x70605040); 327 gr_def(ctx, 0x401ad4, 0x70605040);
328 gr_def(ctx, 0x401ad8, 0xb8a89888); 328 gr_def(ctx, 0x401ad8, 0xb8a89888);
329 gr_def(ctx, 0x401adc, 0xf8e8d8c8); 329 gr_def(ctx, 0x401adc, 0xf8e8d8c8);
330 cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1); 330 cp_ctx(ctx, 0x401b10, device->chipset == 0x40 ? 2 : 1);
331 gr_def(ctx, 0x401b10, 0x40100000); 331 gr_def(ctx, 0x401b10, 0x40100000);
332 cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5); 332 cp_ctx(ctx, 0x401b18, device->chipset == 0x40 ? 6 : 5);
333 gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ? 333 gr_def(ctx, 0x401b28, device->chipset == 0x40 ?
334 0x00000004 : 0x00000000); 334 0x00000004 : 0x00000000);
335 cp_ctx(ctx, 0x401b30, 25); 335 cp_ctx(ctx, 0x401b30, 25);
336 gr_def(ctx, 0x401b34, 0x0000ffff); 336 gr_def(ctx, 0x401b34, 0x0000ffff);
@@ -341,8 +341,8 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
341 gr_def(ctx, 0x401b84, 0xffffffff); 341 gr_def(ctx, 0x401b84, 0xffffffff);
342 gr_def(ctx, 0x401b88, 0x00ff7000); 342 gr_def(ctx, 0x401b88, 0x00ff7000);
343 gr_def(ctx, 0x401b8c, 0x0000ffff); 343 gr_def(ctx, 0x401b8c, 0x0000ffff);
344 if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a && 344 if (device->chipset != 0x44 && device->chipset != 0x4a &&
345 dev_priv->chipset != 0x4e) 345 device->chipset != 0x4e)
346 cp_ctx(ctx, 0x401b94, 1); 346 cp_ctx(ctx, 0x401b94, 1);
347 cp_ctx(ctx, 0x401b98, 8); 347 cp_ctx(ctx, 0x401b98, 8);
348 gr_def(ctx, 0x401b9c, 0x00ff0000); 348 gr_def(ctx, 0x401b9c, 0x00ff0000);
@@ -371,12 +371,12 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
371static void 371static void
372nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx) 372nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
373{ 373{
374 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 374 struct nouveau_device *device = ctx->device;
375 int i; 375 int i;
376 376
377 cp_ctx(ctx, 0x402000, 1); 377 cp_ctx(ctx, 0x402000, 1);
378 cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2); 378 cp_ctx(ctx, 0x402404, device->chipset == 0x40 ? 1 : 2);
379 switch (dev_priv->chipset) { 379 switch (device->chipset) {
380 case 0x40: 380 case 0x40:
381 gr_def(ctx, 0x402404, 0x00000001); 381 gr_def(ctx, 0x402404, 0x00000001);
382 break; 382 break;
@@ -393,9 +393,9 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
393 default: 393 default:
394 gr_def(ctx, 0x402404, 0x00000021); 394 gr_def(ctx, 0x402404, 0x00000021);
395 } 395 }
396 if (dev_priv->chipset != 0x40) 396 if (device->chipset != 0x40)
397 gr_def(ctx, 0x402408, 0x030c30c3); 397 gr_def(ctx, 0x402408, 0x030c30c3);
398 switch (dev_priv->chipset) { 398 switch (device->chipset) {
399 case 0x44: 399 case 0x44:
400 case 0x46: 400 case 0x46:
401 case 0x4a: 401 case 0x4a:
@@ -408,10 +408,10 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
408 default: 408 default:
409 break; 409 break;
410 } 410 }
411 cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9); 411 cp_ctx(ctx, 0x402480, device->chipset == 0x40 ? 8 : 9);
412 gr_def(ctx, 0x402488, 0x3e020200); 412 gr_def(ctx, 0x402488, 0x3e020200);
413 gr_def(ctx, 0x40248c, 0x00ffffff); 413 gr_def(ctx, 0x40248c, 0x00ffffff);
414 switch (dev_priv->chipset) { 414 switch (device->chipset) {
415 case 0x40: 415 case 0x40:
416 gr_def(ctx, 0x402490, 0x60103f00); 416 gr_def(ctx, 0x402490, 0x60103f00);
417 break; 417 break;
@@ -428,16 +428,16 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
428 gr_def(ctx, 0x402490, 0x0c103f00); 428 gr_def(ctx, 0x402490, 0x0c103f00);
429 break; 429 break;
430 } 430 }
431 gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ? 431 gr_def(ctx, 0x40249c, device->chipset <= 0x43 ?
432 0x00020000 : 0x00040000); 432 0x00020000 : 0x00040000);
433 cp_ctx(ctx, 0x402500, 31); 433 cp_ctx(ctx, 0x402500, 31);
434 gr_def(ctx, 0x402530, 0x00008100); 434 gr_def(ctx, 0x402530, 0x00008100);
435 if (dev_priv->chipset == 0x40) 435 if (device->chipset == 0x40)
436 cp_ctx(ctx, 0x40257c, 6); 436 cp_ctx(ctx, 0x40257c, 6);
437 cp_ctx(ctx, 0x402594, 16); 437 cp_ctx(ctx, 0x402594, 16);
438 cp_ctx(ctx, 0x402800, 17); 438 cp_ctx(ctx, 0x402800, 17);
439 gr_def(ctx, 0x402800, 0x00000001); 439 gr_def(ctx, 0x402800, 0x00000001);
440 switch (dev_priv->chipset) { 440 switch (device->chipset) {
441 case 0x47: 441 case 0x47:
442 case 0x49: 442 case 0x49:
443 case 0x4b: 443 case 0x4b:
@@ -445,7 +445,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
445 gr_def(ctx, 0x402864, 0x00001001); 445 gr_def(ctx, 0x402864, 0x00001001);
446 cp_ctx(ctx, 0x402870, 3); 446 cp_ctx(ctx, 0x402870, 3);
447 gr_def(ctx, 0x402878, 0x00000003); 447 gr_def(ctx, 0x402878, 0x00000003);
 448 if (dev_priv->chipset != 0x47) { /* belongs at end!! */ 448 if (device->chipset != 0x47) { /* belongs at end!! */
449 cp_ctx(ctx, 0x402900, 1); 449 cp_ctx(ctx, 0x402900, 1);
450 cp_ctx(ctx, 0x402940, 1); 450 cp_ctx(ctx, 0x402940, 1);
451 cp_ctx(ctx, 0x402980, 1); 451 cp_ctx(ctx, 0x402980, 1);
@@ -470,9 +470,9 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
470 } 470 }
471 471
472 cp_ctx(ctx, 0x402c00, 4); 472 cp_ctx(ctx, 0x402c00, 4);
473 gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ? 473 gr_def(ctx, 0x402c00, device->chipset == 0x40 ?
474 0x80800001 : 0x00888001); 474 0x80800001 : 0x00888001);
475 switch (dev_priv->chipset) { 475 switch (device->chipset) {
476 case 0x47: 476 case 0x47:
477 case 0x49: 477 case 0x49:
478 case 0x4b: 478 case 0x4b:
@@ -485,30 +485,30 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
485 break; 485 break;
486 default: 486 default:
487 cp_ctx(ctx, 0x402c10, 4); 487 cp_ctx(ctx, 0x402c10, 4);
488 if (dev_priv->chipset == 0x40) 488 if (device->chipset == 0x40)
489 cp_ctx(ctx, 0x402c20, 36); 489 cp_ctx(ctx, 0x402c20, 36);
490 else 490 else
491 if (dev_priv->chipset <= 0x42) 491 if (device->chipset <= 0x42)
492 cp_ctx(ctx, 0x402c20, 24); 492 cp_ctx(ctx, 0x402c20, 24);
493 else 493 else
494 if (dev_priv->chipset <= 0x4a) 494 if (device->chipset <= 0x4a)
495 cp_ctx(ctx, 0x402c20, 16); 495 cp_ctx(ctx, 0x402c20, 16);
496 else 496 else
497 cp_ctx(ctx, 0x402c20, 8); 497 cp_ctx(ctx, 0x402c20, 8);
498 cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13); 498 cp_ctx(ctx, 0x402cb0, device->chipset == 0x40 ? 12 : 13);
499 gr_def(ctx, 0x402cd4, 0x00000005); 499 gr_def(ctx, 0x402cd4, 0x00000005);
500 if (dev_priv->chipset != 0x40) 500 if (device->chipset != 0x40)
501 gr_def(ctx, 0x402ce0, 0x0000ffff); 501 gr_def(ctx, 0x402ce0, 0x0000ffff);
502 break; 502 break;
503 } 503 }
504 504
505 cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3); 505 cp_ctx(ctx, 0x403400, device->chipset == 0x40 ? 4 : 3);
506 cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3); 506 cp_ctx(ctx, 0x403410, device->chipset == 0x40 ? 4 : 3);
507 cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev)); 507 cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->device));
508 for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++) 508 for (i = 0; i < nv40_graph_vs_count(ctx->device); i++)
509 gr_def(ctx, 0x403420 + (i * 4), 0x00005555); 509 gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
510 510
511 if (dev_priv->chipset != 0x40) { 511 if (device->chipset != 0x40) {
512 cp_ctx(ctx, 0x403600, 1); 512 cp_ctx(ctx, 0x403600, 1);
513 gr_def(ctx, 0x403600, 0x00000001); 513 gr_def(ctx, 0x403600, 0x00000001);
514 } 514 }
@@ -516,7 +516,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
516 516
517 cp_ctx(ctx, 0x403c18, 1); 517 cp_ctx(ctx, 0x403c18, 1);
518 gr_def(ctx, 0x403c18, 0x00000001); 518 gr_def(ctx, 0x403c18, 0x00000001);
519 switch (dev_priv->chipset) { 519 switch (device->chipset) {
520 case 0x46: 520 case 0x46:
521 case 0x47: 521 case 0x47:
522 case 0x49: 522 case 0x49:
@@ -527,7 +527,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
527 gr_def(ctx, 0x405c24, 0x000e3000); 527 gr_def(ctx, 0x405c24, 0x000e3000);
528 break; 528 break;
529 } 529 }
530 if (dev_priv->chipset != 0x4e) 530 if (device->chipset != 0x4e)
531 cp_ctx(ctx, 0x405800, 11); 531 cp_ctx(ctx, 0x405800, 11);
532 cp_ctx(ctx, 0x407000, 1); 532 cp_ctx(ctx, 0x407000, 1);
533} 533}
@@ -535,7 +535,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
535static void 535static void
536nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx) 536nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
537{ 537{
538 int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684; 538 int len = nv44_graph_class(ctx->device) ? 0x0084 : 0x0684;
539 539
540 cp_out (ctx, 0x300000); 540 cp_out (ctx, 0x300000);
541 cp_lsr (ctx, len - 4); 541 cp_lsr (ctx, len - 4);
@@ -550,32 +550,31 @@ nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
550static void 550static void
551nv40_graph_construct_shader(struct nouveau_grctx *ctx) 551nv40_graph_construct_shader(struct nouveau_grctx *ctx)
552{ 552{
553 struct drm_device *dev = ctx->dev; 553 struct nouveau_device *device = ctx->device;
554 struct drm_nouveau_private *dev_priv = dev->dev_private;
555 struct nouveau_gpuobj *obj = ctx->data; 554 struct nouveau_gpuobj *obj = ctx->data;
556 int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset; 555 int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
557 int offset, i; 556 int offset, i;
558 557
559 vs_nr = nv40_graph_vs_count(ctx->dev); 558 vs_nr = nv40_graph_vs_count(ctx->device);
560 vs_nr_b0 = 363; 559 vs_nr_b0 = 363;
561 vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64; 560 vs_nr_b1 = device->chipset == 0x40 ? 128 : 64;
562 if (dev_priv->chipset == 0x40) { 561 if (device->chipset == 0x40) {
563 b0_offset = 0x2200/4; /* 33a0 */ 562 b0_offset = 0x2200/4; /* 33a0 */
564 b1_offset = 0x55a0/4; /* 1500 */ 563 b1_offset = 0x55a0/4; /* 1500 */
565 vs_len = 0x6aa0/4; 564 vs_len = 0x6aa0/4;
566 } else 565 } else
567 if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) { 566 if (device->chipset == 0x41 || device->chipset == 0x42) {
568 b0_offset = 0x2200/4; /* 2200 */ 567 b0_offset = 0x2200/4; /* 2200 */
569 b1_offset = 0x4400/4; /* 0b00 */ 568 b1_offset = 0x4400/4; /* 0b00 */
570 vs_len = 0x4f00/4; 569 vs_len = 0x4f00/4;
571 } else { 570 } else {
572 b0_offset = 0x1d40/4; /* 2200 */ 571 b0_offset = 0x1d40/4; /* 2200 */
573 b1_offset = 0x3f40/4; /* 0b00 : 0a40 */ 572 b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
574 vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4; 573 vs_len = nv44_graph_class(device) ? 0x4980/4 : 0x4a40/4;
575 } 574 }
576 575
577 cp_lsr(ctx, vs_len * vs_nr + 0x300/4); 576 cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
578 cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041); 577 cp_out(ctx, nv44_graph_class(device) ? 0x800029 : 0x800041);
579 578
580 offset = ctx->ctxvals_pos; 579 offset = ctx->ctxvals_pos;
581 ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len)); 580 ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
@@ -661,21 +660,21 @@ nv40_grctx_generate(struct nouveau_grctx *ctx)
661} 660}
662 661
663void 662void
664nv40_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem) 663nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
665{ 664{
666 nv40_grctx_generate(&(struct nouveau_grctx) { 665 nv40_grctx_generate(&(struct nouveau_grctx) {
667 .dev = dev, 666 .device = device,
668 .mode = NOUVEAU_GRCTX_VALS, 667 .mode = NOUVEAU_GRCTX_VALS,
669 .data = mem, 668 .data = mem,
670 }); 669 });
671} 670}
672 671
673void 672void
674nv40_grctx_init(struct drm_device *dev, u32 *size) 673nv40_grctx_init(struct nouveau_device *device, u32 *size)
675{ 674{
676 u32 ctxprog[256], i; 675 u32 ctxprog[256], i;
677 struct nouveau_grctx ctx = { 676 struct nouveau_grctx ctx = {
678 .dev = dev, 677 .device = device,
679 .mode = NOUVEAU_GRCTX_PROG, 678 .mode = NOUVEAU_GRCTX_PROG,
680 .data = ctxprog, 679 .data = ctxprog,
681 .ctxprog_max = ARRAY_SIZE(ctxprog) 680 .ctxprog_max = ARRAY_SIZE(ctxprog)
@@ -683,8 +682,8 @@ nv40_grctx_init(struct drm_device *dev, u32 *size)
683 682
684 nv40_grctx_generate(&ctx); 683 nv40_grctx_generate(&ctx);
685 684
686 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); 685 nv_wr32(device, 0x400324, 0);
687 for (i = 0; i < ctx.ctxprog_len; i++) 686 for (i = 0; i < ctx.ctxprog_len; i++)
688 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, ctxprog[i]); 687 nv_wr32(device, 0x400328, ctxprog[i]);
689 *size = ctx.ctxvals_pos * 4; 688 *size = ctx.ctxvals_pos * 4;
690} 689}
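The rewritten upload loop in nv40_grctx_init() trades the NV40_PGRAPH_CTXCTL_UCODE_INDEX/DATA symbols for the raw offsets 0x400324/0x400328, but the idiom is unchanged: an index/data register pair, where a single write resets the index and the data port then accepts the whole ctxprog stream (the hardware evidently auto-increments, given that the index is written only once). A stubbed model of the loop; wr32() here is a hypothetical stand-in for nv_wr32(device, ...), not a real helper:

#include <stdio.h>

static void
wr32(unsigned int reg, unsigned int val)
{
	printf("wr32 0x%06x <- 0x%08x\n", reg, val);
}

static void
upload_ctxprog(const unsigned int *ctxprog, unsigned int len)
{
	unsigned int i;

	wr32(0x400324, 0);			/* reset ucode index */
	for (i = 0; i < len; i++)
		wr32(0x400328, ctxprog[i]);	/* stream words; the index
						 * auto-increments */
}

int
main(void)
{
	const unsigned int prog[] = { 0x00c00000, 0x0060000b };

	upload_ctxprog(prog, 2);
	return 0;
}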
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
index e17c17bfd89e..552fdbd45ebe 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
@@ -20,6 +20,8 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 */ 21 */
22 22
23#include <core/gpuobj.h>
24
23#define CP_FLAG_CLEAR 0 25#define CP_FLAG_CLEAR 0
24#define CP_FLAG_SET 1 26#define CP_FLAG_SET 1
25#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0) 27#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
@@ -105,8 +107,7 @@
105#define CP_SEEK_1 0x00c000ff 107#define CP_SEEK_1 0x00c000ff
106#define CP_SEEK_2 0x00c800ff 108#define CP_SEEK_2 0x00c800ff
107 109
108#include "drmP.h" 110#include "nv50.h"
109#include "nouveau_drv.h"
110#include "ctx.h" 111#include "ctx.h"
111 112
112#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf) 113#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
@@ -175,32 +176,6 @@ static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
175static int 176static int
176nv50_grctx_generate(struct nouveau_grctx *ctx) 177nv50_grctx_generate(struct nouveau_grctx *ctx)
177{ 178{
178 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
179
180 switch (dev_priv->chipset) {
181 case 0x50:
182 case 0x84:
183 case 0x86:
184 case 0x92:
185 case 0x94:
186 case 0x96:
187 case 0x98:
188 case 0xa0:
189 case 0xa3:
190 case 0xa5:
191 case 0xa8:
192 case 0xaa:
193 case 0xac:
194 case 0xaf:
195 break;
196 default:
197 NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
198 "your NV%x card.\n", dev_priv->chipset);
199 NV_ERROR(ctx->dev, "Disabling acceleration. Please contact "
200 "the devs.\n");
201 return -ENOSYS;
202 }
203
204 cp_set (ctx, STATE, RUNNING); 179 cp_set (ctx, STATE, RUNNING);
205 cp_set (ctx, XFER_SWITCH, ENABLE); 180 cp_set (ctx, XFER_SWITCH, ENABLE);
206 /* decide whether we're loading/unloading the context */ 181 /* decide whether we're loading/unloading the context */
@@ -278,30 +253,36 @@ nv50_grctx_generate(struct nouveau_grctx *ctx)
278} 253}
279 254
280void 255void
281nv50_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem) 256nv50_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
282{ 257{
283 nv50_grctx_generate(&(struct nouveau_grctx) { 258 nv50_grctx_generate(&(struct nouveau_grctx) {
284 .dev = dev, 259 .device = device,
285 .mode = NOUVEAU_GRCTX_VALS, 260 .mode = NOUVEAU_GRCTX_VALS,
286 .data = mem, 261 .data = mem,
287 }); 262 });
288} 263}
289 264
290int 265int
291nv50_grctx_init(struct drm_device *dev, u32 *data, u32 max, u32 *len, u32 *cnt) 266nv50_grctx_init(struct nouveau_device *device, u32 *size)
292{ 267{
268 u32 *ctxprog = kmalloc(512 * 4, GFP_KERNEL), i;
293 struct nouveau_grctx ctx = { 269 struct nouveau_grctx ctx = {
294 .dev = dev, 270 .device = device,
295 .mode = NOUVEAU_GRCTX_PROG, 271 .mode = NOUVEAU_GRCTX_PROG,
296 .data = data, 272 .data = ctxprog,
297 .ctxprog_max = max 273 .ctxprog_max = 512,
298 }; 274 };
299 int ret;
300 275
301 ret = nv50_grctx_generate(&ctx); 276 if (!ctxprog)
302 *cnt = ctx.ctxvals_pos * 4; 277 return -ENOMEM;
303 *len = ctx.ctxprog_len; 278 nv50_grctx_generate(&ctx);
304 return ret; 279
280 nv_wr32(device, 0x400324, 0);
281 for (i = 0; i < ctx.ctxprog_len; i++)
282 nv_wr32(device, 0x400328, ctxprog[i]);
283 *size = ctx.ctxvals_pos * 4;
284 kfree(ctxprog);
285 return 0;
305} 286}
306 287
307/* 288/*
@@ -315,36 +296,36 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx);
315static void 296static void
316nv50_graph_construct_mmio(struct nouveau_grctx *ctx) 297nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
317{ 298{
318 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 299 struct nouveau_device *device = ctx->device;
319 int i, j; 300 int i, j;
320 int offset, base; 301 int offset, base;
321 uint32_t units = nv_rd32 (ctx->dev, 0x1540); 302 u32 units = nv_rd32 (ctx->device, 0x1540);
322 303
323 /* 0800: DISPATCH */ 304 /* 0800: DISPATCH */
324 cp_ctx(ctx, 0x400808, 7); 305 cp_ctx(ctx, 0x400808, 7);
325 gr_def(ctx, 0x400814, 0x00000030); 306 gr_def(ctx, 0x400814, 0x00000030);
326 cp_ctx(ctx, 0x400834, 0x32); 307 cp_ctx(ctx, 0x400834, 0x32);
327 if (dev_priv->chipset == 0x50) { 308 if (device->chipset == 0x50) {
328 gr_def(ctx, 0x400834, 0xff400040); 309 gr_def(ctx, 0x400834, 0xff400040);
329 gr_def(ctx, 0x400838, 0xfff00080); 310 gr_def(ctx, 0x400838, 0xfff00080);
330 gr_def(ctx, 0x40083c, 0xfff70090); 311 gr_def(ctx, 0x40083c, 0xfff70090);
331 gr_def(ctx, 0x400840, 0xffe806a8); 312 gr_def(ctx, 0x400840, 0xffe806a8);
332 } 313 }
333 gr_def(ctx, 0x400844, 0x00000002); 314 gr_def(ctx, 0x400844, 0x00000002);
334 if (IS_NVA3F(dev_priv->chipset)) 315 if (IS_NVA3F(device->chipset))
335 gr_def(ctx, 0x400894, 0x00001000); 316 gr_def(ctx, 0x400894, 0x00001000);
336 gr_def(ctx, 0x4008e8, 0x00000003); 317 gr_def(ctx, 0x4008e8, 0x00000003);
337 gr_def(ctx, 0x4008ec, 0x00001000); 318 gr_def(ctx, 0x4008ec, 0x00001000);
338 if (dev_priv->chipset == 0x50) 319 if (device->chipset == 0x50)
339 cp_ctx(ctx, 0x400908, 0xb); 320 cp_ctx(ctx, 0x400908, 0xb);
340 else if (dev_priv->chipset < 0xa0) 321 else if (device->chipset < 0xa0)
341 cp_ctx(ctx, 0x400908, 0xc); 322 cp_ctx(ctx, 0x400908, 0xc);
342 else 323 else
343 cp_ctx(ctx, 0x400908, 0xe); 324 cp_ctx(ctx, 0x400908, 0xe);
344 325
345 if (dev_priv->chipset >= 0xa0) 326 if (device->chipset >= 0xa0)
346 cp_ctx(ctx, 0x400b00, 0x1); 327 cp_ctx(ctx, 0x400b00, 0x1);
347 if (IS_NVA3F(dev_priv->chipset)) { 328 if (IS_NVA3F(device->chipset)) {
348 cp_ctx(ctx, 0x400b10, 0x1); 329 cp_ctx(ctx, 0x400b10, 0x1);
349 gr_def(ctx, 0x400b10, 0x0001629d); 330 gr_def(ctx, 0x400b10, 0x0001629d);
350 cp_ctx(ctx, 0x400b20, 0x1); 331 cp_ctx(ctx, 0x400b20, 0x1);
@@ -358,10 +339,10 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
358 gr_def(ctx, 0x400c08, 0x0000fe0c); 339 gr_def(ctx, 0x400c08, 0x0000fe0c);
359 340
360 /* 1000 */ 341 /* 1000 */
361 if (dev_priv->chipset < 0xa0) { 342 if (device->chipset < 0xa0) {
362 cp_ctx(ctx, 0x401008, 0x4); 343 cp_ctx(ctx, 0x401008, 0x4);
363 gr_def(ctx, 0x401014, 0x00001000); 344 gr_def(ctx, 0x401014, 0x00001000);
364 } else if (!IS_NVA3F(dev_priv->chipset)) { 345 } else if (!IS_NVA3F(device->chipset)) {
365 cp_ctx(ctx, 0x401008, 0x5); 346 cp_ctx(ctx, 0x401008, 0x5);
366 gr_def(ctx, 0x401018, 0x00001000); 347 gr_def(ctx, 0x401018, 0x00001000);
367 } else { 348 } else {
@@ -372,7 +353,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
372 /* 1400 */ 353 /* 1400 */
373 cp_ctx(ctx, 0x401400, 0x8); 354 cp_ctx(ctx, 0x401400, 0x8);
374 cp_ctx(ctx, 0x401424, 0x3); 355 cp_ctx(ctx, 0x401424, 0x3);
375 if (dev_priv->chipset == 0x50) 356 if (device->chipset == 0x50)
376 gr_def(ctx, 0x40142c, 0x0001fd87); 357 gr_def(ctx, 0x40142c, 0x0001fd87);
377 else 358 else
378 gr_def(ctx, 0x40142c, 0x00000187); 359 gr_def(ctx, 0x40142c, 0x00000187);
@@ -382,10 +363,10 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
382 /* 1800: STREAMOUT */ 363 /* 1800: STREAMOUT */
383 cp_ctx(ctx, 0x401814, 0x1); 364 cp_ctx(ctx, 0x401814, 0x1);
384 gr_def(ctx, 0x401814, 0x000000ff); 365 gr_def(ctx, 0x401814, 0x000000ff);
385 if (dev_priv->chipset == 0x50) { 366 if (device->chipset == 0x50) {
386 cp_ctx(ctx, 0x40181c, 0xe); 367 cp_ctx(ctx, 0x40181c, 0xe);
387 gr_def(ctx, 0x401850, 0x00000004); 368 gr_def(ctx, 0x401850, 0x00000004);
388 } else if (dev_priv->chipset < 0xa0) { 369 } else if (device->chipset < 0xa0) {
389 cp_ctx(ctx, 0x40181c, 0xf); 370 cp_ctx(ctx, 0x40181c, 0xf);
390 gr_def(ctx, 0x401854, 0x00000004); 371 gr_def(ctx, 0x401854, 0x00000004);
391 } else { 372 } else {
@@ -395,7 +376,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
395 376
396 /* 1C00 */ 377 /* 1C00 */
397 cp_ctx(ctx, 0x401c00, 0x1); 378 cp_ctx(ctx, 0x401c00, 0x1);
398 switch (dev_priv->chipset) { 379 switch (device->chipset) {
399 case 0x50: 380 case 0x50:
400 gr_def(ctx, 0x401c00, 0x0001005f); 381 gr_def(ctx, 0x401c00, 0x0001005f);
401 break; 382 break;
@@ -424,7 +405,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
424 405
425 /* 2400 */ 406 /* 2400 */
426 cp_ctx(ctx, 0x402400, 0x1); 407 cp_ctx(ctx, 0x402400, 0x1);
427 if (dev_priv->chipset == 0x50) 408 if (device->chipset == 0x50)
428 cp_ctx(ctx, 0x402408, 0x1); 409 cp_ctx(ctx, 0x402408, 0x1);
429 else 410 else
430 cp_ctx(ctx, 0x402408, 0x2); 411 cp_ctx(ctx, 0x402408, 0x2);
@@ -432,21 +413,21 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
432 413
433 /* 2800: CSCHED */ 414 /* 2800: CSCHED */
434 cp_ctx(ctx, 0x402800, 0x1); 415 cp_ctx(ctx, 0x402800, 0x1);
435 if (dev_priv->chipset == 0x50) 416 if (device->chipset == 0x50)
436 gr_def(ctx, 0x402800, 0x00000006); 417 gr_def(ctx, 0x402800, 0x00000006);
437 418
438 /* 2C00: ZCULL */ 419 /* 2C00: ZCULL */
439 cp_ctx(ctx, 0x402c08, 0x6); 420 cp_ctx(ctx, 0x402c08, 0x6);
440 if (dev_priv->chipset != 0x50) 421 if (device->chipset != 0x50)
441 gr_def(ctx, 0x402c14, 0x01000000); 422 gr_def(ctx, 0x402c14, 0x01000000);
442 gr_def(ctx, 0x402c18, 0x000000ff); 423 gr_def(ctx, 0x402c18, 0x000000ff);
443 if (dev_priv->chipset == 0x50) 424 if (device->chipset == 0x50)
444 cp_ctx(ctx, 0x402ca0, 0x1); 425 cp_ctx(ctx, 0x402ca0, 0x1);
445 else 426 else
446 cp_ctx(ctx, 0x402ca0, 0x2); 427 cp_ctx(ctx, 0x402ca0, 0x2);
447 if (dev_priv->chipset < 0xa0) 428 if (device->chipset < 0xa0)
448 gr_def(ctx, 0x402ca0, 0x00000400); 429 gr_def(ctx, 0x402ca0, 0x00000400);
449 else if (!IS_NVA3F(dev_priv->chipset)) 430 else if (!IS_NVA3F(device->chipset))
450 gr_def(ctx, 0x402ca0, 0x00000800); 431 gr_def(ctx, 0x402ca0, 0x00000800);
451 else 432 else
452 gr_def(ctx, 0x402ca0, 0x00000400); 433 gr_def(ctx, 0x402ca0, 0x00000400);
@@ -457,14 +438,14 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
457 gr_def(ctx, 0x403004, 0x00000001); 438 gr_def(ctx, 0x403004, 0x00000001);
458 439
459 /* 3400 */ 440 /* 3400 */
460 if (dev_priv->chipset >= 0xa0) { 441 if (device->chipset >= 0xa0) {
461 cp_ctx(ctx, 0x403404, 0x1); 442 cp_ctx(ctx, 0x403404, 0x1);
462 gr_def(ctx, 0x403404, 0x00000001); 443 gr_def(ctx, 0x403404, 0x00000001);
463 } 444 }
464 445
465 /* 5000: CCACHE */ 446 /* 5000: CCACHE */
466 cp_ctx(ctx, 0x405000, 0x1); 447 cp_ctx(ctx, 0x405000, 0x1);
467 switch (dev_priv->chipset) { 448 switch (device->chipset) {
468 case 0x50: 449 case 0x50:
469 gr_def(ctx, 0x405000, 0x00300080); 450 gr_def(ctx, 0x405000, 0x00300080);
470 break; 451 break;
@@ -493,22 +474,22 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
493 cp_ctx(ctx, 0x40502c, 0x1); 474 cp_ctx(ctx, 0x40502c, 0x1);
494 475
495 /* 6000? */ 476 /* 6000? */
496 if (dev_priv->chipset == 0x50) 477 if (device->chipset == 0x50)
497 cp_ctx(ctx, 0x4063e0, 0x1); 478 cp_ctx(ctx, 0x4063e0, 0x1);
498 479
499 /* 6800: M2MF */ 480 /* 6800: M2MF */
500 if (dev_priv->chipset < 0x90) { 481 if (device->chipset < 0x90) {
501 cp_ctx(ctx, 0x406814, 0x2b); 482 cp_ctx(ctx, 0x406814, 0x2b);
502 gr_def(ctx, 0x406818, 0x00000f80); 483 gr_def(ctx, 0x406818, 0x00000f80);
503 gr_def(ctx, 0x406860, 0x007f0080); 484 gr_def(ctx, 0x406860, 0x007f0080);
504 gr_def(ctx, 0x40689c, 0x007f0080); 485 gr_def(ctx, 0x40689c, 0x007f0080);
505 } else { 486 } else {
506 cp_ctx(ctx, 0x406814, 0x4); 487 cp_ctx(ctx, 0x406814, 0x4);
507 if (dev_priv->chipset == 0x98) 488 if (device->chipset == 0x98)
508 gr_def(ctx, 0x406818, 0x00000f80); 489 gr_def(ctx, 0x406818, 0x00000f80);
509 else 490 else
510 gr_def(ctx, 0x406818, 0x00001f80); 491 gr_def(ctx, 0x406818, 0x00001f80);
511 if (IS_NVA3F(dev_priv->chipset)) 492 if (IS_NVA3F(device->chipset))
512 gr_def(ctx, 0x40681c, 0x00000030); 493 gr_def(ctx, 0x40681c, 0x00000030);
513 cp_ctx(ctx, 0x406830, 0x3); 494 cp_ctx(ctx, 0x406830, 0x3);
514 } 495 }
@@ -517,43 +498,43 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
517 for (i = 0; i < 8; i++) { 498 for (i = 0; i < 8; i++) {
518 if (units & (1<<(i+16))) { 499 if (units & (1<<(i+16))) {
519 cp_ctx(ctx, 0x407000 + (i<<8), 3); 500 cp_ctx(ctx, 0x407000 + (i<<8), 3);
520 if (dev_priv->chipset == 0x50) 501 if (device->chipset == 0x50)
521 gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820); 502 gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
522 else if (dev_priv->chipset != 0xa5) 503 else if (device->chipset != 0xa5)
523 gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821); 504 gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
524 else 505 else
525 gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821); 506 gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
526 gr_def(ctx, 0x407004 + (i<<8), 0x89058001); 507 gr_def(ctx, 0x407004 + (i<<8), 0x89058001);
527 508
528 if (dev_priv->chipset == 0x50) { 509 if (device->chipset == 0x50) {
529 cp_ctx(ctx, 0x407010 + (i<<8), 1); 510 cp_ctx(ctx, 0x407010 + (i<<8), 1);
530 } else if (dev_priv->chipset < 0xa0) { 511 } else if (device->chipset < 0xa0) {
531 cp_ctx(ctx, 0x407010 + (i<<8), 2); 512 cp_ctx(ctx, 0x407010 + (i<<8), 2);
532 gr_def(ctx, 0x407010 + (i<<8), 0x00001000); 513 gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
533 gr_def(ctx, 0x407014 + (i<<8), 0x0000001f); 514 gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
534 } else { 515 } else {
535 cp_ctx(ctx, 0x407010 + (i<<8), 3); 516 cp_ctx(ctx, 0x407010 + (i<<8), 3);
536 gr_def(ctx, 0x407010 + (i<<8), 0x00001000); 517 gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
537 if (dev_priv->chipset != 0xa5) 518 if (device->chipset != 0xa5)
538 gr_def(ctx, 0x407014 + (i<<8), 0x000000ff); 519 gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
539 else 520 else
540 gr_def(ctx, 0x407014 + (i<<8), 0x000001ff); 521 gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
541 } 522 }
542 523
543 cp_ctx(ctx, 0x407080 + (i<<8), 4); 524 cp_ctx(ctx, 0x407080 + (i<<8), 4);
544 if (dev_priv->chipset != 0xa5) 525 if (device->chipset != 0xa5)
545 gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa); 526 gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
546 else 527 else
547 gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa); 528 gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
548 if (dev_priv->chipset == 0x50) 529 if (device->chipset == 0x50)
549 gr_def(ctx, 0x407084 + (i<<8), 0x000000c0); 530 gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
550 else 531 else
551 gr_def(ctx, 0x407084 + (i<<8), 0x400000c0); 532 gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
552 gr_def(ctx, 0x407088 + (i<<8), 0xb7892080); 533 gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);
553 534
554 if (dev_priv->chipset < 0xa0) 535 if (device->chipset < 0xa0)
555 cp_ctx(ctx, 0x407094 + (i<<8), 1); 536 cp_ctx(ctx, 0x407094 + (i<<8), 1);
556 else if (!IS_NVA3F(dev_priv->chipset)) 537 else if (!IS_NVA3F(device->chipset))
557 cp_ctx(ctx, 0x407094 + (i<<8), 3); 538 cp_ctx(ctx, 0x407094 + (i<<8), 3);
558 else { 539 else {
559 cp_ctx(ctx, 0x407094 + (i<<8), 4); 540 cp_ctx(ctx, 0x407094 + (i<<8), 4);
@@ -563,30 +544,30 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
563 } 544 }
564 545
565 cp_ctx(ctx, 0x407c00, 0x3); 546 cp_ctx(ctx, 0x407c00, 0x3);
566 if (dev_priv->chipset < 0x90) 547 if (device->chipset < 0x90)
567 gr_def(ctx, 0x407c00, 0x00010040); 548 gr_def(ctx, 0x407c00, 0x00010040);
568 else if (dev_priv->chipset < 0xa0) 549 else if (device->chipset < 0xa0)
569 gr_def(ctx, 0x407c00, 0x00390040); 550 gr_def(ctx, 0x407c00, 0x00390040);
570 else 551 else
571 gr_def(ctx, 0x407c00, 0x003d0040); 552 gr_def(ctx, 0x407c00, 0x003d0040);
572 gr_def(ctx, 0x407c08, 0x00000022); 553 gr_def(ctx, 0x407c08, 0x00000022);
573 if (dev_priv->chipset >= 0xa0) { 554 if (device->chipset >= 0xa0) {
574 cp_ctx(ctx, 0x407c10, 0x3); 555 cp_ctx(ctx, 0x407c10, 0x3);
575 cp_ctx(ctx, 0x407c20, 0x1); 556 cp_ctx(ctx, 0x407c20, 0x1);
576 cp_ctx(ctx, 0x407c2c, 0x1); 557 cp_ctx(ctx, 0x407c2c, 0x1);
577 } 558 }
578 559
579 if (dev_priv->chipset < 0xa0) { 560 if (device->chipset < 0xa0) {
580 cp_ctx(ctx, 0x407d00, 0x9); 561 cp_ctx(ctx, 0x407d00, 0x9);
581 } else { 562 } else {
582 cp_ctx(ctx, 0x407d00, 0x15); 563 cp_ctx(ctx, 0x407d00, 0x15);
583 } 564 }
584 if (dev_priv->chipset == 0x98) 565 if (device->chipset == 0x98)
585 gr_def(ctx, 0x407d08, 0x00380040); 566 gr_def(ctx, 0x407d08, 0x00380040);
586 else { 567 else {
587 if (dev_priv->chipset < 0x90) 568 if (device->chipset < 0x90)
588 gr_def(ctx, 0x407d08, 0x00010040); 569 gr_def(ctx, 0x407d08, 0x00010040);
589 else if (dev_priv->chipset < 0xa0) 570 else if (device->chipset < 0xa0)
590 gr_def(ctx, 0x407d08, 0x00390040); 571 gr_def(ctx, 0x407d08, 0x00390040);
591 else 572 else
592 gr_def(ctx, 0x407d08, 0x003d0040); 573 gr_def(ctx, 0x407d08, 0x003d0040);
@@ -596,11 +577,11 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
596 /* 8000+: per-TP state */ 577 /* 8000+: per-TP state */
597 for (i = 0; i < 10; i++) { 578 for (i = 0; i < 10; i++) {
598 if (units & (1<<i)) { 579 if (units & (1<<i)) {
599 if (dev_priv->chipset < 0xa0) 580 if (device->chipset < 0xa0)
600 base = 0x408000 + (i<<12); 581 base = 0x408000 + (i<<12);
601 else 582 else
602 base = 0x408000 + (i<<11); 583 base = 0x408000 + (i<<11);
603 if (dev_priv->chipset < 0xa0) 584 if (device->chipset < 0xa0)
604 offset = base + 0xc00; 585 offset = base + 0xc00;
605 else 586 else
606 offset = base + 0x80; 587 offset = base + 0x80;
@@ -609,9 +590,9 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
609 cp_ctx(ctx, offset + 0x08, 1); 590 cp_ctx(ctx, offset + 0x08, 1);
610 591
611 /* per-MP state */ 592 /* per-MP state */
612 for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) { 593 for (j = 0; j < (device->chipset < 0xa0 ? 2 : 4); j++) {
613 if (!(units & (1 << (j+24)))) continue; 594 if (!(units & (1 << (j+24)))) continue;
614 if (dev_priv->chipset < 0xa0) 595 if (device->chipset < 0xa0)
615 offset = base + 0x200 + (j<<7); 596 offset = base + 0x200 + (j<<7);
616 else 597 else
617 offset = base + 0x100 + (j<<7); 598 offset = base + 0x100 + (j<<7);
@@ -620,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
620 gr_def(ctx, offset + 0x04, 0x00160000); 601 gr_def(ctx, offset + 0x04, 0x00160000);
621 gr_def(ctx, offset + 0x08, 0x01800000); 602 gr_def(ctx, offset + 0x08, 0x01800000);
622 gr_def(ctx, offset + 0x18, 0x0003ffff); 603 gr_def(ctx, offset + 0x18, 0x0003ffff);
623 switch (dev_priv->chipset) { 604 switch (device->chipset) {
624 case 0x50: 605 case 0x50:
625 gr_def(ctx, offset + 0x1c, 0x00080000); 606 gr_def(ctx, offset + 0x1c, 0x00080000);
626 break; 607 break;
@@ -651,53 +632,53 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
651 break; 632 break;
652 } 633 }
653 gr_def(ctx, offset + 0x40, 0x00010401); 634 gr_def(ctx, offset + 0x40, 0x00010401);
654 if (dev_priv->chipset == 0x50) 635 if (device->chipset == 0x50)
655 gr_def(ctx, offset + 0x48, 0x00000040); 636 gr_def(ctx, offset + 0x48, 0x00000040);
656 else 637 else
657 gr_def(ctx, offset + 0x48, 0x00000078); 638 gr_def(ctx, offset + 0x48, 0x00000078);
658 gr_def(ctx, offset + 0x50, 0x000000bf); 639 gr_def(ctx, offset + 0x50, 0x000000bf);
659 gr_def(ctx, offset + 0x58, 0x00001210); 640 gr_def(ctx, offset + 0x58, 0x00001210);
660 if (dev_priv->chipset == 0x50) 641 if (device->chipset == 0x50)
661 gr_def(ctx, offset + 0x5c, 0x00000080); 642 gr_def(ctx, offset + 0x5c, 0x00000080);
662 else 643 else
663 gr_def(ctx, offset + 0x5c, 0x08000080); 644 gr_def(ctx, offset + 0x5c, 0x08000080);
664 if (dev_priv->chipset >= 0xa0) 645 if (device->chipset >= 0xa0)
665 gr_def(ctx, offset + 0x68, 0x0000003e); 646 gr_def(ctx, offset + 0x68, 0x0000003e);
666 } 647 }
667 648
668 if (dev_priv->chipset < 0xa0) 649 if (device->chipset < 0xa0)
669 cp_ctx(ctx, base + 0x300, 0x4); 650 cp_ctx(ctx, base + 0x300, 0x4);
670 else 651 else
671 cp_ctx(ctx, base + 0x300, 0x5); 652 cp_ctx(ctx, base + 0x300, 0x5);
672 if (dev_priv->chipset == 0x50) 653 if (device->chipset == 0x50)
673 gr_def(ctx, base + 0x304, 0x00007070); 654 gr_def(ctx, base + 0x304, 0x00007070);
674 else if (dev_priv->chipset < 0xa0) 655 else if (device->chipset < 0xa0)
675 gr_def(ctx, base + 0x304, 0x00027070); 656 gr_def(ctx, base + 0x304, 0x00027070);
676 else if (!IS_NVA3F(dev_priv->chipset)) 657 else if (!IS_NVA3F(device->chipset))
677 gr_def(ctx, base + 0x304, 0x01127070); 658 gr_def(ctx, base + 0x304, 0x01127070);
678 else 659 else
679 gr_def(ctx, base + 0x304, 0x05127070); 660 gr_def(ctx, base + 0x304, 0x05127070);
680 661
681 if (dev_priv->chipset < 0xa0) 662 if (device->chipset < 0xa0)
682 cp_ctx(ctx, base + 0x318, 1); 663 cp_ctx(ctx, base + 0x318, 1);
683 else 664 else
684 cp_ctx(ctx, base + 0x320, 1); 665 cp_ctx(ctx, base + 0x320, 1);
685 if (dev_priv->chipset == 0x50) 666 if (device->chipset == 0x50)
686 gr_def(ctx, base + 0x318, 0x0003ffff); 667 gr_def(ctx, base + 0x318, 0x0003ffff);
687 else if (dev_priv->chipset < 0xa0) 668 else if (device->chipset < 0xa0)
688 gr_def(ctx, base + 0x318, 0x03ffffff); 669 gr_def(ctx, base + 0x318, 0x03ffffff);
689 else 670 else
690 gr_def(ctx, base + 0x320, 0x07ffffff); 671 gr_def(ctx, base + 0x320, 0x07ffffff);
691 672
692 if (dev_priv->chipset < 0xa0) 673 if (device->chipset < 0xa0)
693 cp_ctx(ctx, base + 0x324, 5); 674 cp_ctx(ctx, base + 0x324, 5);
694 else 675 else
695 cp_ctx(ctx, base + 0x328, 4); 676 cp_ctx(ctx, base + 0x328, 4);
696 677
697 if (dev_priv->chipset < 0xa0) { 678 if (device->chipset < 0xa0) {
698 cp_ctx(ctx, base + 0x340, 9); 679 cp_ctx(ctx, base + 0x340, 9);
699 offset = base + 0x340; 680 offset = base + 0x340;
700 } else if (!IS_NVA3F(dev_priv->chipset)) { 681 } else if (!IS_NVA3F(device->chipset)) {
701 cp_ctx(ctx, base + 0x33c, 0xb); 682 cp_ctx(ctx, base + 0x33c, 0xb);
702 offset = base + 0x344; 683 offset = base + 0x344;
703 } else { 684 } else {
@@ -706,12 +687,12 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
706 } 687 }
707 gr_def(ctx, offset + 0x0, 0x00120407); 688 gr_def(ctx, offset + 0x0, 0x00120407);
708 gr_def(ctx, offset + 0x4, 0x05091507); 689 gr_def(ctx, offset + 0x4, 0x05091507);
709 if (dev_priv->chipset == 0x84) 690 if (device->chipset == 0x84)
710 gr_def(ctx, offset + 0x8, 0x05100202); 691 gr_def(ctx, offset + 0x8, 0x05100202);
711 else 692 else
712 gr_def(ctx, offset + 0x8, 0x05010202); 693 gr_def(ctx, offset + 0x8, 0x05010202);
713 gr_def(ctx, offset + 0xc, 0x00030201); 694 gr_def(ctx, offset + 0xc, 0x00030201);
714 if (dev_priv->chipset == 0xa3) 695 if (device->chipset == 0xa3)
715 cp_ctx(ctx, base + 0x36c, 1); 696 cp_ctx(ctx, base + 0x36c, 1);
716 697
717 cp_ctx(ctx, base + 0x400, 2); 698 cp_ctx(ctx, base + 0x400, 2);
@@ -720,7 +701,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
720 gr_def(ctx, base + 0x40c, 0x0d0c0b0a); 701 gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
721 gr_def(ctx, base + 0x410, 0x00141210); 702 gr_def(ctx, base + 0x410, 0x00141210);
722 703
723 if (dev_priv->chipset < 0xa0) 704 if (device->chipset < 0xa0)
724 offset = base + 0x800; 705 offset = base + 0x800;
725 else 706 else
726 offset = base + 0x500; 707 offset = base + 0x500;
@@ -728,55 +709,55 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
728 gr_def(ctx, offset + 0x0, 0x000001f0); 709 gr_def(ctx, offset + 0x0, 0x000001f0);
729 gr_def(ctx, offset + 0x4, 0x00000001); 710 gr_def(ctx, offset + 0x4, 0x00000001);
730 gr_def(ctx, offset + 0x8, 0x00000003); 711 gr_def(ctx, offset + 0x8, 0x00000003);
731 if (dev_priv->chipset == 0x50 || IS_NVAAF(dev_priv->chipset)) 712 if (device->chipset == 0x50 || IS_NVAAF(device->chipset))
732 gr_def(ctx, offset + 0xc, 0x00008000); 713 gr_def(ctx, offset + 0xc, 0x00008000);
733 gr_def(ctx, offset + 0x14, 0x00039e00); 714 gr_def(ctx, offset + 0x14, 0x00039e00);
734 cp_ctx(ctx, offset + 0x1c, 2); 715 cp_ctx(ctx, offset + 0x1c, 2);
735 if (dev_priv->chipset == 0x50) 716 if (device->chipset == 0x50)
736 gr_def(ctx, offset + 0x1c, 0x00000040); 717 gr_def(ctx, offset + 0x1c, 0x00000040);
737 else 718 else
738 gr_def(ctx, offset + 0x1c, 0x00000100); 719 gr_def(ctx, offset + 0x1c, 0x00000100);
739 gr_def(ctx, offset + 0x20, 0x00003800); 720 gr_def(ctx, offset + 0x20, 0x00003800);
740 721
741 if (dev_priv->chipset >= 0xa0) { 722 if (device->chipset >= 0xa0) {
742 cp_ctx(ctx, base + 0x54c, 2); 723 cp_ctx(ctx, base + 0x54c, 2);
743 if (!IS_NVA3F(dev_priv->chipset)) 724 if (!IS_NVA3F(device->chipset))
744 gr_def(ctx, base + 0x54c, 0x003fe006); 725 gr_def(ctx, base + 0x54c, 0x003fe006);
745 else 726 else
746 gr_def(ctx, base + 0x54c, 0x003fe007); 727 gr_def(ctx, base + 0x54c, 0x003fe007);
747 gr_def(ctx, base + 0x550, 0x003fe000); 728 gr_def(ctx, base + 0x550, 0x003fe000);
748 } 729 }
749 730
750 if (dev_priv->chipset < 0xa0) 731 if (device->chipset < 0xa0)
751 offset = base + 0xa00; 732 offset = base + 0xa00;
752 else 733 else
753 offset = base + 0x680; 734 offset = base + 0x680;
754 cp_ctx(ctx, offset, 1); 735 cp_ctx(ctx, offset, 1);
755 gr_def(ctx, offset, 0x00404040); 736 gr_def(ctx, offset, 0x00404040);
756 737
757 if (dev_priv->chipset < 0xa0) 738 if (device->chipset < 0xa0)
758 offset = base + 0xe00; 739 offset = base + 0xe00;
759 else 740 else
760 offset = base + 0x700; 741 offset = base + 0x700;
761 cp_ctx(ctx, offset, 2); 742 cp_ctx(ctx, offset, 2);
762 if (dev_priv->chipset < 0xa0) 743 if (device->chipset < 0xa0)
763 gr_def(ctx, offset, 0x0077f005); 744 gr_def(ctx, offset, 0x0077f005);
764 else if (dev_priv->chipset == 0xa5) 745 else if (device->chipset == 0xa5)
765 gr_def(ctx, offset, 0x6cf7f007); 746 gr_def(ctx, offset, 0x6cf7f007);
766 else if (dev_priv->chipset == 0xa8) 747 else if (device->chipset == 0xa8)
767 gr_def(ctx, offset, 0x6cfff007); 748 gr_def(ctx, offset, 0x6cfff007);
768 else if (dev_priv->chipset == 0xac) 749 else if (device->chipset == 0xac)
769 gr_def(ctx, offset, 0x0cfff007); 750 gr_def(ctx, offset, 0x0cfff007);
770 else 751 else
771 gr_def(ctx, offset, 0x0cf7f007); 752 gr_def(ctx, offset, 0x0cf7f007);
772 if (dev_priv->chipset == 0x50) 753 if (device->chipset == 0x50)
773 gr_def(ctx, offset + 0x4, 0x00007fff); 754 gr_def(ctx, offset + 0x4, 0x00007fff);
774 else if (dev_priv->chipset < 0xa0) 755 else if (device->chipset < 0xa0)
775 gr_def(ctx, offset + 0x4, 0x003f7fff); 756 gr_def(ctx, offset + 0x4, 0x003f7fff);
776 else 757 else
777 gr_def(ctx, offset + 0x4, 0x02bf7fff); 758 gr_def(ctx, offset + 0x4, 0x02bf7fff);
778 cp_ctx(ctx, offset + 0x2c, 1); 759 cp_ctx(ctx, offset + 0x2c, 1);
779 if (dev_priv->chipset == 0x50) { 760 if (device->chipset == 0x50) {
780 cp_ctx(ctx, offset + 0x50, 9); 761 cp_ctx(ctx, offset + 0x50, 9);
781 gr_def(ctx, offset + 0x54, 0x000003ff); 762 gr_def(ctx, offset + 0x54, 0x000003ff);
782 gr_def(ctx, offset + 0x58, 0x00000003); 763 gr_def(ctx, offset + 0x58, 0x00000003);
@@ -785,7 +766,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
785 gr_def(ctx, offset + 0x64, 0x0000001f); 766 gr_def(ctx, offset + 0x64, 0x0000001f);
786 gr_def(ctx, offset + 0x68, 0x0000000f); 767 gr_def(ctx, offset + 0x68, 0x0000000f);
787 gr_def(ctx, offset + 0x6c, 0x0000000f); 768 gr_def(ctx, offset + 0x6c, 0x0000000f);
788 } else if (dev_priv->chipset < 0xa0) { 769 } else if (device->chipset < 0xa0) {
789 cp_ctx(ctx, offset + 0x50, 1); 770 cp_ctx(ctx, offset + 0x50, 1);
790 cp_ctx(ctx, offset + 0x70, 1); 771 cp_ctx(ctx, offset + 0x70, 1);
791 } else { 772 } else {
@@ -797,7 +778,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 }
 
 static void
-dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
+dd_emit(struct nouveau_grctx *ctx, int num, u32 val) {
 	int i;
 	if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
 		for (i = 0; i < num; i++)
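
dd_emit() (and the identical xf_emit() further down) only stores values when the builder runs in NOUVEAU_GRCTX_VALS mode; in the sizing pass it merely advances the cursor, so one walk both measures and fills the context image. A hedged reconstruction of the whole helper; the nv_wo32() store and the final cursor bump are assumed from the surrounding driver, since this hunk only shows the first four lines:

	static void
	dd_emit(struct nouveau_grctx *ctx, int num, u32 val)
	{
		int i;
		if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
			for (i = 0; i < num; i++)
				nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
		ctx->ctxvals_pos += num;	/* advance even when not writing */
	}
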
@@ -808,7 +789,7 @@ dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
 static void
 nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int base, num;
 	base = ctx->ctxvals_pos;
 
@@ -822,7 +803,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 1);	/* 00000001 SRC_LINEAR #1 */
 	dd_emit(ctx, 1, 0);	/* 000000ff SRC_ADDRESS_HIGH */
 	dd_emit(ctx, 1, 0);	/* 00000001 SRC_SRGB */
-	if (dev_priv->chipset >= 0x94)
+	if (device->chipset >= 0x94)
 		dd_emit(ctx, 1, 0);	/* 00000003 eng2d UNK0258 */
 	dd_emit(ctx, 1, 1);	/* 00000fff SRC_DEPTH */
 	dd_emit(ctx, 1, 0x100);	/* 0000ffff SRC_HEIGHT */
@@ -851,7 +832,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 1);	/* 0000007f BLOCKDIM_Z */
 	dd_emit(ctx, 1, 4);	/* 000000ff CP_REG_ALLOC_TEMP */
 	dd_emit(ctx, 1, 1);	/* 00000001 BLOCKDIM_DIRTY */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		dd_emit(ctx, 1, 0);	/* 00000003 UNK03E8 */
 	dd_emit(ctx, 1, 1);	/* 0000007f BLOCK_ALLOC_HALFWARPS */
 	dd_emit(ctx, 1, 1);	/* 00000007 LOCAL_WARPS_NO_CLAMP */
@@ -863,7 +844,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 1);	/* 000007ff BLOCK_ALLOC_THREADS */
 
 	/* compat 2d state */
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		dd_emit(ctx, 4, 0);	/* 0000ffff clip X, Y, W, H */
 
 		dd_emit(ctx, 1, 1);	/* ffffffff chroma COLOR_FORMAT */
@@ -923,7 +904,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 0x100);	/* ffffffff m2mf TILING_PITCH_IN */
 
 	/* more compat 2d state */
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		dd_emit(ctx, 1, 1);	/* ffffffff line COLOR_FORMAT */
 		dd_emit(ctx, 1, 0);	/* ffffffff line OPERATION */
 
@@ -957,18 +938,18 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 0);	/* 000000ff UNK12B0_2 */
 	dd_emit(ctx, 1, 0);	/* 0000000f FP_TEXTURES_LOG2 */
 	dd_emit(ctx, 1, 0);	/* 0000000f FP_SAMPLERS_LOG2 */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		dd_emit(ctx, 1, 0);	/* ffffffff */
 		dd_emit(ctx, 1, 0);	/* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
 	} else {
 		dd_emit(ctx, 1, 0);	/* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
 	}
 	dd_emit(ctx, 1, 0xc);	/* 000000ff SEMANTIC_COLOR.BFC0_ID */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		dd_emit(ctx, 1, 0);	/* 00000001 SEMANTIC_COLOR.CLMP_EN */
 	dd_emit(ctx, 1, 8);	/* 000000ff SEMANTIC_COLOR.COLR_NR */
 	dd_emit(ctx, 1, 0x14);	/* 000000ff SEMANTIC_COLOR.FFC0_ID */
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		dd_emit(ctx, 1, 0);	/* 000000ff SEMANTIC_LAYER */
 		dd_emit(ctx, 1, 0);	/* 00000001 */
 	} else {
@@ -994,7 +975,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 8, 0);	/* ffffffff RT_ADDRESS_LOW */
 	dd_emit(ctx, 1, 0xcf);	/* 000000ff RT_FORMAT */
 	dd_emit(ctx, 7, 0);	/* 000000ff RT_FORMAT */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		dd_emit(ctx, 3, 0);	/* 1, 1, 1 */
 	else
 		dd_emit(ctx, 2, 0);	/* 1, 1 */
@@ -1002,15 +983,15 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 0x80);	/* 0000ffff GP_VERTEX_OUTPUT_COUNT*/
 	dd_emit(ctx, 1, 4);	/* 000000ff GP_REG_ALLOC_RESULT */
 	dd_emit(ctx, 1, 4);	/* 000000ff GP_RESULT_MAP_SIZE */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		dd_emit(ctx, 1, 3);	/* 00000003 */
 		dd_emit(ctx, 1, 0);	/* 00000001 UNK1418. Alone. */
 	}
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		dd_emit(ctx, 1, 3);	/* 00000003 UNK15AC */
 	dd_emit(ctx, 1, 1);	/* ffffffff RASTERIZE_ENABLE */
 	dd_emit(ctx, 1, 0);	/* 00000001 FP_CONTROL.EXPORTS_Z */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		dd_emit(ctx, 1, 0);	/* 00000001 FP_CONTROL.MULTIPLE_RESULTS */
 	dd_emit(ctx, 1, 0x12);	/* 000000ff FP_INTERPOLANT_CTRL.COUNT */
 	dd_emit(ctx, 1, 0x10);	/* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */
@@ -1022,16 +1003,16 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 4);	/* 000000ff FP_RESULT_COUNT */
 	dd_emit(ctx, 1, 2);	/* ffffffff REG_MODE */
 	dd_emit(ctx, 1, 4);	/* 000000ff FP_REG_ALLOC_TEMP */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		dd_emit(ctx, 1, 0);	/* ffffffff */
 	dd_emit(ctx, 1, 0);	/* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */
 	dd_emit(ctx, 1, 0);	/* ffffffff STRMOUT_ENABLE */
 	dd_emit(ctx, 1, 0x3fffff);	/* 003fffff TIC_LIMIT */
 	dd_emit(ctx, 1, 0x1fff);	/* 000fffff TSC_LIMIT */
 	dd_emit(ctx, 1, 0);	/* 00000001 VERTEX_TWO_SIDE_ENABLE*/
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		dd_emit(ctx, 8, 0);	/* 00000001 */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		dd_emit(ctx, 1, 1);	/* 00000007 VTX_ATTR_DEFINE.COMP */
 		dd_emit(ctx, 1, 1);	/* 00000007 VTX_ATTR_DEFINE.SIZE */
 		dd_emit(ctx, 1, 2);	/* 00000007 VTX_ATTR_DEFINE.TYPE */
@@ -1042,20 +1023,20 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 1);	/* 00000001 ZETA_ENABLE */
 	dd_emit(ctx, 1, 0);	/* 0000000f VP_TEXTURES_LOG2 */
 	dd_emit(ctx, 1, 0);	/* 0000000f VP_SAMPLERS_LOG2 */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		dd_emit(ctx, 1, 0);	/* 00000001 */
 	dd_emit(ctx, 1, 2);	/* 00000003 POLYGON_MODE_BACK */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		dd_emit(ctx, 1, 0);	/* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */
 	dd_emit(ctx, 1, 0);	/* 0000ffff CB_ADDR_INDEX */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		dd_emit(ctx, 1, 0);	/* 00000003 */
 	dd_emit(ctx, 1, 0);	/* 00000001 CULL_FACE_ENABLE */
 	dd_emit(ctx, 1, 1);	/* 00000003 CULL_FACE */
 	dd_emit(ctx, 1, 0);	/* 00000001 FRONT_FACE */
 	dd_emit(ctx, 1, 2);	/* 00000003 POLYGON_MODE_FRONT */
 	dd_emit(ctx, 1, 0x1000);	/* 00007fff UNK141C */
-	if (dev_priv->chipset != 0x50) {
+	if (device->chipset != 0x50) {
 		dd_emit(ctx, 1, 0xe00);		/* 7fff */
 		dd_emit(ctx, 1, 0x1000);	/* 7fff */
 		dd_emit(ctx, 1, 0x1e00);	/* 7fff */
@@ -1070,10 +1051,10 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 0);	/* 00000001 VTX_ATTR_MASK_UNK0 nonempty */
 	dd_emit(ctx, 1, 0);	/* 00000001 VTX_ATTR_MASK_UNK1 nonempty */
 	dd_emit(ctx, 1, 0x200);	/* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		dd_emit(ctx, 1, 0x200);
 	dd_emit(ctx, 1, 0);	/* 00000001 */
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		dd_emit(ctx, 1, 1);	/* 00000001 */
 		dd_emit(ctx, 1, 0x70);	/* 000000ff */
 		dd_emit(ctx, 1, 0x80);	/* 000000ff */
@@ -1120,7 +1101,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 
 	num = ctx->ctxvals_pos - base;
 	ctx->ctxvals_pos = base;
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		cp_ctx(ctx, 0x404800, num);
 	else
 		cp_ctx(ctx, 0x405400, num);
@@ -1169,7 +1150,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
  */
 
 static void
-xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
+xf_emit(struct nouveau_grctx *ctx, int num, u32 val) {
 	int i;
 	if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
 		for (i = 0; i < num; i++)
@@ -1201,16 +1182,16 @@ static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
 static void
 nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i;
 	int offset;
 	int size = 0;
-	uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+	u32 units = nv_rd32 (ctx->device, 0x1540);
 
 	offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
 	ctx->ctxvals_base = offset;
 
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		/* Strand 0 */
 		ctx->ctxvals_pos = offset;
 		nv50_graph_construct_gene_dispatch(ctx);
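
nv50_graph_construct_xfer1() interleaves eight strands: each strand parks ctx->ctxvals_pos at offset + strand, builds its genes, and the largest (ctxvals_pos - offset)/8 seen across strands becomes the xfer area size. A sketch of that per-strand bookkeeping under those assumptions; the real code inlines every strand rather than calling a helper like this:

	/* illustrative helper only -- not part of the patch */
	static void
	build_strand(struct nouveau_grctx *ctx, int offset, int strand, int *size)
	{
		ctx->ctxvals_pos = offset + strand;	/* strands interleave by 8 */
		/* ... the construct_gene_*() calls for this strand ... */
		if ((ctx->ctxvals_pos - offset) / 8 > *size)
			*size = (ctx->ctxvals_pos - offset) / 8;
	}
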
@@ -1280,7 +1261,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
 
 	/* Strand 2 */
 	ctx->ctxvals_pos = offset + 2;
-	if (dev_priv->chipset == 0xa0)
+	if (device->chipset == 0xa0)
 		nv50_graph_construct_gene_unk14xx(ctx);
 	nv50_graph_construct_gene_unk24xx(ctx);
 	if ((ctx->ctxvals_pos-offset)/8 > size)
@@ -1327,7 +1308,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
 
 	/* Strand 7 */
 	ctx->ctxvals_pos = offset + 7;
-	if (dev_priv->chipset == 0xa0) {
+	if (device->chipset == 0xa0) {
 		if (units & (1 << 4))
 			nv50_graph_construct_xfer_tp(ctx);
 		if (units & (1 << 5))
@@ -1365,24 +1346,24 @@ static void
 nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
 {
 	/* start of strand 0 */
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* SEEK */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 5, 0);
-	else if (!IS_NVA3F(dev_priv->chipset))
+	else if (!IS_NVA3F(device->chipset))
 		xf_emit(ctx, 6, 0);
 	else
 		xf_emit(ctx, 4, 0);
 	/* SEEK */
 	/* the PGRAPH's internal FIFO */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 8*3, 0);
 	else
 		xf_emit(ctx, 0x100*3, 0);
 	/* and another bonus slot?!? */
 	xf_emit(ctx, 3, 0);
 	/* and YET ANOTHER bonus slot? */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 3, 0);
 	/* SEEK */
 	/* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */
@@ -1394,7 +1375,7 @@ nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
 	/* SEEK */
 	xf_emit(ctx, 9, 0);
 	/* SEEK */
-	if (dev_priv->chipset < 0x90)
+	if (device->chipset < 0x90)
 		xf_emit(ctx, 4, 0);
 	/* SEEK */
 	xf_emit(ctx, 2, 0);
@@ -1407,9 +1388,9 @@ nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 6*2, 0);
 	xf_emit(ctx, 2, 0);
 	/* SEEK */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 0x1c, 0);
-	else if (dev_priv->chipset < 0xa0)
+	else if (device->chipset < 0xa0)
 		xf_emit(ctx, 0x1e, 0);
 	else
 		xf_emit(ctx, 0x22, 0);
@@ -1421,9 +1402,9 @@ static void
 nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
 {
 	/* Strand 0, right after dispatch */
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int smallm2mf = 0;
-	if (dev_priv->chipset < 0x92 || dev_priv->chipset == 0x98)
+	if (device->chipset < 0x92 || device->chipset == 0x98)
 		smallm2mf = 1;
 	/* SEEK */
 	xf_emit (ctx, 1, 0);	/* DMA_NOTIFY instance >> 4 */
@@ -1472,10 +1453,10 @@ nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	xf_emit(ctx, 2, 0);	/* RO */
 	xf_emit(ctx, 0x800, 0);	/* ffffffff */
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x50:
 	case 0x92:
 	case 0xa0:
@@ -1540,7 +1521,7 @@ nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i;
 	/* end of area 2 on pre-NVA0, area 1 on NVAx */
 	xf_emit(ctx, 1, 4);	/* 000000ff GP_RESULT_MAP_SIZE */
@@ -1550,14 +1531,14 @@ nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 4);	/* 000000ff GP_REG_ALLOC_RESULT */
 	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
 	xf_emit(ctx, 1, 0);	/* 00000001 VERTEX_TWO_SIDE_ENABLE */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 1, 0x3ff);
 	else
 		xf_emit(ctx, 1, 0x7ff);	/* 000007ff */
 	xf_emit(ctx, 1, 0);	/* 111/113 */
 	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A30 */
 	for (i = 0; i < 8; i++) {
-		switch (dev_priv->chipset) {
+		switch (device->chipset) {
 		case 0x50:
 		case 0x86:
 		case 0x98:
@@ -1600,7 +1581,7 @@ nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* end of area 2 on pre-NVA0, area 1 on NVAx */
 	xf_emit(ctx, 1, 0);	/* 00000001 VIEWPORT_CLIP_RECTS_EN */
 	xf_emit(ctx, 1, 0);	/* 00000003 VIEWPORT_CLIP_MODE */
@@ -1614,9 +1595,9 @@ nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);	/* 00000003 WINDOW_ORIGIN */
 	xf_emit(ctx, 1, 0);	/* 00000007 */
 	xf_emit(ctx, 1, 0x1fe21);	/* 0001ffff tesla UNK0FAC */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		xf_emit(ctx, 1, 0x0fac6881);
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 1, 1);
 		xf_emit(ctx, 3, 0);
 	}
@@ -1625,9 +1606,9 @@ nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
-	if (dev_priv->chipset != 0x50) {
+	if (device->chipset != 0x50) {
 		xf_emit(ctx, 5, 0);	/* ffffffff */
 		xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
 		xf_emit(ctx, 1, 0);	/* 00000001 */
@@ -1643,14 +1624,14 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);	/* 00000001 GP_ENABLE */
 	xf_emit(ctx, 1, 0x10);	/* 7f/ff VIEW_VOLUME_CLIP_CTRL */
 	xf_emit(ctx, 1, 0);	/* 000000ff VP_CLIP_DISTANCE_ENABLE */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 0);	/* 3ff */
 	xf_emit(ctx, 1, 0);	/* 000000ff tesla UNK1940 */
 	xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK0D7C */
 	xf_emit(ctx, 1, 0x804);	/* 00000fff SEMANTIC_CLIP */
 	xf_emit(ctx, 1, 1);	/* 00000001 VIEWPORT_TRANSFORM_EN */
 	xf_emit(ctx, 1, 0x1a);	/* 0000001f POLYGON_MODE */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 0x7f);	/* 000000ff tesla UNK0FFC */
 	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A30 */
 	xf_emit(ctx, 1, 1);	/* 00000001 SHADE_MODEL */
@@ -1669,7 +1650,7 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 4, 0);	/* ffffffff NOPERSPECTIVE_BITMAP */
 	xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK1900 */
 	xf_emit(ctx, 1, 0);	/* 0000000f */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 1, 0x3ff);	/* 000003ff tesla UNK0D68 */
 	else
 		xf_emit(ctx, 1, 0x7ff);	/* 000007ff tesla UNK0D68 */
@@ -1704,11 +1685,11 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);	/* 00000001 LINE_STIPPLE_ENABLE */
 	xf_emit(ctx, 1, 0);	/* 00000001 LINE_SMOOTH_ENABLE */
 	xf_emit(ctx, 1, 0);	/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 0);	/* 00000001 */
 	xf_emit(ctx, 1, 0x1a);	/* 0000001f POLYGON_MODE */
 	xf_emit(ctx, 1, 0x10);	/* 000000ff VIEW_VOLUME_CLIP_CTRL */
-	if (dev_priv->chipset != 0x50) {
+	if (device->chipset != 0x50) {
 		xf_emit(ctx, 1, 0);	/* ffffffff */
 		xf_emit(ctx, 1, 0);	/* 00000001 */
 		xf_emit(ctx, 1, 0);	/* 000003ff */
@@ -1736,7 +1717,7 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */
 	/* SEEK */
 	xf_emit(ctx, 1, 0x3f);	/* 0000003f UNK1590 */
@@ -1774,7 +1755,7 @@ nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_BACK_ENABLE */
 	xf_emit(ctx, 1, 0);	/* ffffffff CLEAR_DEPTH */
 	xf_emit(ctx, 1, 0);	/* 00000007 */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK1108 */
 	xf_emit(ctx, 1, 0);	/* 00000001 SAMPLECNT_ENABLE */
 	xf_emit(ctx, 1, 0);	/* 0000000f ZETA_FORMAT */
@@ -1789,7 +1770,7 @@ nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);	/* 00000001 VIEWPORT_CLIP_RECTS_EN */
 	xf_emit(ctx, 1, 3);	/* 00000003 FP_CTRL_UNK196C */
 	xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK1968 */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 0);	/* 0fffffff tesla UNK1104 */
 	xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK151C */
 }
@@ -1817,7 +1798,7 @@ nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i;
 	/* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */
 	/* SEEK */
@@ -1829,7 +1810,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 4);	/* 0000007f VP_RESULT_MAP_SIZE */
 	xf_emit(ctx, 1, 4);	/* 000000ff GP_RESULT_MAP_SIZE */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 4, 0);	/* RO */
 		xf_emit(ctx, 0xe10, 0);	/* 190 * 9: 8*ffffffff, 7ff */
 		xf_emit(ctx, 1, 0);	/* 1ff */
@@ -1860,7 +1841,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 4);	/* 000000ff GP_RESULT_MAP_SIZE */
 	xf_emit(ctx, 1, 4);	/* 0000007f VP_RESULT_MAP_SIZE */
 	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK1100 */
 	/* SEEK */
 	xf_emit(ctx, 1, 0);	/* 00000001 GP_ENABLE */
@@ -1869,7 +1850,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
 	xf_emit(ctx, 1, 1);	/* 00000001 */
 	/* SEEK */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		xf_emit(ctx, 2, 4);	/* 000000ff */
 	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
 	xf_emit(ctx, 1, 0);	/* 00000001 VERTEX_TWO_SIDE_ENABLE */
@@ -1893,20 +1874,20 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 0x10, 0);	/* 00ffffff POINT_COORD_REPLACE_MAP */
 	xf_emit(ctx, 1, 0);	/* 00000003 WINDOW_ORIGIN */
 	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 0);	/* 000003ff */
 }
 
 static void
 nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int acnt = 0x10, rep, i;
 	/* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		acnt = 0x20;
 	/* SEEK */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK13A4 */
 		xf_emit(ctx, 1, 1);	/* 00000fff tesla UNK1318 */
 	}
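
Most of the vfetch gene scales with acnt, the vertex attribute count, which the hunk above doubles from 0x10 to 0x20 on the IS_NVA3F() family; every xf_emit(ctx, acnt, ...) and (acnt/8) mask emission below inherits that width. The selection, pulled out as a sketch (the helper name is illustrative, not from the patch):

	static int
	vfetch_attr_count(struct nouveau_device *device)
	{
		return IS_NVA3F(device->chipset) ? 0x20 : 0x10;
	}
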
@@ -1923,9 +1904,9 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);	/* 0000ffff turing USER_PARAM_COUNT */
 	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A30 */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0xb, 0);	/* RO */
-	else if (dev_priv->chipset >= 0xa0)
+	else if (device->chipset >= 0xa0)
 		xf_emit(ctx, 0x9, 0);	/* RO */
 	else
 		xf_emit(ctx, 0x8, 0);	/* RO */
@@ -1944,11 +1925,11 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 4);	/* 000001ff UNK1A28 */
 	xf_emit(ctx, 1, 8);	/* 000001ff UNK0DF0 */
 	xf_emit(ctx, 1, 0);	/* 00000001 GP_ENABLE */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 1, 0x3ff);	/* 3ff tesla UNK0D68 */
 	else
 		xf_emit(ctx, 1, 0x7ff);	/* 7ff tesla UNK0D68 */
-	if (dev_priv->chipset == 0xa8)
+	if (device->chipset == 0xa8)
 		xf_emit(ctx, 1, 0x1e00);	/* 7fff */
 	/* SEEK */
 	xf_emit(ctx, 0xc, 0);	/* RO or close */
@@ -1956,13 +1937,13 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0xf);	/* ffffffff VP_ATTR_EN */
 	xf_emit(ctx, (acnt/8)-1, 0);	/* ffffffff VP_ATTR_EN */
 	xf_emit(ctx, 1, 0);	/* 0000000f VP_GP_BUILTIN_ATTR_EN */
-	if (dev_priv->chipset > 0x50 && dev_priv->chipset < 0xa0)
+	if (device->chipset > 0x50 && device->chipset < 0xa0)
 		xf_emit(ctx, 2, 0);	/* ffffffff */
 	else
 		xf_emit(ctx, 1, 0);	/* ffffffff */
 	xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK0FD8 */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 0x10, 0);	/* 0? */
 		xf_emit(ctx, 2, 0);	/* weird... */
 		xf_emit(ctx, 2, 0);	/* RO */
@@ -1975,7 +1956,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);	/* ffffffff VB_ELEMENT_BASE */
 	xf_emit(ctx, 1, 0);	/* ffffffff UNK1438 */
 	xf_emit(ctx, acnt, 0);	/* 1 tesla UNK1000 */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1118? */
 	/* SEEK */
 	xf_emit(ctx, acnt, 0);	/* ffffffff VERTEX_ARRAY_UNK90C */
@@ -2013,23 +1994,23 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, acnt, 0);	/* 000000ff VERTEX_LIMIT_HIGH */
 	xf_emit(ctx, 3, 0);	/* f/1f */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, acnt, 0);	/* f */
 		xf_emit(ctx, 3, 0);	/* f/1f */
 	}
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 2, 0);	/* RO */
 	else
 		xf_emit(ctx, 5, 0);	/* RO */
 	/* SEEK */
 	xf_emit(ctx, 1, 0);	/* ffff DMA_VTXBUF */
 	/* SEEK */
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		xf_emit(ctx, 0x41, 0);	/* RO */
 		/* SEEK */
 		xf_emit(ctx, 0x11, 0);	/* RO */
-	} else if (!IS_NVA3F(dev_priv->chipset))
+	} else if (!IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0x50, 0);	/* RO */
 	else
 		xf_emit(ctx, 0x58, 0);	/* RO */
@@ -2041,7 +2022,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, acnt*4, 0);	/* ffffffff VTX_ATTR */
 	xf_emit(ctx, 4, 0);	/* f/1f, 0, 0, 0 */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0x1d, 0);	/* RO */
 	else
 		xf_emit(ctx, 0x16, 0);	/* RO */
@@ -2049,21 +2030,21 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0xf);	/* ffffffff VP_ATTR_EN */
 	xf_emit(ctx, (acnt/8)-1, 0);	/* ffffffff VP_ATTR_EN */
 	/* SEEK */
-	if (dev_priv->chipset < 0xa0)
+	if (device->chipset < 0xa0)
 		xf_emit(ctx, 8, 0);	/* RO */
-	else if (IS_NVA3F(dev_priv->chipset))
+	else if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0xc, 0);	/* RO */
 	else
 		xf_emit(ctx, 7, 0);	/* RO */
 	/* SEEK */
 	xf_emit(ctx, 0xa, 0);	/* RO */
-	if (dev_priv->chipset == 0xa0)
+	if (device->chipset == 0xa0)
 		rep = 0xc;
 	else
 		rep = 4;
 	for (i = 0; i < rep; i++) {
 		/* SEEK */
-		if (IS_NVA3F(dev_priv->chipset))
+		if (IS_NVA3F(device->chipset))
 			xf_emit(ctx, 0x20, 0);	/* ffffffff */
 		xf_emit(ctx, 0x200, 0);	/* ffffffff */
 		xf_emit(ctx, 4, 0);	/* 7f/ff, 0, 0, 0 */
@@ -2077,7 +2058,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);	/* 0000000f VP_GP_BUILTIN_ATTR_EN */
 	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A30 */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 7, 0);	/* weird... */
 	else
 		xf_emit(ctx, 5, 0);	/* weird... */
@@ -2086,13 +2067,13 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */
 	/* SEEK */
 	xf_emit(ctx, 2, 0);	/* 0001ffff CLIP_X, CLIP_Y */
 	xf_emit(ctx, 2, 0);	/* 0000ffff CLIP_W, CLIP_H */
 	xf_emit(ctx, 1, 0);	/* 00000001 CLIP_ENABLE */
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		/* this is useless on everything but the original NV50,
 		 * guess they forgot to nuke it. Or just didn't bother. */
 		xf_emit(ctx, 2, 0);	/* 0000ffff IFC_CLIP_X, Y */
@@ -2148,7 +2129,7 @@ nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */
 	/* SEEK */
 	xf_emit(ctx, 2, 0);	/* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */
@@ -2173,7 +2154,7 @@ nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 2);	/* 00000003 REG_MODE */
 	/* SEEK */
 	xf_emit(ctx, 0x40, 0);	/* ffffffff USER_PARAM */
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x50:
 	case 0x92:
 		xf_emit(ctx, 8, 0);	/* 7, 0, 0, 0, ... */
@@ -2247,7 +2228,7 @@ nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	xf_emit(ctx, 2, 0);	/* 00007fff WINDOW_OFFSET_XY */
 	xf_emit(ctx, 1, 0x3f800000);	/* ffffffff LINE_WIDTH */
 	xf_emit(ctx, 1, 0);	/* 00000001 LINE_SMOOTH_ENABLE */
@@ -2277,9 +2258,9 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 4);	/* 00000007 FP_CONTROL */
 	xf_emit(ctx, 1, 0);	/* 00000001 ALPHA_TEST_ENABLE */
 	xf_emit(ctx, 1, 0);	/* 00000007 ALPHA_TEST_FUNC */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 3);	/* 00000003 UNK16B4 */
-	else if (dev_priv->chipset >= 0xa0)
+	else if (device->chipset >= 0xa0)
 		xf_emit(ctx, 1, 1);	/* 00000001 UNK16B4 */
 	xf_emit(ctx, 1, 0);	/* 00000003 MULTISAMPLE_CTRL */
 	xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK0F90 */
@@ -2293,11 +2274,11 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);	/* ffffffff POINT_SIZE */
 	xf_emit(ctx, 1, 0);	/* 00000001 */
 	xf_emit(ctx, 1, 0);	/* 00000007 tesla UNK0FB4 */
-	if (dev_priv->chipset != 0x50) {
+	if (device->chipset != 0x50) {
 		xf_emit(ctx, 1, 0);	/* 3ff */
 		xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK1110 */
 	}
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK1928 */
 	xf_emit(ctx, 0x10, 0);	/* ffffffff DEPTH_RANGE_NEAR */
 	xf_emit(ctx, 0x10, 0x3f800000);	/* ffffffff DEPTH_RANGE_FAR */
@@ -2316,11 +2297,11 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 1);	/* 00000001 ZETA_ENABLE */
 	xf_emit(ctx, 1, 0);	/* 00000001 VERTEX_TWO_SIDE_ENABLE */
 	xf_emit(ctx, 4, 0xffff);	/* 0000ffff MSAA_MASK */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK1100 */
-	if (dev_priv->chipset < 0xa0)
+	if (device->chipset < 0xa0)
 		xf_emit(ctx, 0x1c, 0);	/* RO */
-	else if (IS_NVA3F(dev_priv->chipset))
+	else if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0x9, 0);
 	xf_emit(ctx, 1, 0);	/* 00000001 UNK1534 */
 	xf_emit(ctx, 1, 0);	/* 00000001 LINE_SMOOTH_ENABLE */
@@ -2328,13 +2309,13 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0x00ffff00);	/* 00ffffff LINE_STIPPLE_PATTERN */
 	xf_emit(ctx, 1, 0x1a);	/* 0000001f POLYGON_MODE */
 	xf_emit(ctx, 1, 0);	/* 00000003 WINDOW_ORIGIN */
-	if (dev_priv->chipset != 0x50) {
+	if (device->chipset != 0x50) {
 		xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK1100 */
 		xf_emit(ctx, 1, 0);	/* 3ff */
 	}
 	/* XXX: the following block could belong either to unk1cxx, or
 	 * to STRMOUT. Rather hard to tell. */
-	if (dev_priv->chipset < 0xa0)
+	if (device->chipset < 0xa0)
 		xf_emit(ctx, 0x25, 0);
 	else
 		xf_emit(ctx, 0x3b, 0);
@@ -2343,18 +2324,18 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	xf_emit(ctx, 1, 0x102);	/* 0000ffff STRMOUT_BUFFER_CTRL */
 	xf_emit(ctx, 1, 0);	/* ffffffff STRMOUT_PRIMITIVE_COUNT */
 	xf_emit(ctx, 4, 4);	/* 000000ff STRMOUT_NUM_ATTRIBS */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 4, 0);	/* ffffffff UNK1A8C */
 		xf_emit(ctx, 4, 0);	/* ffffffff UNK1780 */
 	}
 	xf_emit(ctx, 1, 4);	/* 000000ff GP_RESULT_MAP_SIZE */
 	xf_emit(ctx, 1, 4);	/* 0000007f VP_RESULT_MAP_SIZE */
 	xf_emit(ctx, 1, 0);	/* 00000001 GP_ENABLE */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 1, 0x3ff);	/* 000003ff tesla UNK0D68 */
 	else
 		xf_emit(ctx, 1, 0x7ff);	/* 000007ff tesla UNK0D68 */
@@ -2365,7 +2346,7 @@ nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 4, 0);	/* 000000ff STRMOUT_ADDRESS_HIGH */
 	xf_emit(ctx, 4, 0);	/* ffffffff STRMOUT_ADDRESS_LOW */
 	xf_emit(ctx, 4, 4);	/* 000000ff STRMOUT_NUM_ATTRIBS */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 4, 0);	/* ffffffff UNK1A8C */
 		xf_emit(ctx, 4, 0);	/* ffffffff UNK1780 */
 	}
@@ -2385,12 +2366,12 @@ nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	xf_emit(ctx, 1, 0x4e3bfdf);	/* ffffffff UNK0D64 */
 	xf_emit(ctx, 1, 0x4e3bfdf);	/* ffffffff UNK0DF4 */
 	xf_emit(ctx, 1, 0);	/* 00000007 */
 	xf_emit(ctx, 1, 0);	/* 000003ff */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 0x11);	/* 000000ff tesla UNK1968 */
 	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A3C */
 }
@@ -2398,7 +2379,7 @@ nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* SEEK */
 	xf_emit(ctx, 1, 0);	/* 0000ffff DMA_QUERY */
 	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
@@ -2416,7 +2397,7 @@ nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);	/* 00000001 eng2d UNK260 */
 	xf_emit(ctx, 1, 0);	/* ff/3ff */
 	xf_emit(ctx, 1, 0);	/* 00000007 */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 0x11);	/* 000000ff tesla UNK1968 */
 	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A3C */
 }
@@ -2424,11 +2405,11 @@ nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int magic2;
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		magic2 = 0x00003e60;
-	} else if (!IS_NVA3F(dev_priv->chipset)) {
+	} else if (!IS_NVA3F(device->chipset)) {
 		magic2 = 0x001ffe67;
 	} else {
 		magic2 = 0x00087e67;
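
The ropc gene picks one of three magic2 constants by chipset family; the same selection as above, rewritten as an expression for clarity (values copied from the hunk, the helper name is illustrative):

	static u32
	ropc_magic2(struct nouveau_device *device)
	{
		if (device->chipset == 0x50)
			return 0x00003e60;
		if (!IS_NVA3F(device->chipset))
			return 0x001ffe67;
		return 0x00087e67;	/* family matched by IS_NVA3F() above */
	}
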
@@ -2446,14 +2427,14 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);	/* 00000007 DEPTH_TEST_FUNC */
 	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_TEST_ENABLE */
 	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_WRITE_ENABLE */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
 	xf_emit(ctx, 1, 0);	/* 00000007 STENCIL_FRONT_FUNC_FUNC */
 	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_FRONT_FUNC_MASK */
 	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_FRONT_MASK */
 	xf_emit(ctx, 3, 0);	/* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
 	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_FRONT_ENABLE */
-	if (dev_priv->chipset >= 0xa0 && !IS_NVAAF(dev_priv->chipset))
+	if (device->chipset >= 0xa0 && !IS_NVAAF(device->chipset))
 		xf_emit(ctx, 1, 0x15);	/* 000000ff */
 	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_BACK_ENABLE */
 	xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK15B4 */
@@ -2462,14 +2443,14 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);	/* 0000000f ZETA_FORMAT */
 	xf_emit(ctx, 1, 1);	/* 00000001 ZETA_ENABLE */
 	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A3C */
-	if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
+	if (device->chipset == 0x86 || device->chipset == 0x92 || device->chipset == 0x98 || device->chipset >= 0xa0) {
 		xf_emit(ctx, 3, 0);	/* ff, ffffffff, ffffffff */
 		xf_emit(ctx, 1, 4);	/* 7 */
 		xf_emit(ctx, 1, 0x400);	/* fffffff */
 		xf_emit(ctx, 1, 0x300);	/* ffff */
 		xf_emit(ctx, 1, 0x1001);	/* 1fff */
-		if (dev_priv->chipset != 0xa0) {
-			if (IS_NVA3F(dev_priv->chipset))
+		if (device->chipset != 0xa0) {
+			if (IS_NVA3F(device->chipset))
 				xf_emit(ctx, 1, 0);	/* 0000000f UNK15C8 */
 			else
 				xf_emit(ctx, 1, 0x15);	/* ff */
@@ -2547,7 +2528,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 1);	/* 00000001 ZETA_ENABLE */
 	xf_emit(ctx, 1, 0);	/* ffffffff CLEAR_DEPTH */
 	xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK19CC */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 2, 0);
 		xf_emit(ctx, 1, 0x1001);
 		xf_emit(ctx, 0xb, 0);
@@ -2564,7 +2545,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
2564 xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ 2545 xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
2565 xf_emit(ctx, 1, 0x11); /* 3f/7f */ 2546 xf_emit(ctx, 1, 0x11); /* 3f/7f */
2566 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ 2547 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
2567 if (dev_priv->chipset != 0x50) { 2548 if (device->chipset != 0x50) {
2568 xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */ 2549 xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
2569 xf_emit(ctx, 1, 0); /* 000000ff */ 2550 xf_emit(ctx, 1, 0); /* 000000ff */
2570 } 2551 }
@@ -2581,7 +2562,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
2581 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ 2562 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
2582 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ 2563 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2583 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ 2564 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
2584 if (IS_NVA3F(dev_priv->chipset)) { 2565 if (IS_NVA3F(device->chipset)) {
2585 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */ 2566 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */
2586 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ 2567 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
2587 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ 2568 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
@@ -2600,7 +2581,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
2600 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2581 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2601 xf_emit(ctx, 1, 0); /* 00000001 */ 2582 xf_emit(ctx, 1, 0); /* 00000001 */
2602 xf_emit(ctx, 1, 0); /* 000003ff */ 2583 xf_emit(ctx, 1, 0); /* 000003ff */
2603 } else if (dev_priv->chipset >= 0xa0) { 2584 } else if (device->chipset >= 0xa0) {
2604 xf_emit(ctx, 2, 0); /* 00000001 */ 2585 xf_emit(ctx, 2, 0); /* 00000001 */
2605 xf_emit(ctx, 1, 0); /* 00000007 */ 2586 xf_emit(ctx, 1, 0); /* 00000007 */
2606 xf_emit(ctx, 1, 0); /* 00000003 */ 2587 xf_emit(ctx, 1, 0); /* 00000003 */
@@ -2614,7 +2595,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
2614 xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */ 2595 xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */
2615 xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR A R G B */ 2596 xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR A R G B */
2616 xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */ 2597 xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */
2617 if (dev_priv->chipset >= 0xa0) 2598 if (device->chipset >= 0xa0)
2618 xf_emit(ctx, 2, 0); /* 00000001 */ 2599 xf_emit(ctx, 2, 0); /* 00000001 */
2619 xf_emit(ctx, 1, 0); /* 000003ff */ 2600 xf_emit(ctx, 1, 0); /* 000003ff */
2620 xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ 2601 xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
@@ -2628,9 +2609,9 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
2628 xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */ 2609 xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */
2629 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ 2610 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
2630 xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */ 2611 xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
2631 if (dev_priv->chipset >= 0xa0) 2612 if (device->chipset >= 0xa0)
2632 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? */ 2613 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? */
2633 if (IS_NVA3F(dev_priv->chipset)) { 2614 if (IS_NVA3F(device->chipset)) {
2634 xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ 2615 xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
2635 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ 2616 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
2636 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ 2617 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
@@ -2659,9 +2640,9 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
2659static void 2640static void
2660nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx) 2641nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
2661{ 2642{
2662 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 2643 struct nouveau_device *device = ctx->device;
2663 int magic3; 2644 int magic3;
2664 switch (dev_priv->chipset) { 2645 switch (device->chipset) {
2665 case 0x50: 2646 case 0x50:
2666 magic3 = 0x1000; 2647 magic3 = 0x1000;
2667 break; 2648 break;
@@ -2681,16 +2662,16 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
2681 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ 2662 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
2682 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ 2663 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
2683 xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */ 2664 xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */
2684 if (IS_NVA3F(dev_priv->chipset)) 2665 if (IS_NVA3F(device->chipset))
2685 xf_emit(ctx, 0x1f, 0); /* ffffffff */ 2666 xf_emit(ctx, 0x1f, 0); /* ffffffff */
2686 else if (dev_priv->chipset >= 0xa0) 2667 else if (device->chipset >= 0xa0)
2687 xf_emit(ctx, 0x0f, 0); /* ffffffff */ 2668 xf_emit(ctx, 0x0f, 0); /* ffffffff */
2688 else 2669 else
2689 xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */ 2670 xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */
2690 xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */ 2671 xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */
2691 xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */ 2672 xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
2692 xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ 2673 xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
2693 if (dev_priv->chipset >= 0xa0) 2674 if (device->chipset >= 0xa0)
2694 xf_emit(ctx, 1, 0x03020100); /* ffffffff */ 2675 xf_emit(ctx, 1, 0x03020100); /* ffffffff */
2695 else 2676 else
2696 xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */ 2677 xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */
@@ -2733,11 +2714,11 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
2733 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ 2714 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
2734 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ 2715 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
2735 xf_emit(ctx, 1, 0); /* 111/113 */ 2716 xf_emit(ctx, 1, 0); /* 111/113 */
2736 if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96) 2717 if (device->chipset == 0x94 || device->chipset == 0x96)
2737 xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */ 2718 xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
2738 else if (dev_priv->chipset < 0xa0) 2719 else if (device->chipset < 0xa0)
2739 xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */ 2720 xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
2740 else if (!IS_NVA3F(dev_priv->chipset)) 2721 else if (!IS_NVA3F(device->chipset))
2741 xf_emit(ctx, 0x210, 0); /* ffffffff */ 2722 xf_emit(ctx, 0x210, 0); /* ffffffff */
2742 else 2723 else
2743 xf_emit(ctx, 0x410, 0); /* ffffffff */ 2724 xf_emit(ctx, 0x410, 0); /* ffffffff */
@@ -2751,12 +2732,12 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
2751static void 2732static void
2752nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx) 2733nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2753{ 2734{
2754 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 2735 struct nouveau_device *device = ctx->device;
2755 int magic1, magic2; 2736 int magic1, magic2;
2756 if (dev_priv->chipset == 0x50) { 2737 if (device->chipset == 0x50) {
2757 magic1 = 0x3ff; 2738 magic1 = 0x3ff;
2758 magic2 = 0x00003e60; 2739 magic2 = 0x00003e60;
2759 } else if (!IS_NVA3F(dev_priv->chipset)) { 2740 } else if (!IS_NVA3F(device->chipset)) {
2760 magic1 = 0x7ff; 2741 magic1 = 0x7ff;
2761 magic2 = 0x001ffe67; 2742 magic2 = 0x001ffe67;
2762 } else { 2743 } else {
@@ -2766,7 +2747,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2766 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ 2747 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
2767 xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */ 2748 xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */
2768 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ 2749 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
2769 if (IS_NVA3F(dev_priv->chipset)) 2750 if (IS_NVA3F(device->chipset))
2770 xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */ 2751 xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */
2771 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ 2752 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
2772 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ 2753 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
@@ -2800,11 +2781,11 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2800 xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */ 2781 xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */
2801 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ 2782 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
2802 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ 2783 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
2803 if (IS_NVA3F(dev_priv->chipset)) { 2784 if (IS_NVA3F(device->chipset)) {
2804 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */ 2785 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
2805 xf_emit(ctx, 1, 0); /* 00000003 */ 2786 xf_emit(ctx, 1, 0); /* 00000003 */
2806 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */ 2787 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */
2807 } else if (dev_priv->chipset >= 0xa0) { 2788 } else if (device->chipset >= 0xa0) {
2808 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */ 2789 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */
2809 xf_emit(ctx, 1, 0); /* 00000003 */ 2790 xf_emit(ctx, 1, 0); /* 00000003 */
2810 } else { 2791 } else {
@@ -2818,7 +2799,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2818 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ 2799 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
2819 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ 2800 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
2820 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ 2801 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
2821 if (IS_NVA3F(dev_priv->chipset)) { 2802 if (IS_NVA3F(device->chipset)) {
2822 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */ 2803 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
2823 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ 2804 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
2824 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ 2805 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
@@ -2846,7 +2827,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2846 xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */ 2827 xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
2847 xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */ 2828 xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */
2848 xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */ 2829 xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */
2849 if (IS_NVA3F(dev_priv->chipset)) 2830 if (IS_NVA3F(device->chipset))
2850 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2831 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2851 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ 2832 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2852 xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */ 2833 xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */
@@ -2870,9 +2851,9 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2870 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ 2851 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
2871 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ 2852 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
2872 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ 2853 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
2873 if (IS_NVA3F(dev_priv->chipset)) 2854 if (IS_NVA3F(device->chipset))
2874 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2855 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2875 if (dev_priv->chipset == 0x50) 2856 if (device->chipset == 0x50)
2876 xf_emit(ctx, 1, 0); /* ff */ 2857 xf_emit(ctx, 1, 0); /* ff */
2877 else 2858 else
2878 xf_emit(ctx, 3, 0); /* 1, 7, 3ff */ 2859 xf_emit(ctx, 3, 0); /* 1, 7, 3ff */
@@ -2907,7 +2888,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2907 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ 2888 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
2908 xf_emit(ctx, 1, 0); /* 00000007 */ 2889 xf_emit(ctx, 1, 0); /* 00000007 */
2909 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ 2890 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2910 if (IS_NVA3F(dev_priv->chipset)) 2891 if (IS_NVA3F(device->chipset))
2911 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2892 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2912 xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */ 2893 xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */
2913 xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */ 2894 xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */
@@ -2945,7 +2926,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2945 xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ 2926 xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
2946 xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ 2927 xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
2947 xf_emit(ctx, 1, 0); /* 00000007 */ 2928 xf_emit(ctx, 1, 0); /* 00000007 */
2948 if (IS_NVA3F(dev_priv->chipset)) 2929 if (IS_NVA3F(device->chipset))
2949 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2930 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2950 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ 2931 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
2951 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ 2932 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
@@ -2974,7 +2955,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2974 xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */ 2955 xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */
2975 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ 2956 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2976 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ 2957 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
2977 if (IS_NVA3F(dev_priv->chipset)) 2958 if (IS_NVA3F(device->chipset))
2978 xf_emit(ctx, 1, 0); /* 00000001 */ 2959 xf_emit(ctx, 1, 0); /* 00000001 */
2979 xf_emit(ctx, 1, 0); /* ffff0ff3 */ 2960 xf_emit(ctx, 1, 0); /* ffff0ff3 */
2980 xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ 2961 xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
@@ -2988,14 +2969,14 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2988 xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */ 2969 xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
2989 xf_emit(ctx, 1, 0); /* 7 */ 2970 xf_emit(ctx, 1, 0); /* 7 */
2990 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ 2971 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
2991 if (IS_NVA3F(dev_priv->chipset)) { 2972 if (IS_NVA3F(device->chipset)) {
2992 xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ 2973 xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
2993 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2974 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2994 } 2975 }
2995 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ 2976 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
2996 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ 2977 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
2997 xf_emit(ctx, 1, 0); /* ffff0ff3 */ 2978 xf_emit(ctx, 1, 0); /* ffff0ff3 */
2998 if (dev_priv->chipset >= 0xa0) 2979 if (device->chipset >= 0xa0)
2999 xf_emit(ctx, 1, 0x0fac6881); /* fffffff */ 2980 xf_emit(ctx, 1, 0x0fac6881); /* fffffff */
3000 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ 2981 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
3001 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ 2982 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
@@ -3012,12 +2993,12 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
3012 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ 2993 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
3013 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ 2994 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
3014 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ 2995 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
3015 if (IS_NVA3F(dev_priv->chipset)) { 2996 if (IS_NVA3F(device->chipset)) {
3016 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2997 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
3017 xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */ 2998 xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */
3018 } 2999 }
3019 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ 3000 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
3020 if (dev_priv->chipset >= 0xa0) { 3001 if (device->chipset >= 0xa0) {
3021 xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */ 3002 xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */
3022 xf_emit(ctx, 1, 0xfac6881); /* fffffff */ 3003 xf_emit(ctx, 1, 0xfac6881); /* fffffff */
3023 xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */ 3004 xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */
@@ -3027,7 +3008,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
3027 xf_emit(ctx, 2, 0); /* 7, f */ 3008 xf_emit(ctx, 2, 0); /* 7, f */
3028 xf_emit(ctx, 1, 1); /* 1 */ 3009 xf_emit(ctx, 1, 1); /* 1 */
3029 xf_emit(ctx, 1, 0); /* 7/f */ 3010 xf_emit(ctx, 1, 0); /* 7/f */
3030 if (IS_NVA3F(dev_priv->chipset)) 3011 if (IS_NVA3F(device->chipset))
3031 xf_emit(ctx, 0x9, 0); /* 1 */ 3012 xf_emit(ctx, 0x9, 0); /* 1 */
3032 else 3013 else
3033 xf_emit(ctx, 0x8, 0); /* 1 */ 3014 xf_emit(ctx, 0x8, 0); /* 1 */
@@ -3041,7 +3022,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
3041 xf_emit(ctx, 1, 0x11); /* 7f */ 3022 xf_emit(ctx, 1, 0x11); /* 7f */
3042 xf_emit(ctx, 1, 1); /* 1 */ 3023 xf_emit(ctx, 1, 1); /* 1 */
3043 xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */ 3024 xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */
3044 if (IS_NVA3F(dev_priv->chipset)) { 3025 if (IS_NVA3F(device->chipset)) {
3045 xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ 3026 xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
3046 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 3027 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
3047 } 3028 }
@@ -3051,15 +3032,15 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
3051static void 3032static void
3052nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx) 3033nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
3053{ 3034{
3054 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 3035 struct nouveau_device *device = ctx->device;
3055 xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */ 3036 xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */
3056 if (dev_priv->chipset != 0x50) 3037 if (device->chipset != 0x50)
3057 xf_emit(ctx, 1, 0); /* 3 */ 3038 xf_emit(ctx, 1, 0); /* 3 */
3058 xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */ 3039 xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */
3059 xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */ 3040 xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */
3060 xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */ 3041 xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */
3061 xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */ 3042 xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */
3062 if (dev_priv->chipset == 0x50) 3043 if (device->chipset == 0x50)
3063 xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */ 3044 xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */
3064 else 3045 else
3065 xf_emit(ctx, 2, 0); /* 3ff, 1 */ 3046 xf_emit(ctx, 2, 0); /* 3ff, 1 */
@@ -3071,13 +3052,13 @@ nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
3071 xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */ 3052 xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */
3072 xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */ 3053 xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */
3073 xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */ 3054 xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */
3074 if (dev_priv->chipset == 0x50) { 3055 if (device->chipset == 0x50) {
3075 xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */ 3056 xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */
3076 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */ 3057 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
3077 xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */ 3058 xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */
3078 xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */ 3059 xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */
3079 xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */ 3060 xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */
3080 } else if (!IS_NVAAF(dev_priv->chipset)) { 3061 } else if (!IS_NVAAF(device->chipset)) {
3081 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */ 3062 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
3082 xf_emit(ctx, 1, 0); /* 00000003 */ 3063 xf_emit(ctx, 1, 0); /* 00000003 */
3083 xf_emit(ctx, 1, 0); /* 000003ff */ 3064 xf_emit(ctx, 1, 0); /* 000003ff */
@@ -3097,7 +3078,7 @@ nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
3097static void 3078static void
3098nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx) 3079nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
3099{ 3080{
3100 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 3081 struct nouveau_device *device = ctx->device;
3101 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ 3082 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
3102 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ 3083 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
3103 xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */ 3084 xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */
@@ -3109,7 +3090,7 @@ nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
3109 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ 3090 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
3110 xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ 3091 xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
3111 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */ 3092 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */
3112 if (IS_NVA3F(dev_priv->chipset)) 3093 if (IS_NVA3F(device->chipset))
3113 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 3094 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
3114 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */ 3095 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
3115 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ 3096 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
@@ -3136,8 +3117,8 @@ nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
3136static void 3117static void
3137nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx) 3118nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
3138{ 3119{
3139 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 3120 struct nouveau_device *device = ctx->device;
3140 if (dev_priv->chipset < 0xa0) { 3121 if (device->chipset < 0xa0) {
3141 nv50_graph_construct_xfer_unk84xx(ctx); 3122 nv50_graph_construct_xfer_unk84xx(ctx);
3142 nv50_graph_construct_xfer_tprop(ctx); 3123 nv50_graph_construct_xfer_tprop(ctx);
3143 nv50_graph_construct_xfer_tex(ctx); 3124 nv50_graph_construct_xfer_tex(ctx);
@@ -3153,9 +3134,9 @@ nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
3153static void 3134static void
3154nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx) 3135nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
3155{ 3136{
3156 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 3137 struct nouveau_device *device = ctx->device;
3157 int i, mpcnt = 2; 3138 int i, mpcnt = 2;
3158 switch (dev_priv->chipset) { 3139 switch (device->chipset) {
3159 case 0x98: 3140 case 0x98:
3160 case 0xaa: 3141 case 0xaa:
3161 mpcnt = 1; 3142 mpcnt = 1;
@@ -3182,34 +3163,34 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
3182 xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */ 3163 xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */
3183 xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */ 3164 xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */
3184 xf_emit(ctx, 1, 0x04000400); /* ffffffff */ 3165 xf_emit(ctx, 1, 0x04000400); /* ffffffff */
3185 if (dev_priv->chipset >= 0xa0) 3166 if (device->chipset >= 0xa0)
3186 xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */ 3167 xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */
3187 xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */ 3168 xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */
3188 xf_emit(ctx, 1, 0); /* ff/3ff */ 3169 xf_emit(ctx, 1, 0); /* ff/3ff */
3189 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ 3170 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
3190 if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset == 0xa8 || IS_NVAAF(dev_priv->chipset)) { 3171 if (device->chipset == 0x86 || device->chipset == 0x98 || device->chipset == 0xa8 || IS_NVAAF(device->chipset)) {
3191 xf_emit(ctx, 1, 0xe00); /* 7fff */ 3172 xf_emit(ctx, 1, 0xe00); /* 7fff */
3192 xf_emit(ctx, 1, 0x1e00); /* 7fff */ 3173 xf_emit(ctx, 1, 0x1e00); /* 7fff */
3193 } 3174 }
3194 xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */ 3175 xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */
3195 xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */ 3176 xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */
3196 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ 3177 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
3197 if (dev_priv->chipset == 0x50) 3178 if (device->chipset == 0x50)
3198 xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */ 3179 xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */
3199 xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */ 3180 xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */
3200 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ 3181 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
3201 xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */ 3182 xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
3202 xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ 3183 xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
3203 if (IS_NVAAF(dev_priv->chipset)) 3184 if (IS_NVAAF(device->chipset))
3204 xf_emit(ctx, 0xb, 0); /* RO */ 3185 xf_emit(ctx, 0xb, 0); /* RO */
3205 else if (dev_priv->chipset >= 0xa0) 3186 else if (device->chipset >= 0xa0)
3206 xf_emit(ctx, 0xc, 0); /* RO */ 3187 xf_emit(ctx, 0xc, 0); /* RO */
3207 else 3188 else
3208 xf_emit(ctx, 0xa, 0); /* RO */ 3189 xf_emit(ctx, 0xa, 0); /* RO */
3209 } 3190 }
3210 xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ 3191 xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
3211 xf_emit(ctx, 1, 0); /* ff/3ff */ 3192 xf_emit(ctx, 1, 0); /* ff/3ff */
3212 if (dev_priv->chipset >= 0xa0) { 3193 if (device->chipset >= 0xa0) {
3213 xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */ 3194 xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */
3214 } 3195 }
3215 xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */ 3196 xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */
@@ -3223,7 +3204,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
3223 xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */ 3204 xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */
3224 xf_emit(ctx, 1, 0x1fe21); /* 1ffff/3ffff[NVA0+] tesla UNk0FAC */ 3205 xf_emit(ctx, 1, 0x1fe21); /* 1ffff/3ffff[NVA0+] tesla UNk0FAC */
3225 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */ 3206 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
3226 if (IS_NVA3F(dev_priv->chipset)) 3207 if (IS_NVA3F(device->chipset))
3227 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 3208 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
3228 xf_emit(ctx, 1, 0); /* ff/3ff */ 3209 xf_emit(ctx, 1, 0); /* ff/3ff */
3229 xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */ 3210 xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */
@@ -3238,7 +3219,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
3238 xf_emit(ctx, 1, 0); /* 00000007 */ 3219 xf_emit(ctx, 1, 0); /* 00000007 */
3239 xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */ 3220 xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */
3240 xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */ 3221 xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
3241 if (IS_NVA3F(dev_priv->chipset)) 3222 if (IS_NVA3F(device->chipset))
3242 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */ 3223 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
3243 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ 3224 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
3244 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ 3225 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
@@ -3253,7 +3234,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
3253 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ 3234 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
3254 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ 3235 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
3255 xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ 3236 xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
3256 if (IS_NVA3F(dev_priv->chipset)) { 3237 if (IS_NVA3F(device->chipset)) {
3257 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */ 3238 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
3258 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ 3239 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
3259 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ 3240 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
@@ -3268,11 +3249,11 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
3268 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */ 3249 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
3269 xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */ 3250 xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
3270 /* XXX: demagic this part some day */ 3251 /* XXX: demagic this part some day */
3271 if (dev_priv->chipset == 0x50) 3252 if (device->chipset == 0x50)
3272 xf_emit(ctx, 0x3a0, 0); 3253 xf_emit(ctx, 0x3a0, 0);
3273 else if (dev_priv->chipset < 0x94) 3254 else if (device->chipset < 0x94)
3274 xf_emit(ctx, 0x3a2, 0); 3255 xf_emit(ctx, 0x3a2, 0);
3275 else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa) 3256 else if (device->chipset == 0x98 || device->chipset == 0xaa)
3276 xf_emit(ctx, 0x39f, 0); 3257 xf_emit(ctx, 0x39f, 0);
3277 else 3258 else
3278 xf_emit(ctx, 0x3a3, 0); 3259 xf_emit(ctx, 0x3a3, 0);
@@ -3285,15 +3266,15 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
3285static void 3266static void
3286nv50_graph_construct_xfer2(struct nouveau_grctx *ctx) 3267nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
3287{ 3268{
3288 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 3269 struct nouveau_device *device = ctx->device;
3289 int i; 3270 int i;
3290 uint32_t offset; 3271 u32 offset;
3291 uint32_t units = nv_rd32 (ctx->dev, 0x1540); 3272 u32 units = nv_rd32 (ctx->device, 0x1540);
3292 int size = 0; 3273 int size = 0;
3293 3274
3294 offset = (ctx->ctxvals_pos+0x3f)&~0x3f; 3275 offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
3295 3276
3296 if (dev_priv->chipset < 0xa0) { 3277 if (device->chipset < 0xa0) {
3297 for (i = 0; i < 8; i++) { 3278 for (i = 0; i < 8; i++) {
3298 ctx->ctxvals_pos = offset + i; 3279 ctx->ctxvals_pos = offset + i;
3299 /* that little bugger belongs to csched. No idea 3280 /* that little bugger belongs to csched. No idea
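The nv50 conversion above is entirely mechanical: every context-builder helper that used to reach the chipset through the DRM device's dev_private now reads it from the nouveau_device pointer carried by the grctx itself. A minimal standalone sketch of that accessor change follows; the struct layouts and the helper name are illustrative stand-ins, not the driver's real definitions.

/* Standalone, compilable sketch of the accessor change; nouveau_device
 * and nouveau_grctx are reduced to the one field the pattern needs. */
#include <stdio.h>

struct nouveau_device { unsigned int chipset; };
struct nouveau_grctx { struct nouveau_device *device; };

static void construct_gene_example(struct nouveau_grctx *ctx)
{
	/* new idiom, as in the hunks above: one local taken
	 * straight from the context */
	struct nouveau_device *device = ctx->device;

	/* old idiom, removed by this commit:
	 *   struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	 *   if (dev_priv->chipset >= 0xa0) ...
	 */
	if (device->chipset == 0x50)
		puts("original nv50 path");
	else if (device->chipset >= 0xa0)
		puts("nva0+ path");
	else
		puts("nv84..nv98 path");
}

int main(void)
{
	struct nouveau_device dev = { .chipset = 0xa0 };
	struct nouveau_grctx ctx = { .device = &dev };

	construct_gene_example(&ctx);
	return 0;
}

The ctxnvc0.c hunks below take the equivalent route for Fermi: those generators now receive the nvc0_graph_priv directly, so chipset tests go through nv_device(priv)->chipset instead of a dev_priv local.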
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
index b19a406e55d9..c12e7668dbfe 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -22,13 +22,10 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include <core/mm.h>
28#include "nvc0.h" 25#include "nvc0.h"
29 26
30void 27void
31nv_icmd(struct drm_device *priv, u32 icmd, u32 data) 28nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data)
32{ 29{
33 nv_wr32(priv, 0x400204, data); 30 nv_wr32(priv, 0x400204, data);
34 nv_wr32(priv, 0x400200, icmd); 31 nv_wr32(priv, 0x400200, icmd);
@@ -36,21 +33,22 @@ nv_icmd(struct drm_device *priv, u32 icmd, u32 data)
36} 33}
37 34
38int 35int
39nvc0_grctx_init(struct drm_device *priv, struct nvc0_graph_priv *oprv, 36nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
40 struct nvc0_grctx *info)
41{ 37{
38 struct nouveau_bar *bar = nouveau_bar(priv);
39 struct nouveau_object *parent = nv_object(priv);
42 struct nouveau_gpuobj *chan; 40 struct nouveau_gpuobj *chan;
43 u32 size = (0x80000 + oprv->size + 4095) & ~4095; 41 u32 size = (0x80000 + priv->size + 4095) & ~4095;
44 int ret, i; 42 int ret, i;
45 43
46 /* allocate memory to for a "channel", which we'll use to generate 44 /* allocate memory to for a "channel", which we'll use to generate
47 * the default context values 45 * the default context values
48 */ 46 */
49 ret = nouveau_gpuobj_new(priv, NULL, size, 0x1000, 47 ret = nouveau_gpuobj_new(parent, NULL, size, 0x1000,
50 NVOBJ_FLAG_ZERO_ALLOC, &info->chan); 48 NVOBJ_FLAG_ZERO_ALLOC, &info->chan);
51 chan = info->chan; 49 chan = info->chan;
52 if (ret) { 50 if (ret) {
53 NV_ERROR(priv, "failed to allocate channel memory, %d\n", ret); 51 nv_error(priv, "failed to allocate channel memory, %d\n", ret);
54 return ret; 52 return ret;
55 } 53 }
56 54
@@ -75,32 +73,31 @@ nvc0_grctx_init(struct drm_device *priv, struct nvc0_graph_priv *oprv,
75 nv_wo32(chan, 0x0210, 0x00080004); 73 nv_wo32(chan, 0x0210, 0x00080004);
76 nv_wo32(chan, 0x0214, 0x00000000); 74 nv_wo32(chan, 0x0214, 0x00000000);
77 75
78 nvimem_flush(priv); 76 bar->flush(bar);
79 77
80 nv_wr32(priv, 0x100cb8, (chan->addr + 0x1000) >> 8); 78 nv_wr32(priv, 0x100cb8, (chan->addr + 0x1000) >> 8);
81 nv_wr32(priv, 0x100cbc, 0x80000001); 79 nv_wr32(priv, 0x100cbc, 0x80000001);
82 nv_wait(priv, 0x100c80, 0x00008000, 0x00008000); 80 nv_wait(priv, 0x100c80, 0x00008000, 0x00008000);
83 81
84 /* setup default state for mmio list construction */ 82 /* setup default state for mmio list construction */
85 info->dev = priv; 83 info->data = priv->mmio_data;
86 info->data = oprv->mmio_data; 84 info->mmio = priv->mmio_list;
87 info->mmio = oprv->mmio_list;
88 info->addr = 0x2000 + (i * 8); 85 info->addr = 0x2000 + (i * 8);
89 info->priv = oprv; 86 info->priv = priv;
90 info->buffer_nr = 0; 87 info->buffer_nr = 0;
91 88
92 if (oprv->firmware) { 89 if (priv->firmware) {
93 nv_wr32(priv, 0x409840, 0x00000030); 90 nv_wr32(priv, 0x409840, 0x00000030);
94 nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12); 91 nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
95 nv_wr32(priv, 0x409504, 0x00000003); 92 nv_wr32(priv, 0x409504, 0x00000003);
96 if (!nv_wait(priv, 0x409800, 0x00000010, 0x00000010)) 93 if (!nv_wait(priv, 0x409800, 0x00000010, 0x00000010))
97 NV_ERROR(priv, "load_ctx timeout\n"); 94 nv_error(priv, "load_ctx timeout\n");
98 95
99 nv_wo32(chan, 0x8001c, 1); 96 nv_wo32(chan, 0x8001c, 1);
100 nv_wo32(chan, 0x80020, 0); 97 nv_wo32(chan, 0x80020, 0);
101 nv_wo32(chan, 0x80028, 0); 98 nv_wo32(chan, 0x80028, 0);
102 nv_wo32(chan, 0x8002c, 0); 99 nv_wo32(chan, 0x8002c, 0);
103 nvimem_flush(priv); 100 bar->flush(bar);
104 return 0; 101 return 0;
105 } 102 }
106 103
@@ -109,7 +106,7 @@ nvc0_grctx_init(struct drm_device *priv, struct nvc0_graph_priv *oprv,
109 nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12); 106 nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
110 nv_wr32(priv, 0x409504, 0x00000001); 107 nv_wr32(priv, 0x409504, 0x00000001);
111 if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) { 108 if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
112 NV_ERROR(priv, "HUB_SET_CHAN timeout\n"); 109 nv_error(priv, "HUB_SET_CHAN timeout\n");
113 nvc0_graph_ctxctl_debug(priv); 110 nvc0_graph_ctxctl_debug(priv);
114 nouveau_gpuobj_ref(NULL, &info->chan); 111 nouveau_gpuobj_ref(NULL, &info->chan);
115 return -EBUSY; 112 return -EBUSY;
@@ -135,6 +132,8 @@ nvc0_grctx_data(struct nvc0_grctx *info, u32 size, u32 align, u32 access)
135void 132void
136nvc0_grctx_mmio(struct nvc0_grctx *info, u32 addr, u32 data, u32 shift, u32 buf) 133nvc0_grctx_mmio(struct nvc0_grctx *info, u32 addr, u32 data, u32 shift, u32 buf)
137{ 134{
135 struct nvc0_graph_priv *priv = info->priv;
136
138 info->mmio->addr = addr; 137 info->mmio->addr = addr;
139 info->mmio->data = data; 138 info->mmio->data = data;
140 info->mmio->shift = shift; 139 info->mmio->shift = shift;
@@ -143,7 +142,7 @@ nvc0_grctx_mmio(struct nvc0_grctx *info, u32 addr, u32 data, u32 shift, u32 buf)
143 142
144 if (shift) 143 if (shift)
145 data |= info->buffer[buf] >> shift; 144 data |= info->buffer[buf] >> shift;
146 nv_wr32(info->dev, addr, data); 145 nv_wr32(priv, addr, data);
147} 146}
148 147
149int 148int
@@ -153,11 +152,11 @@ nvc0_grctx_fini(struct nvc0_grctx *info)
153 int i; 152 int i;
154 153
155 if (priv->firmware) { 154 if (priv->firmware) {
156 nv_wr32(info->dev, 0x409840, 0x00000003); 155 nv_wr32(priv, 0x409840, 0x00000003);
157 nv_wr32(info->dev, 0x409500, 0x80000000 | info->chan->addr >> 12); 156 nv_wr32(priv, 0x409500, 0x80000000 | info->chan->addr >> 12);
158 nv_wr32(info->dev, 0x409504, 0x00000009); 157 nv_wr32(priv, 0x409504, 0x00000009);
159 if (!nv_wait(info->dev, 0x409800, 0x00000001, 0x00000000)) { 158 if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000000)) {
160 NV_ERROR(info->dev, "unload_ctx timeout\n"); 159 nv_error(priv, "unload_ctx timeout\n");
161 return -EBUSY; 160 return -EBUSY;
162 } 161 }
163 162
@@ -165,12 +164,12 @@ nvc0_grctx_fini(struct nvc0_grctx *info)
165 } 164 }
166 165
167 /* HUB_FUC(CTX_SAVE) */ 166 /* HUB_FUC(CTX_SAVE) */
168 nv_wr32(info->dev, 0x409840, 0x80000000); 167 nv_wr32(priv, 0x409840, 0x80000000);
169 nv_wr32(info->dev, 0x409500, 0x80000000 | info->chan->addr >> 12); 168 nv_wr32(priv, 0x409500, 0x80000000 | info->chan->addr >> 12);
170 nv_wr32(info->dev, 0x409504, 0x00000002); 169 nv_wr32(priv, 0x409504, 0x00000002);
171 if (!nv_wait(info->dev, 0x409800, 0x80000000, 0x80000000)) { 170 if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
172 NV_ERROR(info->dev, "HUB_CTX_SAVE timeout\n"); 171 nv_error(priv, "HUB_CTX_SAVE timeout\n");
173 nvc0_graph_ctxctl_debug(info->dev); 172 nvc0_graph_ctxctl_debug(priv);
174 return -EBUSY; 173 return -EBUSY;
175 } 174 }
176 175
@@ -186,7 +185,7 @@ save:
186} 185}
187 186
188static void 187static void
189nvc0_grctx_generate_9097(struct drm_device *priv) 188nvc0_grctx_generate_9097(struct nvc0_graph_priv *priv)
190{ 189{
191 u32 fermi = nvc0_graph_class(priv); 190 u32 fermi = nvc0_graph_class(priv);
192 u32 mthd; 191 u32 mthd;
@@ -1343,7 +1342,7 @@ nvc0_grctx_generate_9097(struct drm_device *priv)
1343} 1342}
1344 1343
1345static void 1344static void
1346nvc0_grctx_generate_9197(struct drm_device *priv) 1345nvc0_grctx_generate_9197(struct nvc0_graph_priv *priv)
1347{ 1346{
1348 u32 fermi = nvc0_graph_class(priv); 1347 u32 fermi = nvc0_graph_class(priv);
1349 u32 mthd; 1348 u32 mthd;
@@ -1356,7 +1355,7 @@ nvc0_grctx_generate_9197(struct drm_device *priv)
1356} 1355}
1357 1356
1358static void 1357static void
1359nvc0_grctx_generate_9297(struct drm_device *priv) 1358nvc0_grctx_generate_9297(struct nvc0_graph_priv *priv)
1360{ 1359{
1361 u32 fermi = nvc0_graph_class(priv); 1360 u32 fermi = nvc0_graph_class(priv);
1362 u32 mthd; 1361 u32 mthd;
@@ -1374,7 +1373,7 @@ nvc0_grctx_generate_9297(struct drm_device *priv)
1374} 1373}
1375 1374
1376static void 1375static void
1377nvc0_grctx_generate_902d(struct drm_device *priv) 1376nvc0_grctx_generate_902d(struct nvc0_graph_priv *priv)
1378{ 1377{
1379 nv_mthd(priv, 0x902d, 0x0200, 0x000000cf); 1378 nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
1380 nv_mthd(priv, 0x902d, 0x0204, 0x00000001); 1379 nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
@@ -1396,7 +1395,7 @@ nvc0_grctx_generate_902d(struct drm_device *priv)
1396} 1395}
1397 1396
1398static void 1397static void
1399nvc0_grctx_generate_9039(struct drm_device *priv) 1398nvc0_grctx_generate_9039(struct nvc0_graph_priv *priv)
1400{ 1399{
1401 nv_mthd(priv, 0x9039, 0x030c, 0x00000000); 1400 nv_mthd(priv, 0x9039, 0x030c, 0x00000000);
1402 nv_mthd(priv, 0x9039, 0x0310, 0x00000000); 1401 nv_mthd(priv, 0x9039, 0x0310, 0x00000000);
@@ -1409,12 +1408,11 @@ nvc0_grctx_generate_9039(struct drm_device *priv)
1409} 1408}
1410 1409
1411static void 1410static void
1412nvc0_grctx_generate_90c0(struct drm_device *priv) 1411nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
1413{ 1412{
1414 struct drm_nouveau_private *dev_priv = priv->dev_private;
1415 int i; 1413 int i;
1416 1414
1417 for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) { 1415 for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) {
1418 nv_mthd(priv, 0x90c0, 0x2700 + (i * 0x40), 0x00000000); 1416 nv_mthd(priv, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
1419 nv_mthd(priv, 0x90c0, 0x2720 + (i * 0x40), 0x00000000); 1417 nv_mthd(priv, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
1420 nv_mthd(priv, 0x90c0, 0x2704 + (i * 0x40), 0x00000000); 1418 nv_mthd(priv, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
@@ -1430,7 +1428,7 @@ nvc0_grctx_generate_90c0(struct drm_device *priv)
1430 nv_mthd(priv, 0x90c0, 0x27ac, 0x00000000); 1428 nv_mthd(priv, 0x90c0, 0x27ac, 0x00000000);
1431 nv_mthd(priv, 0x90c0, 0x27cc, 0x00000000); 1429 nv_mthd(priv, 0x90c0, 0x27cc, 0x00000000);
1432 nv_mthd(priv, 0x90c0, 0x27ec, 0x00000000); 1430 nv_mthd(priv, 0x90c0, 0x27ec, 0x00000000);
1433 for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) { 1431 for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) {
1434 nv_mthd(priv, 0x90c0, 0x2710 + (i * 0x40), 0x00014000); 1432 nv_mthd(priv, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
1435 nv_mthd(priv, 0x90c0, 0x2730 + (i * 0x40), 0x00014000); 1433 nv_mthd(priv, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
1436 nv_mthd(priv, 0x90c0, 0x2714 + (i * 0x40), 0x00000040); 1434 nv_mthd(priv, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
@@ -1458,7 +1456,7 @@ nvc0_grctx_generate_90c0(struct drm_device *priv)
1458} 1456}
1459 1457
1460static void 1458static void
1461nvc0_grctx_generate_dispatch(struct drm_device *priv) 1459nvc0_grctx_generate_dispatch(struct nvc0_graph_priv *priv)
1462{ 1460{
1463 int i; 1461 int i;
1464 1462
@@ -1511,7 +1509,7 @@ nvc0_grctx_generate_dispatch(struct drm_device *priv)
1511} 1509}
1512 1510
1513static void 1511static void
1514nvc0_grctx_generate_macro(struct drm_device *priv) 1512nvc0_grctx_generate_macro(struct nvc0_graph_priv *priv)
1515{ 1513{
1516 nv_wr32(priv, 0x404404, 0x00000000); 1514 nv_wr32(priv, 0x404404, 0x00000000);
1517 nv_wr32(priv, 0x404408, 0x00000000); 1515 nv_wr32(priv, 0x404408, 0x00000000);
@@ -1536,7 +1534,7 @@ nvc0_grctx_generate_macro(struct drm_device *priv)
1536} 1534}
1537 1535
1538static void 1536static void
1539nvc0_grctx_generate_m2mf(struct drm_device *priv) 1537nvc0_grctx_generate_m2mf(struct nvc0_graph_priv *priv)
1540{ 1538{
1541 nv_wr32(priv, 0x404604, 0x00000015); 1539 nv_wr32(priv, 0x404604, 0x00000015);
1542 nv_wr32(priv, 0x404608, 0x00000000); 1540 nv_wr32(priv, 0x404608, 0x00000000);
@@ -1600,7 +1598,7 @@ nvc0_grctx_generate_m2mf(struct drm_device *priv)
1600} 1598}
1601 1599
1602static void 1600static void
1603nvc0_grctx_generate_unk47xx(struct drm_device *priv) 1601nvc0_grctx_generate_unk47xx(struct nvc0_graph_priv *priv)
1604{ 1602{
1605 nv_wr32(priv, 0x404700, 0x00000000); 1603 nv_wr32(priv, 0x404700, 0x00000000);
1606 nv_wr32(priv, 0x404704, 0x00000000); 1604 nv_wr32(priv, 0x404704, 0x00000000);
@@ -1627,16 +1625,15 @@ nvc0_grctx_generate_unk47xx(struct drm_device *priv)
1627} 1625}
1628 1626
1629static void 1627static void
1630nvc0_grctx_generate_shaders(struct drm_device *priv) 1628nvc0_grctx_generate_shaders(struct nvc0_graph_priv *priv)
1631{ 1629{
1632 struct drm_nouveau_private *dev_priv = priv->dev_private;
1633 1630
1634 if (dev_priv->chipset == 0xd9) { 1631 if (nv_device(priv)->chipset == 0xd9) {
1635 nv_wr32(priv, 0x405800, 0x0f8000bf); 1632 nv_wr32(priv, 0x405800, 0x0f8000bf);
1636 nv_wr32(priv, 0x405830, 0x02180218); 1633 nv_wr32(priv, 0x405830, 0x02180218);
1637 nv_wr32(priv, 0x405834, 0x08000000); 1634 nv_wr32(priv, 0x405834, 0x08000000);
1638 } else 1635 } else
1639 if (dev_priv->chipset == 0xc1) { 1636 if (nv_device(priv)->chipset == 0xc1) {
1640 nv_wr32(priv, 0x405800, 0x0f8000bf); 1637 nv_wr32(priv, 0x405800, 0x0f8000bf);
1641 nv_wr32(priv, 0x405830, 0x02180218); 1638 nv_wr32(priv, 0x405830, 0x02180218);
1642 nv_wr32(priv, 0x405834, 0x00000000); 1639 nv_wr32(priv, 0x405834, 0x00000000);
@@ -1657,7 +1654,7 @@ nvc0_grctx_generate_shaders(struct drm_device *priv)
1657} 1654}
1658 1655
1659static void 1656static void
1660nvc0_grctx_generate_unk60xx(struct drm_device *priv) 1657nvc0_grctx_generate_unk60xx(struct nvc0_graph_priv *priv)
1661{ 1658{
1662 nv_wr32(priv, 0x406020, 0x000103c1); 1659 nv_wr32(priv, 0x406020, 0x000103c1);
1663 nv_wr32(priv, 0x406028, 0x00000001); 1660 nv_wr32(priv, 0x406028, 0x00000001);
@@ -1667,25 +1664,24 @@ nvc0_grctx_generate_unk60xx(struct drm_device *priv)
1667} 1664}
1668 1665
1669static void 1666static void
1670nvc0_grctx_generate_unk64xx(struct drm_device *priv) 1667nvc0_grctx_generate_unk64xx(struct nvc0_graph_priv *priv)
1671{ 1668{
1672 struct drm_nouveau_private *dev_priv = priv->dev_private;
1673 1669
1674 nv_wr32(priv, 0x4064a8, 0x00000000); 1670 nv_wr32(priv, 0x4064a8, 0x00000000);
1675 nv_wr32(priv, 0x4064ac, 0x00003fff); 1671 nv_wr32(priv, 0x4064ac, 0x00003fff);
1676 nv_wr32(priv, 0x4064b4, 0x00000000); 1672 nv_wr32(priv, 0x4064b4, 0x00000000);
1677 nv_wr32(priv, 0x4064b8, 0x00000000); 1673 nv_wr32(priv, 0x4064b8, 0x00000000);
1678 if (dev_priv->chipset == 0xd9) 1674 if (nv_device(priv)->chipset == 0xd9)
1679 nv_wr32(priv, 0x4064bc, 0x00000000); 1675 nv_wr32(priv, 0x4064bc, 0x00000000);
1680 if (dev_priv->chipset == 0xc1 || 1676 if (nv_device(priv)->chipset == 0xc1 ||
1681 dev_priv->chipset == 0xd9) { 1677 nv_device(priv)->chipset == 0xd9) {
1682 nv_wr32(priv, 0x4064c0, 0x80140078); 1678 nv_wr32(priv, 0x4064c0, 0x80140078);
1683 nv_wr32(priv, 0x4064c4, 0x0086ffff); 1679 nv_wr32(priv, 0x4064c4, 0x0086ffff);
1684 } 1680 }
1685} 1681}
1686 1682
1687static void 1683static void
1688nvc0_grctx_generate_tpbus(struct drm_device *priv) 1684nvc0_grctx_generate_tpbus(struct nvc0_graph_priv *priv)
1689{ 1685{
1690 nv_wr32(priv, 0x407804, 0x00000023); 1686 nv_wr32(priv, 0x407804, 0x00000023);
1691 nv_wr32(priv, 0x40780c, 0x0a418820); 1687 nv_wr32(priv, 0x40780c, 0x0a418820);
@@ -1698,7 +1694,7 @@ nvc0_grctx_generate_tpbus(struct drm_device *priv)
1698} 1694}
1699 1695
1700static void 1696static void
1701nvc0_grctx_generate_ccache(struct drm_device *priv) 1697nvc0_grctx_generate_ccache(struct nvc0_graph_priv *priv)
1702{ 1698{
1703 nv_wr32(priv, 0x408000, 0x00000000); 1699 nv_wr32(priv, 0x408000, 0x00000000);
1704 nv_wr32(priv, 0x408004, 0x00000000); 1700 nv_wr32(priv, 0x408004, 0x00000000);
@@ -1711,10 +1707,9 @@ nvc0_grctx_generate_ccache(struct drm_device *priv)
1711} 1707}
1712 1708
1713static void 1709static void
1714nvc0_grctx_generate_rop(struct drm_device *priv) 1710nvc0_grctx_generate_rop(struct nvc0_graph_priv *priv)
1715{ 1711{
1716 struct drm_nouveau_private *dev_priv = priv->dev_private; 1712 int chipset = nv_device(priv)->chipset;
1717 int chipset = dev_priv->chipset;
1718 1713
1719 /* ROPC_BROADCAST */ 1714 /* ROPC_BROADCAST */
1720 nv_wr32(priv, 0x408800, 0x02802a3c); 1715 nv_wr32(priv, 0x408800, 0x02802a3c);
@@ -1741,10 +1736,9 @@ nvc0_grctx_generate_rop(struct drm_device *priv)
1741} 1736}
1742 1737
1743static void 1738static void
1744nvc0_grctx_generate_gpc(struct drm_device *priv) 1739nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
1745{ 1740{
1746 struct drm_nouveau_private *dev_priv = priv->dev_private; 1741 int chipset = nv_device(priv)->chipset;
1747 int chipset = dev_priv->chipset;
1748 int i; 1742 int i;
1749 1743
1750 /* GPC_BROADCAST */ 1744 /* GPC_BROADCAST */
@@ -1834,10 +1828,9 @@ nvc0_grctx_generate_gpc(struct drm_device *priv)
1834} 1828}
1835 1829
1836static void 1830static void
1837nvc0_grctx_generate_tp(struct drm_device *priv) 1831nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
1838{ 1832{
1839 struct drm_nouveau_private *dev_priv = priv->dev_private; 1833 int chipset = nv_device(priv)->chipset;
1840 int chipset = dev_priv->chipset;
1841 1834
1842 /* GPC_BROADCAST.TP_BROADCAST */ 1835 /* GPC_BROADCAST.TP_BROADCAST */
1843 nv_wr32(priv, 0x419818, 0x00000000); 1836 nv_wr32(priv, 0x419818, 0x00000000);
@@ -1876,7 +1869,7 @@ nvc0_grctx_generate_tp(struct drm_device *priv)
1876 nv_wr32(priv, 0x419c04, 0x00000006); 1869 nv_wr32(priv, 0x419c04, 0x00000006);
1877 nv_wr32(priv, 0x419c08, 0x00000002); 1870 nv_wr32(priv, 0x419c08, 0x00000002);
1878 nv_wr32(priv, 0x419c20, 0x00000000); 1871 nv_wr32(priv, 0x419c20, 0x00000000);
1879 if (dev_priv->chipset == 0xd9) { 1872 if (nv_device(priv)->chipset == 0xd9) {
1880 nv_wr32(priv, 0x419c24, 0x00084210); 1873 nv_wr32(priv, 0x419c24, 0x00084210);
1881 nv_wr32(priv, 0x419c28, 0x3cf3cf3c); 1874 nv_wr32(priv, 0x419c28, 0x3cf3cf3c);
1882 nv_wr32(priv, 0x419cb0, 0x00020048); 1875 nv_wr32(priv, 0x419cb0, 0x00020048);
@@ -1929,16 +1922,14 @@ nvc0_grctx_generate_tp(struct drm_device *priv)
1929} 1922}
1930 1923
1931int 1924int
1932nvc0_grctx_generate(struct drm_device *priv) 1925nvc0_grctx_generate(struct nvc0_graph_priv *priv)
1933{ 1926{
1934 struct drm_nouveau_private *dev_priv = priv->dev_private;
1935 struct nvc0_graph_priv *oprv = nv_engine(priv, NVOBJ_ENGINE_GR);
1936 struct nvc0_grctx info; 1927 struct nvc0_grctx info;
1937 int ret, i, gpc, tpc, id; 1928 int ret, i, gpc, tpc, id;
1938 u32 fermi = nvc0_graph_class(priv); 1929 u32 fermi = nvc0_graph_class(priv);
1939 u32 r000260, tmp; 1930 u32 r000260, tmp;
1940 1931
1941 ret = nvc0_grctx_init(priv, oprv, &info); 1932 ret = nvc0_grctx_init(priv, &info);
1942 if (ret) 1933 if (ret)
1943 return ret; 1934 return ret;
1944 1935
@@ -1975,11 +1966,11 @@ nvc0_grctx_generate(struct drm_device *priv)
1975 mmio_list(0x419008, 0x00000000, 0, 0); 1966 mmio_list(0x419008, 0x00000000, 0, 0);
1976 mmio_list(0x418808, 0x00000000, 8, 0); 1967 mmio_list(0x418808, 0x00000000, 8, 0);
1977 mmio_list(0x41880c, 0x80000018, 0, 0); 1968 mmio_list(0x41880c, 0x80000018, 0, 0);
1978 if (dev_priv->chipset != 0xc1) { 1969 if (nv_device(priv)->chipset != 0xc1) {
1979 tmp = 0x02180000; 1970 tmp = 0x02180000;
1980 mmio_list(0x405830, tmp, 0, 0); 1971 mmio_list(0x405830, tmp, 0, 0);
1981 for (gpc = 0; gpc < oprv->gpc_nr; gpc++) { 1972 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
1982 for (tpc = 0; tpc < oprv->tpc_nr[gpc]; tpc++) { 1973 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
1983 u32 reg = TPC_UNIT(gpc, tpc, 0x0520); 1974 u32 reg = TPC_UNIT(gpc, tpc, 0x0520);
1984 mmio_list(reg, tmp, 0, 0); 1975 mmio_list(reg, tmp, 0, 0);
1985 tmp += 0x0324; 1976 tmp += 0x0324;
@@ -1989,13 +1980,13 @@ nvc0_grctx_generate(struct drm_device *priv)
1989 tmp = 0x02180000; 1980 tmp = 0x02180000;
1990 mmio_list(0x405830, 0x00000218 | tmp, 0, 0); 1981 mmio_list(0x405830, 0x00000218 | tmp, 0, 0);
1991 mmio_list(0x4064c4, 0x0086ffff, 0, 0); 1982 mmio_list(0x4064c4, 0x0086ffff, 0, 0);
1992 for (gpc = 0; gpc < oprv->gpc_nr; gpc++) { 1983 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
1993 for (tpc = 0; tpc < oprv->tpc_nr[gpc]; tpc++) { 1984 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
1994 u32 reg = TPC_UNIT(gpc, tpc, 0x0520); 1985 u32 reg = TPC_UNIT(gpc, tpc, 0x0520);
1995 mmio_list(reg, 0x10000000 | tmp, 0, 0); 1986 mmio_list(reg, 0x10000000 | tmp, 0, 0);
1996 tmp += 0x0324; 1987 tmp += 0x0324;
1997 } 1988 }
1998 for (tpc = 0; tpc < oprv->tpc_nr[gpc]; tpc++) { 1989 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
1999 u32 reg = TPC_UNIT(gpc, tpc, 0x0544); 1990 u32 reg = TPC_UNIT(gpc, tpc, 0x0544);
2000 mmio_list(reg, tmp, 0, 0); 1991 mmio_list(reg, tmp, 0, 0);
2001 tmp += 0x0324; 1992 tmp += 0x0324;
@@ -2004,8 +1995,8 @@ nvc0_grctx_generate(struct drm_device *priv)
2004 } 1995 }
2005 1996
2006 for (tpc = 0, id = 0; tpc < 4; tpc++) { 1997 for (tpc = 0, id = 0; tpc < 4; tpc++) {
2007 for (gpc = 0; gpc < oprv->gpc_nr; gpc++) { 1998 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
2008 if (tpc < oprv->tpc_nr[gpc]) { 1999 if (tpc < priv->tpc_nr[gpc]) {
2009 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id); 2000 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
2010 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x4e8), id); 2001 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x4e8), id);
2011 nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id); 2002 nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
@@ -2013,14 +2004,14 @@ nvc0_grctx_generate(struct drm_device *priv)
2013 id++; 2004 id++;
2014 } 2005 }
2015 2006
2016 nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), oprv->tpc_nr[gpc]); 2007 nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
2017 nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), oprv->tpc_nr[gpc]); 2008 nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
2018 } 2009 }
2019 } 2010 }
2020 2011
2021 tmp = 0; 2012 tmp = 0;
2022 for (i = 0; i < oprv->gpc_nr; i++) 2013 for (i = 0; i < priv->gpc_nr; i++)
2023 tmp |= oprv->tpc_nr[i] << (i * 4); 2014 tmp |= priv->tpc_nr[i] << (i * 4);
2024 nv_wr32(priv, 0x406028, tmp); 2015 nv_wr32(priv, 0x406028, tmp);
2025 nv_wr32(priv, 0x405870, tmp); 2016 nv_wr32(priv, 0x405870, tmp);
2026 2017
@@ -2034,13 +2025,13 @@ nvc0_grctx_generate(struct drm_device *priv)
2034 if (1) { 2025 if (1) {
2035 u8 tpcnr[GPC_MAX], data[TPC_MAX]; 2026 u8 tpcnr[GPC_MAX], data[TPC_MAX];
2036 2027
2037 memcpy(tpcnr, oprv->tpc_nr, sizeof(oprv->tpc_nr)); 2028 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2038 memset(data, 0x1f, sizeof(data)); 2029 memset(data, 0x1f, sizeof(data));
2039 2030
2040 gpc = -1; 2031 gpc = -1;
2041 for (tpc = 0; tpc < oprv->tpc_total; tpc++) { 2032 for (tpc = 0; tpc < priv->tpc_total; tpc++) {
2042 do { 2033 do {
2043 gpc = (gpc + 1) % oprv->gpc_nr; 2034 gpc = (gpc + 1) % priv->gpc_nr;
2044 } while (!tpcnr[gpc]); 2035 } while (!tpcnr[gpc]);
2045 tpcnr[gpc]--; 2036 tpcnr[gpc]--;
2046 data[tpc] = gpc; 2037 data[tpc] = gpc;
@@ -2056,12 +2047,12 @@ nvc0_grctx_generate(struct drm_device *priv)
2056 u8 shift, ntpcv; 2047 u8 shift, ntpcv;
2057 2048
2058 /* calculate first set of magics */ 2049 /* calculate first set of magics */
2059 memcpy(tpcnr, oprv->tpc_nr, sizeof(oprv->tpc_nr)); 2050 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2060 2051
2061 gpc = -1; 2052 gpc = -1;
2062 for (tpc = 0; tpc < oprv->tpc_total; tpc++) { 2053 for (tpc = 0; tpc < priv->tpc_total; tpc++) {
2063 do { 2054 do {
2064 gpc = (gpc + 1) % oprv->gpc_nr; 2055 gpc = (gpc + 1) % priv->gpc_nr;
2065 } while (!tpcnr[gpc]); 2056 } while (!tpcnr[gpc]);
2066 tpcnr[gpc]--; 2057 tpcnr[gpc]--;
2067 2058
@@ -2073,7 +2064,7 @@ nvc0_grctx_generate(struct drm_device *priv)
2073 2064
2074 /* and the second... */ 2065 /* and the second... */
2075 shift = 0; 2066 shift = 0;
2076 ntpcv = oprv->tpc_total; 2067 ntpcv = priv->tpc_total;
2077 while (!(ntpcv & (1 << 4))) { 2068 while (!(ntpcv & (1 << 4))) {
2078 ntpcv <<= 1; 2069 ntpcv <<= 1;
2079 shift++; 2070 shift++;
@@ -2086,22 +2077,22 @@ nvc0_grctx_generate(struct drm_device *priv)
2086 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5); 2077 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
2087 2078
2088 /* GPC_BROADCAST */ 2079 /* GPC_BROADCAST */
2089 nv_wr32(priv, 0x418bb8, (oprv->tpc_total << 8) | 2080 nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) |
2090 oprv->magic_not_rop_nr); 2081 priv->magic_not_rop_nr);
2091 for (i = 0; i < 6; i++) 2082 for (i = 0; i < 6; i++)
2092 nv_wr32(priv, 0x418b08 + (i * 4), data[i]); 2083 nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
2093 2084
2094 /* GPC_BROADCAST.TP_BROADCAST */ 2085 /* GPC_BROADCAST.TP_BROADCAST */
2095 nv_wr32(priv, 0x419bd0, (oprv->tpc_total << 8) | 2086 nv_wr32(priv, 0x419bd0, (priv->tpc_total << 8) |
2096 oprv->magic_not_rop_nr | 2087 priv->magic_not_rop_nr |
2097 data2[0]); 2088 data2[0]);
2098 nv_wr32(priv, 0x419be4, data2[1]); 2089 nv_wr32(priv, 0x419be4, data2[1]);
2099 for (i = 0; i < 6; i++) 2090 for (i = 0; i < 6; i++)
2100 nv_wr32(priv, 0x419b00 + (i * 4), data[i]); 2091 nv_wr32(priv, 0x419b00 + (i * 4), data[i]);
2101 2092
2102 /* UNK78xx */ 2093 /* UNK78xx */
2103 nv_wr32(priv, 0x4078bc, (oprv->tpc_total << 8) | 2094 nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) |
2104 oprv->magic_not_rop_nr); 2095 priv->magic_not_rop_nr);
2105 for (i = 0; i < 6; i++) 2096 for (i = 0; i < 6; i++)
2106 nv_wr32(priv, 0x40780c + (i * 4), data[i]); 2097 nv_wr32(priv, 0x40780c + (i * 4), data[i]);
2107 } 2098 }
@@ -2110,18 +2101,18 @@ nvc0_grctx_generate(struct drm_device *priv)
2110 u32 tpc_mask = 0, tpc_set = 0; 2101 u32 tpc_mask = 0, tpc_set = 0;
2111 u8 tpcnr[GPC_MAX], a, b; 2102 u8 tpcnr[GPC_MAX], a, b;
2112 2103
2113 memcpy(tpcnr, oprv->tpc_nr, sizeof(oprv->tpc_nr)); 2104 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2114 for (gpc = 0; gpc < oprv->gpc_nr; gpc++) 2105 for (gpc = 0; gpc < priv->gpc_nr; gpc++)
2115 tpc_mask |= ((1 << oprv->tpc_nr[gpc]) - 1) << (gpc * 8); 2106 tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
2116 2107
2117 for (i = 0, gpc = -1, b = -1; i < 32; i++) { 2108 for (i = 0, gpc = -1, b = -1; i < 32; i++) {
2118 a = (i * (oprv->tpc_total - 1)) / 32; 2109 a = (i * (priv->tpc_total - 1)) / 32;
2119 if (a != b) { 2110 if (a != b) {
2120 b = a; 2111 b = a;
2121 do { 2112 do {
2122 gpc = (gpc + 1) % oprv->gpc_nr; 2113 gpc = (gpc + 1) % priv->gpc_nr;
2123 } while (!tpcnr[gpc]); 2114 } while (!tpcnr[gpc]);
2124 tpc = oprv->tpc_nr[gpc] - tpcnr[gpc]--; 2115 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
2125 2116
2126 tpc_set |= 1 << ((gpc * 8) + tpc); 2117 tpc_set |= 1 << ((gpc * 8) + tpc);
2127 } 2118 }
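The 32-iteration loop above is a fixed-point ramp: a = (i * (tpc_total - 1))
/ 32 steps through tpc_total - 1 distinct values as i runs 0..31, and each
step draws the next TPC round-robin from the per-GPC budgets and sets its
bit (8 bits are reserved per GPC) in tpc_set, spreading the enables evenly
across the 32 slots. Worked by hand for an assumed tpc_total of 4:

  /* i        : 0  1 .. 10 | 11 .. 21 | 22 .. 31
   * (i*3)/32 : 0  0 ..  0 |  1 ..  1 |  2 ..  2
   * 'a' changes value at i = 0, 11 and 22, so three bits are
   * set, one per step; tpc_mask (built just before the loop)
   * still names every TPC present on the chip. */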
@@ -2232,7 +2223,7 @@ nvc0_grctx_generate(struct drm_device *priv)
2232 nv_icmd(priv, 0x00000215, 0x00000040); 2223 nv_icmd(priv, 0x00000215, 0x00000040);
2233 nv_icmd(priv, 0x00000216, 0x00000040); 2224 nv_icmd(priv, 0x00000216, 0x00000040);
2234 nv_icmd(priv, 0x00000217, 0x00000040); 2225 nv_icmd(priv, 0x00000217, 0x00000040);
2235 if (dev_priv->chipset == 0xd9) { 2226 if (nv_device(priv)->chipset == 0xd9) {
2236 for (i = 0x0400; i <= 0x0417; i++) 2227 for (i = 0x0400; i <= 0x0417; i++)
2237 nv_icmd(priv, i, 0x00000040); 2228 nv_icmd(priv, i, 0x00000040);
2238 } 2229 }
@@ -2244,7 +2235,7 @@ nvc0_grctx_generate(struct drm_device *priv)
2244 nv_icmd(priv, 0x0000021d, 0x0000c080); 2235 nv_icmd(priv, 0x0000021d, 0x0000c080);
2245 nv_icmd(priv, 0x0000021e, 0x0000c080); 2236 nv_icmd(priv, 0x0000021e, 0x0000c080);
2246 nv_icmd(priv, 0x0000021f, 0x0000c080); 2237 nv_icmd(priv, 0x0000021f, 0x0000c080);
2247 if (dev_priv->chipset == 0xd9) { 2238 if (nv_device(priv)->chipset == 0xd9) {
2248 for (i = 0x0440; i <= 0x0457; i++) 2239 for (i = 0x0440; i <= 0x0457; i++)
2249 nv_icmd(priv, i, 0x0000c080); 2240 nv_icmd(priv, i, 0x0000c080);
2250 } 2241 }
@@ -2810,8 +2801,8 @@ nvc0_grctx_generate(struct drm_device *priv)
2810 nv_icmd(priv, 0x0000053f, 0xffff0000); 2801 nv_icmd(priv, 0x0000053f, 0xffff0000);
2811 nv_icmd(priv, 0x00000585, 0x0000003f); 2802 nv_icmd(priv, 0x00000585, 0x0000003f);
2812 nv_icmd(priv, 0x00000576, 0x00000003); 2803 nv_icmd(priv, 0x00000576, 0x00000003);
2813 if (dev_priv->chipset == 0xc1 || 2804 if (nv_device(priv)->chipset == 0xc1 ||
2814 dev_priv->chipset == 0xd9) 2805 nv_device(priv)->chipset == 0xd9)
2815 nv_icmd(priv, 0x0000057b, 0x00000059); 2806 nv_icmd(priv, 0x0000057b, 0x00000059);
2816 nv_icmd(priv, 0x00000586, 0x00000040); 2807 nv_icmd(priv, 0x00000586, 0x00000040);
2817 nv_icmd(priv, 0x00000582, 0x00000080); 2808 nv_icmd(priv, 0x00000582, 0x00000080);
@@ -2913,7 +2904,7 @@ nvc0_grctx_generate(struct drm_device *priv)
2913 nv_icmd(priv, 0x00000957, 0x00000003); 2904 nv_icmd(priv, 0x00000957, 0x00000003);
2914 nv_icmd(priv, 0x0000095e, 0x20164010); 2905 nv_icmd(priv, 0x0000095e, 0x20164010);
2915 nv_icmd(priv, 0x0000095f, 0x00000020); 2906 nv_icmd(priv, 0x0000095f, 0x00000020);
2916 if (dev_priv->chipset == 0xd9) 2907 if (nv_device(priv)->chipset == 0xd9)
2917 nv_icmd(priv, 0x0000097d, 0x00000020); 2908 nv_icmd(priv, 0x0000097d, 0x00000020);
2918 nv_icmd(priv, 0x00000683, 0x00000006); 2909 nv_icmd(priv, 0x00000683, 0x00000006);
2919 nv_icmd(priv, 0x00000685, 0x003fffff); 2910 nv_icmd(priv, 0x00000685, 0x003fffff);
@@ -3056,5 +3047,6 @@ nvc0_grctx_generate(struct drm_device *priv)
3056 nvc0_grctx_generate_90c0(priv); 3047 nvc0_grctx_generate_90c0(priv);
3057 3048
3058 nv_wr32(priv, 0x000260, r000260); 3049 nv_wr32(priv, 0x000260, r000260);
3050
3059 return nvc0_grctx_fini(&info); 3051 return nvc0_grctx_fini(&info);
3060} 3052}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
index e5503170d68c..6d8c63931ee6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
@@ -22,13 +22,10 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include <core/mm.h>
28#include "nvc0.h" 25#include "nvc0.h"
29 26
30static void 27static void
31nve0_grctx_generate_icmd(struct drm_device *priv) 28nve0_grctx_generate_icmd(struct nvc0_graph_priv *priv)
32{ 29{
33 nv_wr32(priv, 0x400208, 0x80000000); 30 nv_wr32(priv, 0x400208, 0x80000000);
34 nv_icmd(priv, 0x001000, 0x00000004); 31 nv_icmd(priv, 0x001000, 0x00000004);
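This file gets the same treatment as ctxnvc0.c above: each helper stops
taking a struct drm_device and digging the graphics engine out through the
old compat layer, and instead receives the struct nvc0_graph_priv engine
object directly, which nv_wr32()/nv_icmd() accept as their first argument.
Schematically (a paraphrase of the conversion, not additional driver code):

  /* before: reach the engine through the device */
  static void gen(struct drm_device *dev)
  {
      struct nvc0_graph_priv *oprv = nv_engine(dev, NVOBJ_ENGINE_GR);
      /* ... register writes via dev, state via oprv ... */
  }

  /* after: the engine object itself is the handle */
  static void gen(struct nvc0_graph_priv *priv)
  {
      nv_wr32(priv, 0x400208, 0x80000000);
  }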
@@ -916,7 +913,7 @@ nve0_grctx_generate_icmd(struct drm_device *priv)
916} 913}
917 914
918static void 915static void
919nve0_grctx_generate_a097(struct drm_device *priv) 916nve0_grctx_generate_a097(struct nvc0_graph_priv *priv)
920{ 917{
921 nv_mthd(priv, 0xa097, 0x0800, 0x00000000); 918 nv_mthd(priv, 0xa097, 0x0800, 0x00000000);
922 nv_mthd(priv, 0xa097, 0x0840, 0x00000000); 919 nv_mthd(priv, 0xa097, 0x0840, 0x00000000);
@@ -2146,7 +2143,7 @@ nve0_grctx_generate_a097(struct drm_device *priv)
2146} 2143}
2147 2144
2148static void 2145static void
2149nve0_grctx_generate_902d(struct drm_device *priv) 2146nve0_grctx_generate_902d(struct nvc0_graph_priv *priv)
2150{ 2147{
2151 nv_mthd(priv, 0x902d, 0x0200, 0x000000cf); 2148 nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
2152 nv_mthd(priv, 0x902d, 0x0204, 0x00000001); 2149 nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
@@ -2169,7 +2166,7 @@ nve0_grctx_generate_902d(struct drm_device *priv)
2169} 2166}
2170 2167
2171static void 2168static void
2172nve0_graph_generate_unk40xx(struct drm_device *priv) 2169nve0_graph_generate_unk40xx(struct nvc0_graph_priv *priv)
2173{ 2170{
2174 nv_wr32(priv, 0x404010, 0x0); 2171 nv_wr32(priv, 0x404010, 0x0);
2175 nv_wr32(priv, 0x404014, 0x0); 2172 nv_wr32(priv, 0x404014, 0x0);
@@ -2213,7 +2210,7 @@ nve0_graph_generate_unk40xx(struct drm_device *priv)
2213} 2210}
2214 2211
2215static void 2212static void
2216nve0_graph_generate_unk44xx(struct drm_device *priv) 2213nve0_graph_generate_unk44xx(struct nvc0_graph_priv *priv)
2217{ 2214{
2218 nv_wr32(priv, 0x404404, 0x0); 2215 nv_wr32(priv, 0x404404, 0x0);
2219 nv_wr32(priv, 0x404408, 0x0); 2216 nv_wr32(priv, 0x404408, 0x0);
@@ -2238,7 +2235,7 @@ nve0_graph_generate_unk44xx(struct drm_device *priv)
2238} 2235}
2239 2236
2240static void 2237static void
2241nve0_graph_generate_unk46xx(struct drm_device *priv) 2238nve0_graph_generate_unk46xx(struct nvc0_graph_priv *priv)
2242{ 2239{
2243 nv_wr32(priv, 0x404604, 0x14); 2240 nv_wr32(priv, 0x404604, 0x14);
2244 nv_wr32(priv, 0x404608, 0x0); 2241 nv_wr32(priv, 0x404608, 0x0);
@@ -2278,7 +2275,7 @@ nve0_graph_generate_unk46xx(struct drm_device *priv)
2278} 2275}
2279 2276
2280static void 2277static void
2281nve0_graph_generate_unk47xx(struct drm_device *priv) 2278nve0_graph_generate_unk47xx(struct nvc0_graph_priv *priv)
2282{ 2279{
2283 nv_wr32(priv, 0x404700, 0x0); 2280 nv_wr32(priv, 0x404700, 0x0);
2284 nv_wr32(priv, 0x404704, 0x0); 2281 nv_wr32(priv, 0x404704, 0x0);
@@ -2299,7 +2296,7 @@ nve0_graph_generate_unk47xx(struct drm_device *priv)
2299} 2296}
2300 2297
2301static void 2298static void
2302nve0_graph_generate_unk58xx(struct drm_device *priv) 2299nve0_graph_generate_unk58xx(struct nvc0_graph_priv *priv)
2303{ 2300{
2304 nv_wr32(priv, 0x405800, 0xf8000bf); 2301 nv_wr32(priv, 0x405800, 0xf8000bf);
2305 nv_wr32(priv, 0x405830, 0x2180648); 2302 nv_wr32(priv, 0x405830, 0x2180648);
@@ -2318,7 +2315,7 @@ nve0_graph_generate_unk58xx(struct drm_device *priv)
2318} 2315}
2319 2316
2320static void 2317static void
2321nve0_graph_generate_unk60xx(struct drm_device *priv) 2318nve0_graph_generate_unk60xx(struct nvc0_graph_priv *priv)
2322{ 2319{
2323 nv_wr32(priv, 0x406020, 0x4103c1); 2320 nv_wr32(priv, 0x406020, 0x4103c1);
2324 nv_wr32(priv, 0x406028, 0x1); 2321 nv_wr32(priv, 0x406028, 0x1);
@@ -2328,7 +2325,7 @@ nve0_graph_generate_unk60xx(struct drm_device *priv)
2328} 2325}
2329 2326
2330static void 2327static void
2331nve0_graph_generate_unk64xx(struct drm_device *priv) 2328nve0_graph_generate_unk64xx(struct nvc0_graph_priv *priv)
2332{ 2329{
2333 nv_wr32(priv, 0x4064a8, 0x0); 2330 nv_wr32(priv, 0x4064a8, 0x0);
2334 nv_wr32(priv, 0x4064ac, 0x3fff); 2331 nv_wr32(priv, 0x4064ac, 0x3fff);
@@ -2350,13 +2347,13 @@ nve0_graph_generate_unk64xx(struct drm_device *priv)
2350} 2347}
2351 2348
2352static void 2349static void
2353nve0_graph_generate_unk70xx(struct drm_device *priv) 2350nve0_graph_generate_unk70xx(struct nvc0_graph_priv *priv)
2354{ 2351{
2355 nv_wr32(priv, 0x407040, 0x0); 2352 nv_wr32(priv, 0x407040, 0x0);
2356} 2353}
2357 2354
2358static void 2355static void
2359nve0_graph_generate_unk78xx(struct drm_device *priv) 2356nve0_graph_generate_unk78xx(struct nvc0_graph_priv *priv)
2360{ 2357{
2361 nv_wr32(priv, 0x407804, 0x23); 2358 nv_wr32(priv, 0x407804, 0x23);
2362 nv_wr32(priv, 0x40780c, 0xa418820); 2359 nv_wr32(priv, 0x40780c, 0xa418820);
@@ -2369,7 +2366,7 @@ nve0_graph_generate_unk78xx(struct drm_device *priv)
2369} 2366}
2370 2367
2371static void 2368static void
2372nve0_graph_generate_unk80xx(struct drm_device *priv) 2369nve0_graph_generate_unk80xx(struct nvc0_graph_priv *priv)
2373{ 2370{
2374 nv_wr32(priv, 0x408000, 0x0); 2371 nv_wr32(priv, 0x408000, 0x0);
2375 nv_wr32(priv, 0x408004, 0x0); 2372 nv_wr32(priv, 0x408004, 0x0);
@@ -2382,7 +2379,7 @@ nve0_graph_generate_unk80xx(struct drm_device *priv)
2382} 2379}
2383 2380
2384static void 2381static void
2385nve0_graph_generate_unk88xx(struct drm_device *priv) 2382nve0_graph_generate_unk88xx(struct nvc0_graph_priv *priv)
2386{ 2383{
2387 nv_wr32(priv, 0x408800, 0x2802a3c); 2384 nv_wr32(priv, 0x408800, 0x2802a3c);
2388 nv_wr32(priv, 0x408804, 0x40); 2385 nv_wr32(priv, 0x408804, 0x40);
@@ -2395,7 +2392,7 @@ nve0_graph_generate_unk88xx(struct drm_device *priv)
2395} 2392}
2396 2393
2397static void 2394static void
2398nve0_graph_generate_gpc(struct drm_device *priv) 2395nve0_graph_generate_gpc(struct nvc0_graph_priv *priv)
2399{ 2396{
2400 nv_wr32(priv, 0x418380, 0x16); 2397 nv_wr32(priv, 0x418380, 0x16);
2401 nv_wr32(priv, 0x418400, 0x38004e00); 2398 nv_wr32(priv, 0x418400, 0x38004e00);
@@ -2521,7 +2518,7 @@ nve0_graph_generate_gpc(struct drm_device *priv)
2521} 2518}
2522 2519
2523static void 2520static void
2524nve0_graph_generate_tpc(struct drm_device *priv) 2521nve0_graph_generate_tpc(struct nvc0_graph_priv *priv)
2525{ 2522{
2526 nv_wr32(priv, 0x419848, 0x0); 2523 nv_wr32(priv, 0x419848, 0x0);
2527 nv_wr32(priv, 0x419864, 0x129); 2524 nv_wr32(priv, 0x419864, 0x129);
@@ -2586,7 +2583,7 @@ nve0_graph_generate_tpc(struct drm_device *priv)
2586} 2583}
2587 2584
2588static void 2585static void
2589nve0_graph_generate_tpcunk(struct drm_device *priv) 2586nve0_graph_generate_tpcunk(struct nvc0_graph_priv *priv)
2590{ 2587{
2591 nv_wr32(priv, 0x41be24, 0x6); 2588 nv_wr32(priv, 0x41be24, 0x6);
2592 nv_wr32(priv, 0x41bec0, 0x12180000); 2589 nv_wr32(priv, 0x41bec0, 0x12180000);
@@ -2604,9 +2601,8 @@ nve0_graph_generate_tpcunk(struct drm_device *priv)
2604} 2601}
2605 2602
2606int 2603int
2607nve0_grctx_generate(struct drm_device *priv) 2604nve0_grctx_generate(struct nvc0_graph_priv *priv)
2608{ 2605{
2609 struct nvc0_graph_priv *oprv = nv_engine(priv, NVOBJ_ENGINE_GR);
2610 struct nvc0_grctx info; 2606 struct nvc0_grctx info;
2611 int ret, i, gpc, tpc, id; 2607 int ret, i, gpc, tpc, id;
2612 u32 data[6] = {}, data2[2] = {}, tmp; 2608 u32 data[6] = {}, data2[2] = {}, tmp;
@@ -2615,7 +2611,7 @@ nve0_grctx_generate(struct drm_device *priv)
2615 u8 tpcnr[GPC_MAX], a, b; 2611 u8 tpcnr[GPC_MAX], a, b;
2616 u8 shift, ntpcv; 2612 u8 shift, ntpcv;
2617 2613
2618 ret = nvc0_grctx_init(priv, oprv, &info); 2614 ret = nvc0_grctx_init(priv, &info);
2619 if (ret) 2615 if (ret)
2620 return ret; 2616 return ret;
2621 2617
@@ -2657,17 +2653,17 @@ nve0_grctx_generate(struct drm_device *priv)
2657 mmio_list(0x419848, 0x10000000, 12, 2); 2653 mmio_list(0x419848, 0x10000000, 12, 2);
2658 mmio_list(0x405830, 0x02180648, 0, 0); 2654 mmio_list(0x405830, 0x02180648, 0, 0);
2659 mmio_list(0x4064c4, 0x0192ffff, 0, 0); 2655 mmio_list(0x4064c4, 0x0192ffff, 0, 0);
2660 for (gpc = 0, offset = 0; gpc < oprv->gpc_nr; gpc++) { 2656 for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
2661 u16 magic0 = 0x0218 * oprv->tpc_nr[gpc]; 2657 u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
2662 u16 magic1 = 0x0648 * oprv->tpc_nr[gpc]; 2658 u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
2663 magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset; 2659 magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
2664 magic[gpc][1] = 0x00000000 | (magic1 << 16); 2660 magic[gpc][1] = 0x00000000 | (magic1 << 16);
2665 offset += 0x0324 * oprv->tpc_nr[gpc]; 2661 offset += 0x0324 * priv->tpc_nr[gpc];
2666 } 2662 }
2667 for (gpc = 0; gpc < oprv->gpc_nr; gpc++) { 2663 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
2668 mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0); 2664 mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
2669 mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0); 2665 mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
2670 offset += 0x07ff * oprv->tpc_nr[gpc]; 2666 offset += 0x07ff * priv->tpc_nr[gpc];
2671 } 2667 }
2672 mmio_list(0x17e91c, 0x06060609, 0, 0); 2668 mmio_list(0x17e91c, 0x06060609, 0, 0);
2673 mmio_list(0x17e920, 0x00090a05, 0, 0); 2669 mmio_list(0x17e920, 0x00090a05, 0, 0);
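The per-GPC magic values above scale two fixed strides by the GPC's TPC
count and thread a running offset through both passes; 0x0218 and 0x0648
echo the 0x02180648 default written to 0x405830 a few lines earlier. Worked
through for an assumed pair of GPCs with 3 and 1 TPCs:

  /* gpc 0: magic0 = 0x0218 * 3 = 0x0648, magic1 = 0x0648 * 3 = 0x12d8
   *        magic[0][0] = 0x10000000 | (0x0648 << 16) | 0x0000
   *        offset     += 0x0324 * 3 = 0x096c
   * gpc 1: magic0 = 0x0218,             magic1 = 0x0648
   *        magic[1][0] = 0x10000000 | (0x0218 << 16) | 0x096c
   * the second loop then advances offset by a further 0x07ff per
   * TPC while emitting the GPC_UNIT(gpc, 0x30c0/0x30e4) entries. */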
@@ -2680,22 +2676,22 @@ nve0_grctx_generate(struct drm_device *priv)
2680 nv_wr32(priv, 0x419c00, 0xa); 2676 nv_wr32(priv, 0x419c00, 0xa);
2681 2677
2682 for (tpc = 0, id = 0; tpc < 4; tpc++) { 2678 for (tpc = 0, id = 0; tpc < 4; tpc++) {
2683 for (gpc = 0; gpc < oprv->gpc_nr; gpc++) { 2679 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
2684 if (tpc < oprv->tpc_nr[gpc]) { 2680 if (tpc < priv->tpc_nr[gpc]) {
2685 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0698), id); 2681 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0698), id);
2686 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x04e8), id); 2682 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x04e8), id);
2687 nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id); 2683 nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
2688 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0088), id++); 2684 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0088), id++);
2689 } 2685 }
2690 2686
2691 nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), oprv->tpc_nr[gpc]); 2687 nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
2692 nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), oprv->tpc_nr[gpc]); 2688 nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
2693 } 2689 }
2694 } 2690 }
2695 2691
2696 tmp = 0; 2692 tmp = 0;
2697 for (i = 0; i < oprv->gpc_nr; i++) 2693 for (i = 0; i < priv->gpc_nr; i++)
2698 tmp |= oprv->tpc_nr[i] << (i * 4); 2694 tmp |= priv->tpc_nr[i] << (i * 4);
2699 nv_wr32(priv, 0x406028, tmp); 2695 nv_wr32(priv, 0x406028, tmp);
2700 nv_wr32(priv, 0x405870, tmp); 2696 nv_wr32(priv, 0x405870, tmp);
2701 2697
@@ -2707,12 +2703,12 @@ nve0_grctx_generate(struct drm_device *priv)
2707 nv_wr32(priv, 0x40587c, 0x0); 2703 nv_wr32(priv, 0x40587c, 0x0);
2708 2704
2709 /* calculate first set of magics */ 2705 /* calculate first set of magics */
2710 memcpy(tpcnr, oprv->tpc_nr, sizeof(oprv->tpc_nr)); 2706 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2711 2707
2712 gpc = -1; 2708 gpc = -1;
2713 for (tpc = 0; tpc < oprv->tpc_total; tpc++) { 2709 for (tpc = 0; tpc < priv->tpc_total; tpc++) {
2714 do { 2710 do {
2715 gpc = (gpc + 1) % oprv->gpc_nr; 2711 gpc = (gpc + 1) % priv->gpc_nr;
2716 } while (!tpcnr[gpc]); 2712 } while (!tpcnr[gpc]);
2717 tpcnr[gpc]--; 2713 tpcnr[gpc]--;
2718 2714
@@ -2724,7 +2720,7 @@ nve0_grctx_generate(struct drm_device *priv)
2724 2720
2725 /* and the second... */ 2721 /* and the second... */
2726 shift = 0; 2722 shift = 0;
2727 ntpcv = oprv->tpc_total; 2723 ntpcv = priv->tpc_total;
2728 while (!(ntpcv & (1 << 4))) { 2724 while (!(ntpcv & (1 << 4))) {
2729 ntpcv <<= 1; 2725 ntpcv <<= 1;
2730 shift++; 2726 shift++;
@@ -2733,13 +2729,13 @@ nve0_grctx_generate(struct drm_device *priv)
2733 data2[0] = ntpcv << 16; 2729 data2[0] = ntpcv << 16;
2734 data2[0] |= shift << 21; 2730 data2[0] |= shift << 21;
2735 data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24); 2731 data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
2736 data2[0] |= oprv->tpc_total << 8; 2732 data2[0] |= priv->tpc_total << 8;
2737 data2[0] |= oprv->magic_not_rop_nr; 2733 data2[0] |= priv->magic_not_rop_nr;
2738 for (i = 1; i < 7; i++) 2734 for (i = 1; i < 7; i++)
2739 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5); 2735 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
2740 2736
2741 /* and write it all the various parts of PGRAPH */ 2737 /* and write it all the various parts of PGRAPH */
2742 nv_wr32(priv, 0x418bb8, (oprv->tpc_total << 8) | oprv->magic_not_rop_nr); 2738 nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
2743 for (i = 0; i < 6; i++) 2739 for (i = 0; i < 6; i++)
2744 nv_wr32(priv, 0x418b08 + (i * 4), data[i]); 2740 nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
2745 2741
@@ -2748,23 +2744,23 @@ nve0_grctx_generate(struct drm_device *priv)
2748 for (i = 0; i < 6; i++) 2744 for (i = 0; i < 6; i++)
2749 nv_wr32(priv, 0x41bf00 + (i * 4), data[i]); 2745 nv_wr32(priv, 0x41bf00 + (i * 4), data[i]);
2750 2746
2751 nv_wr32(priv, 0x4078bc, (oprv->tpc_total << 8) | oprv->magic_not_rop_nr); 2747 nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
2752 for (i = 0; i < 6; i++) 2748 for (i = 0; i < 6; i++)
2753 nv_wr32(priv, 0x40780c + (i * 4), data[i]); 2749 nv_wr32(priv, 0x40780c + (i * 4), data[i]);
2754 2750
2755 2751
2756 memcpy(tpcnr, oprv->tpc_nr, sizeof(oprv->tpc_nr)); 2752 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2757 for (gpc = 0; gpc < oprv->gpc_nr; gpc++) 2753 for (gpc = 0; gpc < priv->gpc_nr; gpc++)
2758 tpc_mask |= ((1 << oprv->tpc_nr[gpc]) - 1) << (gpc * 8); 2754 tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
2759 2755
2760 for (i = 0, gpc = -1, b = -1; i < 32; i++) { 2756 for (i = 0, gpc = -1, b = -1; i < 32; i++) {
2761 a = (i * (oprv->tpc_total - 1)) / 32; 2757 a = (i * (priv->tpc_total - 1)) / 32;
2762 if (a != b) { 2758 if (a != b) {
2763 b = a; 2759 b = a;
2764 do { 2760 do {
2765 gpc = (gpc + 1) % oprv->gpc_nr; 2761 gpc = (gpc + 1) % priv->gpc_nr;
2766 } while (!tpcnr[gpc]); 2762 } while (!tpcnr[gpc]);
2767 tpc = oprv->tpc_nr[gpc] - tpcnr[gpc]--; 2763 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
2768 2764
2769 tpc_set |= 1 << ((gpc * 8) + tpc); 2765 tpc_set |= 1 << ((gpc * 8) + tpc);
2770 } 2766 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
index 7f3a275157bb..e5b01899dece 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -22,19 +22,22 @@
22 * DEALINGS IN THE SOFTWARE. 22 * DEALINGS IN THE SOFTWARE.
23 */ 23 */
24 24
25#include "drmP.h" 25#include <core/os.h>
26#include "drm.h" 26#include <core/class.h>
27#include <nouveau_drm.h> 27#include <core/handle.h>
28#include "nouveau_drv.h" 28#include <core/namedb.h>
29#include "nouveau_hw.h" 29
30#include "nouveau_util.h" 30#include <subdev/fb.h>
31#include <core/ramht.h> 31#include <subdev/instmem.h>
32 32#include <subdev/timer.h>
33struct nv04_graph_engine { 33
34 struct nouveau_exec_engine base; 34#include <engine/fifo.h>
35}; 35#include <engine/graph.h>
36 36
37static uint32_t nv04_graph_ctx_regs[] = { 37#include "regs.h"
38
39static u32
40nv04_graph_ctx_regs[] = {
38 0x0040053c, 41 0x0040053c,
39 0x00400544, 42 0x00400544,
40 0x00400540, 43 0x00400540,
@@ -348,205 +351,28 @@ static uint32_t nv04_graph_ctx_regs[] = {
348 NV04_PGRAPH_DEBUG_3 351 NV04_PGRAPH_DEBUG_3
349}; 352};
350 353
351struct graph_state { 354struct nv04_graph_priv {
352 uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)]; 355 struct nouveau_graph base;
356 struct nv04_graph_chan *chan[16];
357 spinlock_t lock;
353}; 358};
354 359
355static struct nouveau_channel * 360struct nv04_graph_chan {
356nv04_graph_channel(struct drm_device *dev) 361 struct nouveau_object base;
357{ 362 int chid;
358 struct drm_nouveau_private *dev_priv = dev->dev_private; 363 u32 nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
359 int chid = 15; 364};
360
361 if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
362 chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
363
364 if (chid > 15)
365 return NULL;
366
367 return dev_priv->channels.ptr[chid];
368}
369
370static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
371{
372 int i;
373
374 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
375 if (nv04_graph_ctx_regs[i] == reg)
376 return &ctx->nv04[i];
377 }
378
379 return NULL;
380}
381
382static int
383nv04_graph_load_context(struct nouveau_channel *chan)
384{
385 struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
386 struct drm_device *dev = chan->dev;
387 uint32_t tmp;
388 int i;
389
390 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
391 nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
392
393 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
394
395 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
396 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);
397
398 tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
399 nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
400
401 return 0;
402}
403
404static int
405nv04_graph_unload_context(struct drm_device *dev)
406{
407 struct nouveau_channel *chan = NULL;
408 struct graph_state *ctx;
409 uint32_t tmp;
410 int i;
411
412 chan = nv04_graph_channel(dev);
413 if (!chan)
414 return 0;
415 ctx = chan->engctx[NVOBJ_ENGINE_GR];
416
417 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
418 ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);
419
420 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
421 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
422 tmp |= 15 << 24;
423 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
424 return 0;
425}
426
427static int
428nv04_graph_context_new(struct nouveau_channel *chan, int engine)
429{
430 struct graph_state *pgraph_ctx;
431 NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id);
432
433 pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
434 if (pgraph_ctx == NULL)
435 return -ENOMEM;
436
437 *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
438
439 chan->engctx[engine] = pgraph_ctx;
440 return 0;
441}
442
443static void
444nv04_graph_context_del(struct nouveau_channel *chan, int engine)
445{
446 struct drm_device *dev = chan->dev;
447 struct drm_nouveau_private *dev_priv = dev->dev_private;
448 struct graph_state *pgraph_ctx = chan->engctx[engine];
449 unsigned long flags;
450
451 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
452 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
453
454 /* Unload the context if it's the currently active one */
455 if (nv04_graph_channel(dev) == chan)
456 nv04_graph_unload_context(dev);
457
458 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
459 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
460
461 /* Free the context resources */
462 kfree(pgraph_ctx);
463 chan->engctx[engine] = NULL;
464}
465
466int
467nv04_graph_object_new(struct nouveau_channel *chan, int engine,
468 u32 handle, u16 class)
469{
470 struct drm_device *dev = chan->dev;
471 struct nouveau_gpuobj *obj = NULL;
472 int ret;
473
474 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
475 if (ret)
476 return ret;
477 obj->engine = 1;
478 obj->class = class;
479
480#ifdef __BIG_ENDIAN
481 nv_wo32(obj, 0x00, 0x00080000 | class);
482#else
483 nv_wo32(obj, 0x00, class);
484#endif
485 nv_wo32(obj, 0x04, 0x00000000);
486 nv_wo32(obj, 0x08, 0x00000000);
487 nv_wo32(obj, 0x0c, 0x00000000);
488 365
489 ret = nouveau_ramht_insert(chan, handle, obj);
490 nouveau_gpuobj_ref(NULL, &obj);
491 return ret;
492}
493 366
494static int 367static inline struct nv04_graph_priv *
495nv04_graph_init(struct drm_device *dev, int engine) 368nv04_graph_priv(struct nv04_graph_chan *chan)
496{ 369{
497 uint32_t tmp; 370 return (void *)nv_object(chan)->engine;
498
499 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
500 ~NV_PMC_ENABLE_PGRAPH);
501 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
502 NV_PMC_ENABLE_PGRAPH);
503
504 /* Enable PGRAPH interrupts */
505 nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
506 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
507
508 nv_wr32(dev, NV04_PGRAPH_VALID1, 0);
509 nv_wr32(dev, NV04_PGRAPH_VALID2, 0);
510 /*nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x000001FF);
511 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
512 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
513 /*1231C000 blob, 001 haiku*/
514 /*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
515 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
516 /*0x72111100 blob , 01 haiku*/
517 /*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
518 nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
519 /*haiku same*/
520
521 /*nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
522 nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
523 /*haiku and blob 10d4*/
524
525 nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF);
526 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
527 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
528 tmp |= 15 << 24;
529 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
530
531 /* These don't belong here, they're part of a per-channel context */
532 nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
533 nv_wr32(dev, NV04_PGRAPH_BETA_AND , 0xFFFFFFFF);
534
535 return 0;
536} 371}
537 372
538static int 373/*******************************************************************************
539nv04_graph_fini(struct drm_device *dev, int engine, bool suspend) 374 * Graphics object classes
540{ 375 ******************************************************************************/
541 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
542 if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
543 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
544 return -EBUSY;
545 }
546 nv04_graph_unload_context(dev);
547 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
548 return 0;
549}
550 376
551/* 377/*
552 * Software methods, why they are needed, and how they all work: 378 * Software methods, why they are needed, and how they all work:
@@ -623,37 +449,35 @@ nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
623 */ 449 */
624 450
625static void 451static void
626nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value) 452nv04_graph_set_ctx1(struct nouveau_object *object, u32 mask, u32 value)
627{ 453{
628 struct drm_device *dev = chan->dev; 454 struct nv04_graph_priv *priv = (void *)object->engine;
629 u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; 455 int subc = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
630 int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
631 u32 tmp; 456 u32 tmp;
632 457
633 tmp = nv_ri32(dev, instance); 458 tmp = nv_ro32(object, 0x00);
634 tmp &= ~mask; 459 tmp &= ~mask;
635 tmp |= value; 460 tmp |= value;
461 nv_wo32(object, 0x00, tmp);
636 462
637 nv_wi32(dev, instance, tmp); 463 nv_wr32(priv, NV04_PGRAPH_CTX_SWITCH1, tmp);
638 nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp); 464 nv_wr32(priv, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
639 nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
640} 465}
641 466
642static void 467static void
643nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value) 468nv04_graph_set_ctx_val(struct nouveau_object *object, u32 mask, u32 value)
644{ 469{
645 struct drm_device *dev = chan->dev;
646 u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
647 u32 tmp, ctx1;
648 int class, op, valid = 1; 470 int class, op, valid = 1;
471 u32 tmp, ctx1;
649 472
650 ctx1 = nv_ri32(dev, instance); 473 ctx1 = nv_ro32(object, 0x00);
651 class = ctx1 & 0xff; 474 class = ctx1 & 0xff;
652 op = (ctx1 >> 15) & 7; 475 op = (ctx1 >> 15) & 7;
653 tmp = nv_ri32(dev, instance + 0xc); 476
477 tmp = nv_ro32(object, 0x0c);
654 tmp &= ~mask; 478 tmp &= ~mask;
655 tmp |= value; 479 tmp |= value;
656 nv_wi32(dev, instance + 0xc, tmp); 480 nv_wo32(object, 0x0c, tmp);
657 481
658 /* check for valid surf2d/surf_dst/surf_color */ 482 /* check for valid surf2d/surf_dst/surf_color */
659 if (!(tmp & 0x02000000)) 483 if (!(tmp & 0x02000000))
@@ -685,30 +509,34 @@ nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
685 break; 509 break;
686 } 510 }
687 511
688 nv04_graph_set_ctx1(chan, 0x01000000, valid << 24); 512 nv04_graph_set_ctx1(object, 0x01000000, valid << 24);
689} 513}
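In the ported form these two helpers keep the object's instance-memory copy
and the live PGRAPH state in step: set_ctx1 rewrites word 0x00 of the object
(the class/operation word) and mirrors it into CTX_SWITCH1 and the trapped
subchannel's cache entry, while set_ctx_val rewrites word 0x0c (the
bind-state bitfield) and re-derives the 'valid' bit from whatever operation
ctx1 currently selects. Condensed, assuming the bit positions visible above:

  /* word 0x00: [7:0] class, [17:15] op, bit 24 = 'valid'
   * word 0x0c: one bit per bound resource (surf/rop/beta/patt/...)
   * valid := the resources required by (class, op) are all bound,
   * which is why SET_OPERATION finishes with the otherwise no-op
   *     nv04_graph_set_ctx_val(object, 0, 0);
   * purely to force that recomputation. */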
690 514
691static int 515static int
692nv04_graph_mthd_set_operation(struct nouveau_channel *chan, 516nv04_graph_mthd_set_operation(struct nouveau_object *object, u32 mthd,
693 u32 class, u32 mthd, u32 data) 517 void *args, u32 size)
694{ 518{
519 u32 class = nv_ro32(object, 0) & 0xff;
520 u32 data = *(u32 *)args;
695 if (data > 5) 521 if (data > 5)
696 return 1; 522 return 1;
697 /* Old versions of the objects only accept first three operations. */ 523 /* Old versions of the objects only accept first three operations. */
698 if (data > 2 && class < 0x40) 524 if (data > 2 && class < 0x40)
699 return 1; 525 return 1;
700 nv04_graph_set_ctx1(chan, 0x00038000, data << 15); 526 nv04_graph_set_ctx1(object, 0x00038000, data << 15);
701 /* changing operation changes set of objects needed for validation */ 527 /* changing operation changes set of objects needed for validation */
702 nv04_graph_set_ctx_val(chan, 0, 0); 528 nv04_graph_set_ctx_val(object, 0, 0);
703 return 0; 529 return 0;
704} 530}
705 531
706static int 532static int
707nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, 533nv04_graph_mthd_surf3d_clip_h(struct nouveau_object *object, u32 mthd,
708 u32 class, u32 mthd, u32 data) 534 void *args, u32 size)
709{ 535{
710 uint32_t min = data & 0xffff, max; 536 struct nv04_graph_priv *priv = (void *)object->engine;
711 uint32_t w = data >> 16; 537 u32 data = *(u32 *)args;
538 u32 min = data & 0xffff, max;
539 u32 w = data >> 16;
712 if (min & 0x8000) 540 if (min & 0x8000)
713 /* too large */ 541 /* too large */
714 return 1; 542 return 1;
@@ -717,17 +545,19 @@ nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
717 w |= 0xffff0000; 545 w |= 0xffff0000;
718 max = min + w; 546 max = min + w;
719 max &= 0x3ffff; 547 max &= 0x3ffff;
720 nv_wr32(chan->dev, 0x40053c, min); 548 nv_wr32(priv, 0x40053c, min);
721 nv_wr32(chan->dev, 0x400544, max); 549 nv_wr32(priv, 0x400544, max);
722 return 0; 550 return 0;
723} 551}
724 552
725static int 553static int
726nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, 554nv04_graph_mthd_surf3d_clip_v(struct nouveau_object *object, u32 mthd,
727 u32 class, u32 mthd, u32 data) 555 void *args, u32 size)
728{ 556{
729 uint32_t min = data & 0xffff, max; 557 struct nv04_graph_priv *priv = (void *)object->engine;
730 uint32_t w = data >> 16; 558 u32 data = *(u32 *)args;
559 u32 min = data & 0xffff, max;
560 u32 w = data >> 16;
731 if (min & 0x8000) 561 if (min & 0x8000)
732 /* too large */ 562 /* too large */
733 return 1; 563 return 1;
@@ -736,223 +566,661 @@ nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
736 w |= 0xffff0000; 566 w |= 0xffff0000;
737 max = min + w; 567 max = min + w;
738 max &= 0x3ffff; 568 max &= 0x3ffff;
739 nv_wr32(chan->dev, 0x400540, min); 569 nv_wr32(priv, 0x400540, min);
740 nv_wr32(chan->dev, 0x400548, max); 570 nv_wr32(priv, 0x400548, max);
741 return 0; 571 return 0;
742} 572}
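Both clip methods unpack one 32-bit parameter into a 16-bit origin and a
16-bit signed extent: negative origins are rejected, a negative extent is
sign-extended before the addition, and the result is kept to 18 bits. The
same decode as a standalone sketch (function name hypothetical):

  static int decode_clip(u32 data, u32 *min, u32 *max)
  {
      u32 w = data >> 16;           /* signed width/height */

      *min = data & 0xffff;         /* origin              */
      if (*min & 0x8000)
          return 1;                 /* too large           */
      if (w & 0x8000)
          w |= 0xffff0000;          /* sign-extend         */
      *max = (*min + w) & 0x3ffff;
      return 0;
  }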
743 573
574static u16
575nv04_graph_mthd_bind_class(struct nouveau_object *object, u32 *args, u32 size)
576{
577 struct nouveau_instmem *imem = nouveau_instmem(object);
578 u32 inst = *(u32 *)args << 4;
579 return nv_ro32(imem, inst);
580}
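This helper replaces the nv_ri32(dev, data << 4) reads the old bind methods
did by hand: the method argument is a RAMIN instance in 16-byte units, and
word 0 of the referenced object carries its class, so the class is read back
through the instmem subdev. One detail worth noting: the helper returns the
low 16 bits of that word where the old code masked to 8, though every case
value the bind methods below compare it against still fits in a byte.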
581
744static int 582static int
745nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, 583nv04_graph_mthd_bind_surf2d(struct nouveau_object *object, u32 mthd,
746 u32 class, u32 mthd, u32 data) 584 void *args, u32 size)
747{ 585{
748 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 586 switch (nv04_graph_mthd_bind_class(object, args, size)) {
749 case 0x30: 587 case 0x30:
750 nv04_graph_set_ctx1(chan, 0x00004000, 0); 588 nv04_graph_set_ctx1(object, 0x00004000, 0);
751 nv04_graph_set_ctx_val(chan, 0x02000000, 0); 589 nv04_graph_set_ctx_val(object, 0x02000000, 0);
752 return 0; 590 return 0;
753 case 0x42: 591 case 0x42:
754 nv04_graph_set_ctx1(chan, 0x00004000, 0); 592 nv04_graph_set_ctx1(object, 0x00004000, 0);
755 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); 593 nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
756 return 0; 594 return 0;
757 } 595 }
758 return 1; 596 return 1;
759} 597}
760 598
761static int 599static int
762nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, 600nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_object *object, u32 mthd,
763 u32 class, u32 mthd, u32 data) 601 void *args, u32 size)
764{ 602{
765 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 603 switch (nv04_graph_mthd_bind_class(object, args, size)) {
766 case 0x30: 604 case 0x30:
767 nv04_graph_set_ctx1(chan, 0x00004000, 0); 605 nv04_graph_set_ctx1(object, 0x00004000, 0);
768 nv04_graph_set_ctx_val(chan, 0x02000000, 0); 606 nv04_graph_set_ctx_val(object, 0x02000000, 0);
769 return 0; 607 return 0;
770 case 0x42: 608 case 0x42:
771 nv04_graph_set_ctx1(chan, 0x00004000, 0); 609 nv04_graph_set_ctx1(object, 0x00004000, 0);
772 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); 610 nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
773 return 0; 611 return 0;
774 case 0x52: 612 case 0x52:
775 nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000); 613 nv04_graph_set_ctx1(object, 0x00004000, 0x00004000);
776 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); 614 nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
777 return 0; 615 return 0;
778 } 616 }
779 return 1; 617 return 1;
780} 618}
781 619
782static int 620static int
783nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, 621nv01_graph_mthd_bind_patt(struct nouveau_object *object, u32 mthd,
784 u32 class, u32 mthd, u32 data) 622 void *args, u32 size)
785{ 623{
786 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 624 switch (nv04_graph_mthd_bind_class(object, args, size)) {
787 case 0x30: 625 case 0x30:
788 nv04_graph_set_ctx_val(chan, 0x08000000, 0); 626 nv04_graph_set_ctx_val(object, 0x08000000, 0);
789 return 0; 627 return 0;
790 case 0x18: 628 case 0x18:
791 nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000); 629 nv04_graph_set_ctx_val(object, 0x08000000, 0x08000000);
792 return 0; 630 return 0;
793 } 631 }
794 return 1; 632 return 1;
795} 633}
796 634
797static int 635static int
798nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, 636nv04_graph_mthd_bind_patt(struct nouveau_object *object, u32 mthd,
799 u32 class, u32 mthd, u32 data) 637 void *args, u32 size)
800{ 638{
801 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 639 switch (nv04_graph_mthd_bind_class(object, args, size)) {
802 case 0x30: 640 case 0x30:
803 nv04_graph_set_ctx_val(chan, 0x08000000, 0); 641 nv04_graph_set_ctx_val(object, 0x08000000, 0);
804 return 0; 642 return 0;
805 case 0x44: 643 case 0x44:
806 nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000); 644 nv04_graph_set_ctx_val(object, 0x08000000, 0x08000000);
807 return 0; 645 return 0;
808 } 646 }
809 return 1; 647 return 1;
810} 648}
811 649
812static int 650static int
813nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, 651nv04_graph_mthd_bind_rop(struct nouveau_object *object, u32 mthd,
814 u32 class, u32 mthd, u32 data) 652 void *args, u32 size)
815{ 653{
816 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 654 switch (nv04_graph_mthd_bind_class(object, args, size)) {
817 case 0x30: 655 case 0x30:
818 nv04_graph_set_ctx_val(chan, 0x10000000, 0); 656 nv04_graph_set_ctx_val(object, 0x10000000, 0);
819 return 0; 657 return 0;
820 case 0x43: 658 case 0x43:
821 nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000); 659 nv04_graph_set_ctx_val(object, 0x10000000, 0x10000000);
822 return 0; 660 return 0;
823 } 661 }
824 return 1; 662 return 1;
825} 663}
826 664
827static int 665static int
828nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, 666nv04_graph_mthd_bind_beta1(struct nouveau_object *object, u32 mthd,
829 u32 class, u32 mthd, u32 data) 667 void *args, u32 size)
830{ 668{
831 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 669 switch (nv04_graph_mthd_bind_class(object, args, size)) {
832 case 0x30: 670 case 0x30:
833 nv04_graph_set_ctx_val(chan, 0x20000000, 0); 671 nv04_graph_set_ctx_val(object, 0x20000000, 0);
834 return 0; 672 return 0;
835 case 0x12: 673 case 0x12:
836 nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000); 674 nv04_graph_set_ctx_val(object, 0x20000000, 0x20000000);
837 return 0; 675 return 0;
838 } 676 }
839 return 1; 677 return 1;
840} 678}
841 679
842static int 680static int
843nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, 681nv04_graph_mthd_bind_beta4(struct nouveau_object *object, u32 mthd,
844 u32 class, u32 mthd, u32 data) 682 void *args, u32 size)
845{ 683{
846 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 684 switch (nv04_graph_mthd_bind_class(object, args, size)) {
847 case 0x30: 685 case 0x30:
848 nv04_graph_set_ctx_val(chan, 0x40000000, 0); 686 nv04_graph_set_ctx_val(object, 0x40000000, 0);
849 return 0; 687 return 0;
850 case 0x72: 688 case 0x72:
851 nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000); 689 nv04_graph_set_ctx_val(object, 0x40000000, 0x40000000);
852 return 0; 690 return 0;
853 } 691 }
854 return 1; 692 return 1;
855} 693}
856 694
857static int 695static int
858nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, 696nv04_graph_mthd_bind_surf_dst(struct nouveau_object *object, u32 mthd,
859 u32 class, u32 mthd, u32 data) 697 void *args, u32 size)
860{ 698{
861 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 699 switch (nv04_graph_mthd_bind_class(object, args, size)) {
862 case 0x30: 700 case 0x30:
863 nv04_graph_set_ctx_val(chan, 0x02000000, 0); 701 nv04_graph_set_ctx_val(object, 0x02000000, 0);
864 return 0; 702 return 0;
865 case 0x58: 703 case 0x58:
866 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); 704 nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
867 return 0; 705 return 0;
868 } 706 }
869 return 1; 707 return 1;
870} 708}
871 709
872static int 710static int
873nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, 711nv04_graph_mthd_bind_surf_src(struct nouveau_object *object, u32 mthd,
874 u32 class, u32 mthd, u32 data) 712 void *args, u32 size)
875{ 713{
876 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 714 switch (nv04_graph_mthd_bind_class(object, args, size)) {
877 case 0x30: 715 case 0x30:
878 nv04_graph_set_ctx_val(chan, 0x04000000, 0); 716 nv04_graph_set_ctx_val(object, 0x04000000, 0);
879 return 0; 717 return 0;
880 case 0x59: 718 case 0x59:
881 nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000); 719 nv04_graph_set_ctx_val(object, 0x04000000, 0x04000000);
882 return 0; 720 return 0;
883 } 721 }
884 return 1; 722 return 1;
885} 723}
886 724
887static int 725static int
888nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, 726nv04_graph_mthd_bind_surf_color(struct nouveau_object *object, u32 mthd,
889 u32 class, u32 mthd, u32 data) 727 void *args, u32 size)
890{ 728{
891 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 729 switch (nv04_graph_mthd_bind_class(object, args, size)) {
892 case 0x30: 730 case 0x30:
893 nv04_graph_set_ctx_val(chan, 0x02000000, 0); 731 nv04_graph_set_ctx_val(object, 0x02000000, 0);
894 return 0; 732 return 0;
895 case 0x5a: 733 case 0x5a:
896 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); 734 nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
897 return 0; 735 return 0;
898 } 736 }
899 return 1; 737 return 1;
900} 738}
901 739
902static int 740static int
903nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, 741nv04_graph_mthd_bind_surf_zeta(struct nouveau_object *object, u32 mthd,
904 u32 class, u32 mthd, u32 data) 742 void *args, u32 size)
905{ 743{
906 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 744 switch (nv04_graph_mthd_bind_class(object, args, size)) {
907 case 0x30: 745 case 0x30:
908 nv04_graph_set_ctx_val(chan, 0x04000000, 0); 746 nv04_graph_set_ctx_val(object, 0x04000000, 0);
909 return 0; 747 return 0;
910 case 0x5b: 748 case 0x5b:
911 nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000); 749 nv04_graph_set_ctx_val(object, 0x04000000, 0x04000000);
912 return 0; 750 return 0;
913 } 751 }
914 return 1; 752 return 1;
915} 753}
916 754
917static int 755static int
918nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, 756nv01_graph_mthd_bind_clip(struct nouveau_object *object, u32 mthd,
919 u32 class, u32 mthd, u32 data) 757 void *args, u32 size)
920{ 758{
921 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 759 switch (nv04_graph_mthd_bind_class(object, args, size)) {
922 case 0x30: 760 case 0x30:
923 nv04_graph_set_ctx1(chan, 0x2000, 0); 761 nv04_graph_set_ctx1(object, 0x2000, 0);
924 return 0; 762 return 0;
925 case 0x19: 763 case 0x19:
926 nv04_graph_set_ctx1(chan, 0x2000, 0x2000); 764 nv04_graph_set_ctx1(object, 0x2000, 0x2000);
927 return 0; 765 return 0;
928 } 766 }
929 return 1; 767 return 1;
930} 768}
931 769
932static int 770static int
933nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, 771nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd,
934 u32 class, u32 mthd, u32 data) 772 void *args, u32 size)
935{ 773{
936 switch (nv_ri32(chan->dev, data << 4) & 0xff) { 774 switch (nv04_graph_mthd_bind_class(object, args, size)) {
937 case 0x30: 775 case 0x30:
938 nv04_graph_set_ctx1(chan, 0x1000, 0); 776 nv04_graph_set_ctx1(object, 0x1000, 0);
939 return 0; 777 return 0;
940 /* Yes, for some reason even the old versions of objects 778 /* Yes, for some reason even the old versions of objects
941 * accept 0x57 and not 0x17. Consistency be damned. 779 * accept 0x57 and not 0x17. Consistency be damned.
942 */ 780 */
943 case 0x57: 781 case 0x57:
944 nv04_graph_set_ctx1(chan, 0x1000, 0x1000); 782 nv04_graph_set_ctx1(object, 0x1000, 0x1000);
945 return 0; 783 return 0;
946 } 784 }
947 return 1; 785 return 1;
948} 786}
949 787
950static struct nouveau_bitfield nv04_graph_intr[] = { 788static struct nouveau_omthds
789nv03_graph_gdi_omthds[] = {
790 { 0x0184, nv01_graph_mthd_bind_patt },
791 { 0x0188, nv04_graph_mthd_bind_rop },
792 { 0x018c, nv04_graph_mthd_bind_beta1 },
793 { 0x0190, nv04_graph_mthd_bind_surf_dst },
794 { 0x02fc, nv04_graph_mthd_set_operation },
795 {}
796};
797
798static struct nouveau_omthds
799nv04_graph_gdi_omthds[] = {
800 { 0x0188, nv04_graph_mthd_bind_patt },
801 { 0x018c, nv04_graph_mthd_bind_rop },
802 { 0x0190, nv04_graph_mthd_bind_beta1 },
803 { 0x0194, nv04_graph_mthd_bind_beta4 },
804 { 0x0198, nv04_graph_mthd_bind_surf2d },
805 { 0x02fc, nv04_graph_mthd_set_operation },
806 {}
807};
808
809static struct nouveau_omthds
810nv01_graph_blit_omthds[] = {
811 { 0x0184, nv01_graph_mthd_bind_chroma },
812 { 0x0188, nv01_graph_mthd_bind_clip },
813 { 0x018c, nv01_graph_mthd_bind_patt },
814 { 0x0190, nv04_graph_mthd_bind_rop },
815 { 0x0194, nv04_graph_mthd_bind_beta1 },
816 { 0x0198, nv04_graph_mthd_bind_surf_dst },
817 { 0x019c, nv04_graph_mthd_bind_surf_src },
818 { 0x02fc, nv04_graph_mthd_set_operation },
819 {}
820};
821
822static struct nouveau_omthds
823nv04_graph_blit_omthds[] = {
824 { 0x0184, nv01_graph_mthd_bind_chroma },
825 { 0x0188, nv01_graph_mthd_bind_clip },
826 { 0x018c, nv04_graph_mthd_bind_patt },
827 { 0x0190, nv04_graph_mthd_bind_rop },
828 { 0x0194, nv04_graph_mthd_bind_beta1 },
829 { 0x0198, nv04_graph_mthd_bind_beta4 },
830 { 0x019c, nv04_graph_mthd_bind_surf2d },
831 { 0x02fc, nv04_graph_mthd_set_operation },
832 {}
833};
834
835static struct nouveau_omthds
836nv04_graph_iifc_omthds[] = {
837 { 0x0188, nv01_graph_mthd_bind_chroma },
838 { 0x018c, nv01_graph_mthd_bind_clip },
839 { 0x0190, nv04_graph_mthd_bind_patt },
840 { 0x0194, nv04_graph_mthd_bind_rop },
841 { 0x0198, nv04_graph_mthd_bind_beta1 },
842 { 0x019c, nv04_graph_mthd_bind_beta4 },
843 { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
844 { 0x03e4, nv04_graph_mthd_set_operation },
845 {}
846};
847
848static struct nouveau_omthds
849nv01_graph_ifc_omthds[] = {
850 { 0x0184, nv01_graph_mthd_bind_chroma },
851 { 0x0188, nv01_graph_mthd_bind_clip },
852 { 0x018c, nv01_graph_mthd_bind_patt },
853 { 0x0190, nv04_graph_mthd_bind_rop },
854 { 0x0194, nv04_graph_mthd_bind_beta1 },
855 { 0x0198, nv04_graph_mthd_bind_surf_dst },
856 { 0x02fc, nv04_graph_mthd_set_operation },
857 {}
858};
859
860static struct nouveau_omthds
861nv04_graph_ifc_omthds[] = {
862 { 0x0184, nv01_graph_mthd_bind_chroma },
863 { 0x0188, nv01_graph_mthd_bind_clip },
864 { 0x018c, nv04_graph_mthd_bind_patt },
865 { 0x0190, nv04_graph_mthd_bind_rop },
866 { 0x0194, nv04_graph_mthd_bind_beta1 },
867 { 0x0198, nv04_graph_mthd_bind_beta4 },
868 { 0x019c, nv04_graph_mthd_bind_surf2d },
869 { 0x02fc, nv04_graph_mthd_set_operation },
870 {}
871};
872
873static struct nouveau_omthds
874nv03_graph_sifc_omthds[] = {
875 { 0x0184, nv01_graph_mthd_bind_chroma },
876 { 0x0188, nv01_graph_mthd_bind_patt },
877 { 0x018c, nv04_graph_mthd_bind_rop },
878 { 0x0190, nv04_graph_mthd_bind_beta1 },
879 { 0x0194, nv04_graph_mthd_bind_surf_dst },
880 { 0x02fc, nv04_graph_mthd_set_operation },
881 {}
882};
883
884static struct nouveau_omthds
885nv04_graph_sifc_omthds[] = {
886 { 0x0184, nv01_graph_mthd_bind_chroma },
887 { 0x0188, nv04_graph_mthd_bind_patt },
888 { 0x018c, nv04_graph_mthd_bind_rop },
889 { 0x0190, nv04_graph_mthd_bind_beta1 },
890 { 0x0194, nv04_graph_mthd_bind_beta4 },
891 { 0x0198, nv04_graph_mthd_bind_surf2d },
892 { 0x02fc, nv04_graph_mthd_set_operation },
893 {}
894};
895
896static struct nouveau_omthds
897nv03_graph_sifm_omthds[] = {
898 { 0x0188, nv01_graph_mthd_bind_patt },
899 { 0x018c, nv04_graph_mthd_bind_rop },
900 { 0x0190, nv04_graph_mthd_bind_beta1 },
901 { 0x0194, nv04_graph_mthd_bind_surf_dst },
902 { 0x0304, nv04_graph_mthd_set_operation },
903 {}
904};
905
906static struct nouveau_omthds
907nv04_graph_sifm_omthds[] = {
908 { 0x0188, nv04_graph_mthd_bind_patt },
909 { 0x018c, nv04_graph_mthd_bind_rop },
910 { 0x0190, nv04_graph_mthd_bind_beta1 },
911 { 0x0194, nv04_graph_mthd_bind_beta4 },
912 { 0x0198, nv04_graph_mthd_bind_surf2d },
913 { 0x0304, nv04_graph_mthd_set_operation },
914 {}
915};
916
917static struct nouveau_omthds
918nv04_graph_surf3d_omthds[] = {
919 { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
920 { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
921 {}
922};
923
924static struct nouveau_omthds
925nv03_graph_ttri_omthds[] = {
926 { 0x0188, nv01_graph_mthd_bind_clip },
927 { 0x018c, nv04_graph_mthd_bind_surf_color },
928 { 0x0190, nv04_graph_mthd_bind_surf_zeta },
929 {}
930};
931
932static struct nouveau_omthds
933nv01_graph_prim_omthds[] = {
934 { 0x0184, nv01_graph_mthd_bind_clip },
935 { 0x0188, nv01_graph_mthd_bind_patt },
936 { 0x018c, nv04_graph_mthd_bind_rop },
937 { 0x0190, nv04_graph_mthd_bind_beta1 },
938 { 0x0194, nv04_graph_mthd_bind_surf_dst },
939 { 0x02fc, nv04_graph_mthd_set_operation },
940 {}
941};
942
943static struct nouveau_omthds
944nv04_graph_prim_omthds[] = {
945 { 0x0184, nv01_graph_mthd_bind_clip },
946 { 0x0188, nv04_graph_mthd_bind_patt },
947 { 0x018c, nv04_graph_mthd_bind_rop },
948 { 0x0190, nv04_graph_mthd_bind_beta1 },
949 { 0x0194, nv04_graph_mthd_bind_beta4 },
950 { 0x0198, nv04_graph_mthd_bind_surf2d },
951 { 0x02fc, nv04_graph_mthd_set_operation },
952 {}
953};
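These tables are the new-world replacement for the old per-class software-
method registrations: each nouveau_omthds entry pairs a method offset with
its handler, zero-terminated, and the per-generation variants exist because
the method layout moved (GDI's binds start at 0x0184 on nv03 but 0x0188 on
nv04, which also gains beta4 and the unified surf2d bind). A hypothetical
dispatcher over such a table, to show the shape of the lookup (field names
assumed from the initialisers above; the real dispatch lives in the core
object code, not in this file):

  static int call_omthd(struct nouveau_omthds *m, struct nouveau_object *obj,
                        u32 mthd, void *args, u32 size)
  {
      for (; m->method; m++) {
          if (m->method == mthd)
              return m->call(obj, mthd, args, size);
      }
      return -ENOENT;  /* no software method: leave it to hardware */
  }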
954
955static int
956nv04_graph_object_ctor(struct nouveau_object *parent,
957 struct nouveau_object *engine,
958 struct nouveau_oclass *oclass, void *data, u32 size,
959 struct nouveau_object **pobject)
960{
961 struct nouveau_gpuobj *obj;
962 int ret;
963
964 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
965 16, 16, 0, &obj);
966 *pobject = nv_object(obj);
967 if (ret)
968 return ret;
969
970 nv_wo32(obj, 0x00, nv_mclass(obj));
971#ifdef __BIG_ENDIAN
972 nv_mo32(obj, 0x00, 0x00080000, 0x00080000);
973#endif
974 nv_wo32(obj, 0x04, 0x00000000);
975 nv_wo32(obj, 0x08, 0x00000000);
976 nv_wo32(obj, 0x0c, 0x00000000);
977 return 0;
978}
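This constructor backs every class in the sclass table below: a graphics
object is a bare 16-byte instance whose first word is the class id (with bit
19 set on big-endian hosts, preserving the old #ifdef behaviour via nv_mo32)
and whose remaining words start zeroed. Laid out, as the writes above create
it:

  /* 0x00: class | (big-endian ? 0x00080000 : 0)  -- the ctx1 word
   * 0x04: 0x00000000
   * 0x08: 0x00000000
   * 0x0c: 0x00000000                             -- the ctx_val word
   * words 0x00/0x0c are exactly what the software methods above
   * mutate through nv04_graph_set_ctx1()/set_ctx_val(). */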
979
980struct nouveau_ofuncs
981nv04_graph_ofuncs = {
982 .ctor = nv04_graph_object_ctor,
983 .dtor = _nouveau_gpuobj_dtor,
984 .init = _nouveau_gpuobj_init,
985 .fini = _nouveau_gpuobj_fini,
986 .rd32 = _nouveau_gpuobj_rd32,
987 .wr32 = _nouveau_gpuobj_wr32,
988};
989
990static struct nouveau_oclass
991nv04_graph_sclass[] = {
992 { 0x0012, &nv04_graph_ofuncs }, /* beta1 */
993 { 0x0017, &nv04_graph_ofuncs }, /* chroma */
994 { 0x0018, &nv04_graph_ofuncs }, /* pattern (nv01) */
995 { 0x0019, &nv04_graph_ofuncs }, /* clip */
996 { 0x001c, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* line */
997 { 0x001d, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* tri */
998 { 0x001e, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* rect */
999 { 0x001f, &nv04_graph_ofuncs, nv01_graph_blit_omthds },
1000 { 0x0021, &nv04_graph_ofuncs, nv01_graph_ifc_omthds },
1001 { 0x0030, &nv04_graph_ofuncs }, /* null */
1002 { 0x0036, &nv04_graph_ofuncs, nv03_graph_sifc_omthds },
1003 { 0x0037, &nv04_graph_ofuncs, nv03_graph_sifm_omthds },
1004 { 0x0038, &nv04_graph_ofuncs }, /* dvd subpicture */
1005 { 0x0039, &nv04_graph_ofuncs }, /* m2mf */
1006 { 0x0042, &nv04_graph_ofuncs }, /* surf2d */
1007 { 0x0043, &nv04_graph_ofuncs }, /* rop */
1008 { 0x0044, &nv04_graph_ofuncs }, /* pattern */
1009 { 0x0048, &nv04_graph_ofuncs, nv03_graph_ttri_omthds },
1010 { 0x004a, &nv04_graph_ofuncs, nv04_graph_gdi_omthds },
1011 { 0x004b, &nv04_graph_ofuncs, nv03_graph_gdi_omthds },
1012 { 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
1013 { 0x0053, &nv04_graph_ofuncs, nv04_graph_surf3d_omthds },
1014 { 0x0054, &nv04_graph_ofuncs }, /* ttri */
1015 { 0x0055, &nv04_graph_ofuncs }, /* mtri */
1016 { 0x0057, &nv04_graph_ofuncs }, /* chroma */
1017 { 0x0058, &nv04_graph_ofuncs }, /* surf_dst */
1018 { 0x0059, &nv04_graph_ofuncs }, /* surf_src */
1019 { 0x005a, &nv04_graph_ofuncs }, /* surf_color */
1020 { 0x005b, &nv04_graph_ofuncs }, /* surf_zeta */
1021 { 0x005c, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* line */
1022 { 0x005d, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* tri */
1023 { 0x005e, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* rect */
1024 { 0x005f, &nv04_graph_ofuncs, nv04_graph_blit_omthds },
1025 { 0x0060, &nv04_graph_ofuncs, nv04_graph_iifc_omthds },
1026 { 0x0061, &nv04_graph_ofuncs, nv04_graph_ifc_omthds },
1027 { 0x0064, &nv04_graph_ofuncs }, /* iifc (nv05) */
1028 { 0x0065, &nv04_graph_ofuncs }, /* ifc (nv05) */
1029 { 0x0066, &nv04_graph_ofuncs }, /* sifc (nv05) */
1030 { 0x0072, &nv04_graph_ofuncs }, /* beta4 */
1031 { 0x0076, &nv04_graph_ofuncs, nv04_graph_sifc_omthds },
1032 { 0x0077, &nv04_graph_ofuncs, nv04_graph_sifm_omthds },
1033 {},
1034};
1035
1036/*******************************************************************************
1037 * PGRAPH context
1038 ******************************************************************************/
1039
1040static struct nv04_graph_chan *
1041nv04_graph_channel(struct nv04_graph_priv *priv)
1042{
1043 struct nv04_graph_chan *chan = NULL;
1044 if (nv_rd32(priv, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
1045 int chid = nv_rd32(priv, NV04_PGRAPH_CTX_USER) >> 24;
1046 if (chid < ARRAY_SIZE(priv->chan))
1047 chan = priv->chan[chid];
1048 }
1049 return chan;
1050}
1051
1052static int
1053nv04_graph_load_context(struct nv04_graph_chan *chan, int chid)
1054{
1055 struct nv04_graph_priv *priv = nv04_graph_priv(chan);
1056 int i;
1057
1058 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
1059 nv_wr32(priv, nv04_graph_ctx_regs[i], chan->nv04[i]);
1060
1061 nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
1062 nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
1063 nv_mask(priv, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
1064 return 0;
1065}
1066
1067static int
1068nv04_graph_unload_context(struct nv04_graph_chan *chan)
1069{
1070 struct nv04_graph_priv *priv = nv04_graph_priv(chan);
1071 int i;
1072
1073 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
1074 chan->nv04[i] = nv_rd32(priv, nv04_graph_ctx_regs[i]);
1075
1076 nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
1077 nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
1078 return 0;
1079}
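Channel switching on nv04 is a straight register dump, and load/unload are
mirror images: load replays the channel's shadow copy of every register in
nv04_graph_ctx_regs[] and stamps its chid into the top byte of CTX_USER;
unload reads them all back and parks CTX_USER on the reserved id 15. In
outline:

  /* load:   for (i ...) nv_wr32(priv, ctx_regs[i], chan->nv04[i]);
   *         CTX_USER[31:24] = chid;
   * unload: for (i ...) chan->nv04[i] = nv_rd32(priv, ctx_regs[i]);
   *         CTX_USER[31:24] = 0x0f;   (= "no channel")
   */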
1080
1081static void
1082nv04_graph_context_switch(struct nv04_graph_priv *priv)
1083{
1084 struct nv04_graph_chan *prev = NULL;
1085 struct nv04_graph_chan *next = NULL;
1086 unsigned long flags;
1087 int chid;
1088
1089 spin_lock_irqsave(&priv->lock, flags);
1090 nv04_graph_idle(priv);
1091
1092 /* If previous context is valid, we need to save it */
1093 prev = nv04_graph_channel(priv);
1094 if (prev)
1095 nv04_graph_unload_context(prev);
1096
1097 /* load context for next channel */
1098 chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
1099 next = priv->chan[chid];
1100 if (next)
1101 nv04_graph_load_context(next, chid);
1102
1103 spin_unlock_irqrestore(&priv->lock, flags);
1104}
1105
1106static u32 *ctx_reg(struct nv04_graph_chan *chan, u32 reg)
1107{
1108 int i;
1109
1110 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
1111 if (nv04_graph_ctx_regs[i] == reg)
1112 return &chan->nv04[i];
1113 }
1114
1115 return NULL;
1116}
1117
1118static int
1119nv04_graph_context_ctor(struct nouveau_object *parent,
1120 struct nouveau_object *engine,
1121 struct nouveau_oclass *oclass, void *data, u32 size,
1122 struct nouveau_object **pobject)
1123{
1124 struct nouveau_fifo_chan *fifo = (void *)parent;
1125 struct nv04_graph_priv *priv = (void *)engine;
1126 struct nv04_graph_chan *chan;
1127 unsigned long flags;
1128 int ret;
1129
1130 ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
1131 *pobject = nv_object(chan);
1132 if (ret)
1133 return ret;
1134
1135 spin_lock_irqsave(&priv->lock, flags);
1136 if (priv->chan[fifo->chid]) {
1137 *pobject = nv_object(priv->chan[fifo->chid]);
1138 atomic_inc(&(*pobject)->refcount);
1139 spin_unlock_irqrestore(&priv->lock, flags);
1140 nouveau_object_destroy(&chan->base);
1141 return 1;
1142 }
1143
1144 *ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
1145
1146 priv->chan[fifo->chid] = chan;
1147 chan->chid = fifo->chid;
1148 spin_unlock_irqrestore(&priv->lock, flags);
1149 return 0;
1150}
1151
1152static void
1153nv04_graph_context_dtor(struct nouveau_object *object)
1154{
1155 struct nv04_graph_priv *priv = (void *)object->engine;
1156 struct nv04_graph_chan *chan = (void *)object;
1157 unsigned long flags;
1158
1159 spin_lock_irqsave(&priv->lock, flags);
1160 priv->chan[chan->chid] = NULL;
1161 spin_unlock_irqrestore(&priv->lock, flags);
1162
1163 nouveau_object_destroy(&chan->base);
1164}
1165
1166static int
1167nv04_graph_context_fini(struct nouveau_object *object, bool suspend)
1168{
1169 struct nv04_graph_priv *priv = (void *)object->engine;
1170 struct nv04_graph_chan *chan = (void *)object;
1171 unsigned long flags;
1172
1173 spin_lock_irqsave(&priv->lock, flags);
1174 nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
1175 if (nv04_graph_channel(priv) == chan)
1176 nv04_graph_unload_context(chan);
1177 nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
1178 spin_unlock_irqrestore(&priv->lock, flags);
1179
1180 return nouveau_object_fini(&chan->base, suspend);
1181}
1182
1183static struct nouveau_oclass
1184nv04_graph_cclass = {
1185 .handle = NV_ENGCTX(GR, 0x04),
1186 .ofuncs = &(struct nouveau_ofuncs) {
1187 .ctor = nv04_graph_context_ctor,
1188 .dtor = nv04_graph_context_dtor,
1189 .init = nouveau_object_init,
1190 .fini = nv04_graph_context_fini,
1191 },
1192};
1193
1194/*******************************************************************************
1195 * PGRAPH engine/subdev functions
1196 ******************************************************************************/
1197
1198bool
1199nv04_graph_idle(void *obj)
1200{
1201 struct nouveau_graph *graph = nouveau_graph(obj);
1202 u32 mask = 0xffffffff;
1203
1204 if (nv_device(obj)->card_type == NV_40)
1205 mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
1206
1207 if (!nv_wait(graph, NV04_PGRAPH_STATUS, mask, 0)) {
1208 nv_error(graph, "idle timed out with status 0x%08x\n",
1209 nv_rd32(graph, NV04_PGRAPH_STATUS));
1210 return false;
1211 }
1212
1213 return true;
1214}
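nv04_graph_idle() is shared up through the nv40 family (hence the card_type
check): it polls PGRAPH_STATUS through nv_wait() until it reads zero under
the mask, ignoring the sync-stall bit on nv40 where it never settles. The
context-switch path above uses it like this (sketch; error handling left to
the caller):

  spin_lock_irqsave(&priv->lock, flags);
  nv04_graph_idle(priv);   /* quiesce PGRAPH before touching ctx regs */
  /* ... unload previous channel, load next ... */
  spin_unlock_irqrestore(&priv->lock, flags);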
1215
1216static struct nouveau_bitfield
1217nv04_graph_intr_name[] = {
 	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
 	{}
 };
 
-static struct nouveau_bitfield nv04_graph_nstatus[] = {
+static struct nouveau_bitfield
+nv04_graph_nstatus[] = {
 	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
 	{ NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
 	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
@@ -960,7 +1228,8 @@ static struct nouveau_bitfield nv04_graph_nstatus[] = {
 	{}
 };
 
-struct nouveau_bitfield nv04_graph_nsource[] = {
+struct nouveau_bitfield
+nv04_graph_nsource[] = {
 	{ NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
 	{ NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
 	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
@@ -984,343 +1253,135 @@ struct nouveau_bitfield nv04_graph_nsource[] = {
 };
 
 static void
-nv04_graph_context_switch(struct drm_device *dev)
+nv04_graph_intr(struct nouveau_subdev *subdev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = NULL;
-	int chid;
-
-	nouveau_wait_for_idle(dev);
-
-	/* If previous context is valid, we need to save it */
-	nv04_graph_unload_context(dev);
+	struct nv04_graph_priv *priv = (void *)subdev;
+	struct nv04_graph_chan *chan = NULL;
+	struct nouveau_namedb *namedb = NULL;
+	struct nouveau_handle *handle = NULL;
+	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
+	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
+	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	u32 chid = (addr & 0x0f000000) >> 24;
+	u32 subc = (addr & 0x0000e000) >> 13;
+	u32 mthd = (addr & 0x00001ffc);
+	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nv_rd32(priv, 0x400180 + subc * 4) & 0xff;
+	u32 inst = (nv_rd32(priv, 0x40016c) & 0xffff) << 4;
+	u32 show = stat;
+	unsigned long flags;
 
-	/* Load context for next channel */
-	chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
-		NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
-	chan = dev_priv->channels.ptr[chid];
+	spin_lock_irqsave(&priv->lock, flags);
+	chan = priv->chan[chid];
 	if (chan)
-		nv04_graph_load_context(chan);
-}
-
-static void
-nv04_graph_isr(struct drm_device *dev)
-{
-	u32 stat;
-
-	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
-		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
-		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
-		u32 chid = (addr & 0x0f000000) >> 24;
-		u32 subc = (addr & 0x0000e000) >> 13;
-		u32 mthd = (addr & 0x00001ffc);
-		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
-		u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
-		u32 show = stat;
-
-		if (stat & NV_PGRAPH_INTR_NOTIFY) {
-			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
-					show &= ~NV_PGRAPH_INTR_NOTIFY;
-			}
+		namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (stat & NV_PGRAPH_INTR_NOTIFY) {
+		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
+			handle = nouveau_namedb_get_vinst(namedb, inst);
+			if (handle && !nv_call(handle->object, mthd, data))
+				show &= ~NV_PGRAPH_INTR_NOTIFY;
 		}
+	}
 
-		if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
-			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
-			stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-			show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-			nv04_graph_context_switch(dev);
-		}
+	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+		nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+		nv04_graph_context_switch(priv);
+	}
 
-		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
-		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
+	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
 
-		if (show && nouveau_ratelimit()) {
-			NV_INFO(dev, "PGRAPH -");
-			nouveau_bitfield_print(nv04_graph_intr, show);
-			printk(" nsource:");
-			nouveau_bitfield_print(nv04_graph_nsource, nsource);
-			printk(" nstatus:");
-			nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
-			printk("\n");
-			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
-				"mthd 0x%04x data 0x%08x\n",
-				chid, subc, class, mthd, data);
-		}
+	if (show) {
+		nv_error(priv, "");
+		nouveau_bitfield_print(nv04_graph_intr_name, show);
+		printk(" nsource:");
+		nouveau_bitfield_print(nv04_graph_nsource, nsource);
+		printk(" nstatus:");
+		nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
+		printk("\n");
+		nv_error(priv, "ch %d/%d class 0x%04x "
+			 "mthd 0x%04x data 0x%08x\n",
+			 chid, subc, class, mthd, data);
 	}
+
+	nouveau_namedb_put(handle);
 }
 
-static void
-nv04_graph_destroy(struct drm_device *dev, int engine)
+static int
+nv04_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
 {
-	struct nv04_graph_engine *pgraph = nv_engine(dev, engine);
+	struct nv04_graph_priv *priv;
+	int ret;
 
-	nouveau_irq_unregister(dev, 12);
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
 
-	NVOBJ_ENGINE_DEL(dev, GR);
-	kfree(pgraph);
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv04_graph_intr;
+	nv_engine(priv)->cclass = &nv04_graph_cclass;
+	nv_engine(priv)->sclass = nv04_graph_sclass;
+	spin_lock_init(&priv->lock);
+	return 0;
 }
 
-int
-nv04_graph_create(struct drm_device *dev)
+static int
+nv04_graph_init(struct nouveau_object *object)
 {
-	struct nv04_graph_engine *pgraph;
-
-	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
-	if (!pgraph)
-		return -ENOMEM;
-
-	pgraph->base.destroy = nv04_graph_destroy;
-	pgraph->base.init = nv04_graph_init;
-	pgraph->base.fini = nv04_graph_fini;
-	pgraph->base.context_new = nv04_graph_context_new;
-	pgraph->base.context_del = nv04_graph_context_del;
-	pgraph->base.object_new = nv04_graph_object_new;
-
-	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
-	nouveau_irq_register(dev, 12, nv04_graph_isr);
-
-	/* dvd subpicture */
-	NVOBJ_CLASS(dev, 0x0038, GR);
-
-	/* m2mf */
-	NVOBJ_CLASS(dev, 0x0039, GR);
-
-	/* nv03 gdirect */
-	NVOBJ_CLASS(dev, 0x004b, GR);
-	NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
-	NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
-	NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv04 gdirect */
-	NVOBJ_CLASS(dev, 0x004a, GR);
-	NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
-	NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv01 imageblit */
-	NVOBJ_CLASS(dev, 0x001f, GR);
-	NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
-	NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
-	NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
-	NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
-	NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv04 imageblit */
-	NVOBJ_CLASS(dev, 0x005f, GR);
-	NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
-	NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
-	NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv04 iifc */
-	NVOBJ_CLASS(dev, 0x0060, GR);
-	NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
-	NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
-	NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);
-
-	/* nv05 iifc */
-	NVOBJ_CLASS(dev, 0x0064, GR);
-
-	/* nv01 ifc */
-	NVOBJ_CLASS(dev, 0x0021, GR);
-	NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
-	NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
-	NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
-	NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv04 ifc */
-	NVOBJ_CLASS(dev, 0x0061, GR);
-	NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
-	NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
-	NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv05 ifc */
-	NVOBJ_CLASS(dev, 0x0065, GR);
-
-	/* nv03 sifc */
-	NVOBJ_CLASS(dev, 0x0036, GR);
-	NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
-	NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
-	NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
-	NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv04 sifc */
-	NVOBJ_CLASS(dev, 0x0076, GR);
-	NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
-	NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
-	NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv05 sifc */
-	NVOBJ_CLASS(dev, 0x0066, GR);
-
-	/* nv03 sifm */
-	NVOBJ_CLASS(dev, 0x0037, GR);
-	NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
-	NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
-	NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);
-
-	/* nv04 sifm */
-	NVOBJ_CLASS(dev, 0x0077, GR);
-	NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
-	NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);
-
-	/* null */
-	NVOBJ_CLASS(dev, 0x0030, GR);
-
-	/* surf2d */
-	NVOBJ_CLASS(dev, 0x0042, GR);
-
-	/* rop */
-	NVOBJ_CLASS(dev, 0x0043, GR);
-
-	/* beta1 */
-	NVOBJ_CLASS(dev, 0x0012, GR);
-
-	/* beta4 */
-	NVOBJ_CLASS(dev, 0x0072, GR);
-
-	/* cliprect */
-	NVOBJ_CLASS(dev, 0x0019, GR);
-
-	/* nv01 pattern */
-	NVOBJ_CLASS(dev, 0x0018, GR);
-
-	/* nv04 pattern */
-	NVOBJ_CLASS(dev, 0x0044, GR);
-
-	/* swzsurf */
-	NVOBJ_CLASS(dev, 0x0052, GR);
-
-	/* surf3d */
-	NVOBJ_CLASS(dev, 0x0053, GR);
-	NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
-	NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);
-
-	/* nv03 tex_tri */
-	NVOBJ_CLASS(dev, 0x0048, GR);
-	NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
-	NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);
-
-	/* tex_tri */
-	NVOBJ_CLASS(dev, 0x0054, GR);
-
-	/* multitex_tri */
-	NVOBJ_CLASS(dev, 0x0055, GR);
-
-	/* nv01 chroma */
-	NVOBJ_CLASS(dev, 0x0017, GR);
-
-	/* nv04 chroma */
-	NVOBJ_CLASS(dev, 0x0057, GR);
-
-	/* surf_dst */
-	NVOBJ_CLASS(dev, 0x0058, GR);
-
-	/* surf_src */
-	NVOBJ_CLASS(dev, 0x0059, GR);
-
-	/* surf_color */
-	NVOBJ_CLASS(dev, 0x005a, GR);
-
-	/* surf_zeta */
-	NVOBJ_CLASS(dev, 0x005b, GR);
-
-	/* nv01 line */
-	NVOBJ_CLASS(dev, 0x001c, GR);
-	NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
-	NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
-	NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv04 line */
-	NVOBJ_CLASS(dev, 0x005c, GR);
-	NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
-	NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv01 tri */
-	NVOBJ_CLASS(dev, 0x001d, GR);
-	NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
-	NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
-	NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv04 tri */
-	NVOBJ_CLASS(dev, 0x005d, GR);
-	NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
-	NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv01 rect */
-	NVOBJ_CLASS(dev, 0x001e, GR);
-	NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
-	NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
-	NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv04 rect */
-	NVOBJ_CLASS(dev, 0x005e, GR);
-	NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
-	NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
+	struct nouveau_engine *engine = nv_engine(object);
+	struct nv04_graph_priv *priv = (void *)engine;
+	int ret;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
 
+	/* Enable PGRAPH interrupts */
+	nv_wr32(priv, NV03_PGRAPH_INTR, 0xFFFFFFFF);
+	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nv_wr32(priv, NV04_PGRAPH_VALID1, 0);
+	nv_wr32(priv, NV04_PGRAPH_VALID2, 0);
+	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x000001FF);
+	  nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x1231c000);
+	/*1231C000 blob, 001 haiku*/
+	/*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x72111100);
+	/*0x72111100 blob , 01 haiku*/
+	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
+	/*haiku same*/
+
+	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
+	/*haiku and blob 10d4*/
+
+	nv_wr32(priv, NV04_PGRAPH_STATE, 0xFFFFFFFF);
+	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10000100);
+	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
+
+	/* These don't belong here, they're part of a per-channel context */
+	nv_wr32(priv, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
+	nv_wr32(priv, NV04_PGRAPH_BETA_AND, 0xFFFFFFFF);
 	return 0;
 }
+
+struct nouveau_oclass
+nv04_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_graph_ctor,
+		.dtor = _nouveau_graph_dtor,
+		.init = nv04_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index d006658e6468..ce38196634df 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -22,27 +22,28 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-#include "drmP.h"
-#include "drm.h"
-#include <nouveau_drm.h>
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
+#include <core/os.h>
+#include <core/class.h>
+#include <core/handle.h>
+
+#include <subdev/fb.h>
 
-struct nv10_graph_engine {
-	struct nouveau_exec_engine base;
-};
+#include <engine/fifo.h>
+#include <engine/graph.h>
+
+#include "regs.h"
 
 struct pipe_state {
-	uint32_t pipe_0x0000[0x040/4];
-	uint32_t pipe_0x0040[0x010/4];
-	uint32_t pipe_0x0200[0x0c0/4];
-	uint32_t pipe_0x4400[0x080/4];
-	uint32_t pipe_0x6400[0x3b0/4];
-	uint32_t pipe_0x6800[0x2f0/4];
-	uint32_t pipe_0x6c00[0x030/4];
-	uint32_t pipe_0x7000[0x130/4];
-	uint32_t pipe_0x7400[0x0c0/4];
-	uint32_t pipe_0x7800[0x0c0/4];
+	u32 pipe_0x0000[0x040/4];
+	u32 pipe_0x0040[0x010/4];
+	u32 pipe_0x0200[0x0c0/4];
+	u32 pipe_0x4400[0x080/4];
+	u32 pipe_0x6400[0x3b0/4];
+	u32 pipe_0x6800[0x2f0/4];
+	u32 pipe_0x6c00[0x030/4];
+	u32 pipe_0x7000[0x130/4];
+	u32 pipe_0x7400[0x0c0/4];
+	u32 pipe_0x7800[0x0c0/4];
 };
 
 static int nv10_graph_ctx_regs[] = {
@@ -388,117 +389,322 @@ static int nv17_graph_ctx_regs[] = {
 	0x00400a04,
 };
 
-struct graph_state {
+struct nv10_graph_priv {
+	struct nouveau_graph base;
+	struct nv10_graph_chan *chan[32];
+	spinlock_t lock;
+};
+
+struct nv10_graph_chan {
+	struct nouveau_object base;
+	int chid;
 	int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
 	int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
 	struct pipe_state pipe_state;
-	uint32_t lma_window[4];
+	u32 lma_window[4];
 };
 
-#define PIPE_SAVE(dev, state, addr) \
+
+static inline struct nv10_graph_priv *
+nv10_graph_priv(struct nv10_graph_chan *chan)
+{
+	return (void *)nv_object(chan)->engine;
+}
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+#define PIPE_SAVE(priv, state, addr) \
 	do { \
 		int __i; \
-		nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+		nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr); \
 		for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
-			state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
+			state[__i] = nv_rd32(priv, NV10_PGRAPH_PIPE_DATA); \
 	} while (0)
 
-#define PIPE_RESTORE(dev, state, addr) \
+#define PIPE_RESTORE(priv, state, addr) \
 	do { \
 		int __i; \
-		nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+		nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr); \
 		for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
-			nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]); \
+			nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, state[__i]); \
 	} while (0)
 
-static void nv10_graph_save_pipe(struct nouveau_channel *chan)
+static struct nouveau_oclass
+nv10_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs }, /* pattern */
+	{ 0x004a, &nv04_graph_ofuncs }, /* gdi */
+	{ 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
+	{ 0x005f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0062, &nv04_graph_ofuncs }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs }, /* ifc */
+	{ 0x009f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0093, &nv04_graph_ofuncs }, /* surf3d */
+	{ 0x0094, &nv04_graph_ofuncs }, /* ttri */
+	{ 0x0095, &nv04_graph_ofuncs }, /* mtri */
+	{ 0x0056, &nv04_graph_ofuncs }, /* celcius */
+	{},
+};
+
+static struct nouveau_oclass
+nv15_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs }, /* pattern */
+	{ 0x004a, &nv04_graph_ofuncs }, /* gdi */
+	{ 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
+	{ 0x005f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0062, &nv04_graph_ofuncs }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs }, /* ifc */
+	{ 0x009f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0093, &nv04_graph_ofuncs }, /* surf3d */
+	{ 0x0094, &nv04_graph_ofuncs }, /* ttri */
+	{ 0x0095, &nv04_graph_ofuncs }, /* mtri */
+	{ 0x0096, &nv04_graph_ofuncs }, /* celcius */
+	{},
+};
+
+static int
+nv17_graph_mthd_lma_window(struct nouveau_object *object, u32 mthd,
+			   void *args, u32 size)
+{
+	struct nv10_graph_chan *chan = (void *)object->parent;
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	struct pipe_state *pipe = &chan->pipe_state;
+	u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
+	u32 xfmode0, xfmode1;
+	u32 data = *(u32 *)args;
+	int i;
+
+	chan->lma_window[(mthd - 0x1638) / 4] = data;
+
+	if (mthd != 0x1644)
+		return 0;
+
+	nv04_graph_idle(priv);
+
+	PIPE_SAVE(priv, pipe_0x0040, 0x0040);
+	PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
+
+	PIPE_RESTORE(priv, chan->lma_window, 0x6790);
+
+	nv04_graph_idle(priv);
+
+	xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
+	xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
+
+	PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
+	PIPE_SAVE(priv, pipe_0x64c0, 0x64c0);
+	PIPE_SAVE(priv, pipe_0x6ab0, 0x6ab0);
+	PIPE_SAVE(priv, pipe_0x6a80, 0x6a80);
+
+	nv04_graph_idle(priv);
+
+	nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
+	nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+	for (i = 0; i < 4; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+	for (i = 0; i < 4; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+	for (i = 0; i < 3; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+	for (i = 0; i < 3; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+
+	PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
+
+	nv04_graph_idle(priv);
+
+	PIPE_RESTORE(priv, pipe_0x0040, 0x0040);
+
+	nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
+	nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
+
+	PIPE_RESTORE(priv, pipe_0x64c0, 0x64c0);
+	PIPE_RESTORE(priv, pipe_0x6ab0, 0x6ab0);
+	PIPE_RESTORE(priv, pipe_0x6a80, 0x6a80);
+	PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	nv04_graph_idle(priv);
+
+	return 0;
+}
+
+static int
+nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd,
+			   void *args, u32 size)
 {
-	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
-	struct pipe_state *pipe = &pgraph_ctx->pipe_state;
-	struct drm_device *dev = chan->dev;
-
-	PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
-	PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
-	PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400);
-	PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800);
-	PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00);
-	PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000);
-	PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400);
-	PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800);
-	PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040);
-	PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000);
+	struct nv10_graph_chan *chan = (void *)object->parent;
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+
+	nv04_graph_idle(priv);
+
+	nv_mask(priv, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
+	nv_mask(priv, 0x4006b0, 0x08000000, 0x08000000);
+	return 0;
 }
 
-static void nv10_graph_load_pipe(struct nouveau_channel *chan)
+static struct nouveau_omthds
+nv17_celcius_omthds[] = {
+	{ 0x1638, nv17_graph_mthd_lma_window },
+	{ 0x163c, nv17_graph_mthd_lma_window },
+	{ 0x1640, nv17_graph_mthd_lma_window },
+	{ 0x1644, nv17_graph_mthd_lma_window },
+	{ 0x1658, nv17_graph_mthd_lma_enable },
+	{}
+};
+
+static struct nouveau_oclass
+nv17_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs }, /* pattern */
+	{ 0x004a, &nv04_graph_ofuncs }, /* gdi */
+	{ 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
+	{ 0x005f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0062, &nv04_graph_ofuncs }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs }, /* ifc */
+	{ 0x009f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0093, &nv04_graph_ofuncs }, /* surf3d */
+	{ 0x0094, &nv04_graph_ofuncs }, /* ttri */
+	{ 0x0095, &nv04_graph_ofuncs }, /* mtri */
+	{ 0x0099, &nv04_graph_ofuncs, nv17_celcius_omthds },
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static struct nv10_graph_chan *
+nv10_graph_channel(struct nv10_graph_priv *priv)
 {
-	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
-	struct pipe_state *pipe = &pgraph_ctx->pipe_state;
-	struct drm_device *dev = chan->dev;
-	uint32_t xfmode0, xfmode1;
+	struct nv10_graph_chan *chan = NULL;
+	if (nv_rd32(priv, 0x400144) & 0x00010000) {
+		int chid = nv_rd32(priv, 0x400148) >> 24;
+		if (chid < ARRAY_SIZE(priv->chan))
+			chan = priv->chan[chid];
+	}
+	return chan;
+}
+
+static void
+nv10_graph_save_pipe(struct nv10_graph_chan *chan)
+{
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	struct pipe_state *pipe = &chan->pipe_state;
+
+	PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
+	PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
+	PIPE_SAVE(priv, pipe->pipe_0x6400, 0x6400);
+	PIPE_SAVE(priv, pipe->pipe_0x6800, 0x6800);
+	PIPE_SAVE(priv, pipe->pipe_0x6c00, 0x6c00);
+	PIPE_SAVE(priv, pipe->pipe_0x7000, 0x7000);
+	PIPE_SAVE(priv, pipe->pipe_0x7400, 0x7400);
+	PIPE_SAVE(priv, pipe->pipe_0x7800, 0x7800);
+	PIPE_SAVE(priv, pipe->pipe_0x0040, 0x0040);
+	PIPE_SAVE(priv, pipe->pipe_0x0000, 0x0000);
+}
+
+static void
+nv10_graph_load_pipe(struct nv10_graph_chan *chan)
+{
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	struct pipe_state *pipe = &chan->pipe_state;
+	u32 xfmode0, xfmode1;
 	int i;
 
-	nouveau_wait_for_idle(dev);
+	nv04_graph_idle(priv);
 	/* XXX check haiku comments */
-	xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
-	xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
-	nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
-	nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+	xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
+	xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
+	nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
+	nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
 	for (i = 0; i < 4; i++)
-		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
 	for (i = 0; i < 4; i++)
-		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
 	for (i = 0; i < 3; i++)
-		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
 
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
 	for (i = 0; i < 3; i++)
-		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
-	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
 
 
-	PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
-	nouveau_wait_for_idle(dev);
+	PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
+	nv04_graph_idle(priv);
 
 	/* restore XFMODE */
-	nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
-	nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
-	PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400);
-	PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800);
-	PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00);
-	PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000);
-	PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400);
-	PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800);
-	PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
-	PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000);
-	PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040);
-	nouveau_wait_for_idle(dev);
+	nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
+	nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
+	PIPE_RESTORE(priv, pipe->pipe_0x6400, 0x6400);
+	PIPE_RESTORE(priv, pipe->pipe_0x6800, 0x6800);
+	PIPE_RESTORE(priv, pipe->pipe_0x6c00, 0x6c00);
+	PIPE_RESTORE(priv, pipe->pipe_0x7000, 0x7000);
+	PIPE_RESTORE(priv, pipe->pipe_0x7400, 0x7400);
+	PIPE_RESTORE(priv, pipe->pipe_0x7800, 0x7800);
+	PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
+	PIPE_RESTORE(priv, pipe->pipe_0x0000, 0x0000);
+	PIPE_RESTORE(priv, pipe->pipe_0x0040, 0x0040);
+	nv04_graph_idle(priv);
 }
 
-static void nv10_graph_create_pipe(struct nouveau_channel *chan)
+static void
+nv10_graph_create_pipe(struct nv10_graph_chan *chan)
 {
-	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
-	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
-	struct drm_device *dev = chan->dev;
-	uint32_t *fifo_pipe_state_addr;
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	struct pipe_state *pipe_state = &chan->pipe_state;
+	u32 *pipe_state_addr;
 	int i;
 #define PIPE_INIT(addr) \
 	do { \
-		fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
+		pipe_state_addr = pipe_state->pipe_##addr; \
 	} while (0)
 #define PIPE_INIT_END(addr) \
 	do { \
-		uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \
-			ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \
-		if (fifo_pipe_state_addr != __end_addr) \
-			NV_ERROR(dev, "incomplete pipe init for 0x%x : %p/%p\n", \
-				addr, fifo_pipe_state_addr, __end_addr); \
+		u32 *__end_addr = pipe_state->pipe_##addr + \
+			ARRAY_SIZE(pipe_state->pipe_##addr); \
+		if (pipe_state_addr != __end_addr) \
+			nv_error(priv, "incomplete pipe init for 0x%x : %p/%p\n", \
+				addr, pipe_state_addr, __end_addr); \
 	} while (0)
-#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value
+#define NV_WRITE_PIPE_INIT(value) *(pipe_state_addr++) = value
 
 	PIPE_INIT(0x0200);
 	for (i = 0; i < 48; i++)
@@ -634,34 +840,36 @@ static void nv10_graph_create_pipe(struct nouveau_channel *chan)
 #undef NV_WRITE_PIPE_INIT
 }
 
-static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
+static int
+nv10_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg)
 {
 	int i;
 	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
 		if (nv10_graph_ctx_regs[i] == reg)
 			return i;
 	}
-	NV_ERROR(dev, "unknow offset nv10_ctx_regs %d\n", reg);
+	nv_error(priv, "unknown offset nv10_ctx_regs %d\n", reg);
 	return -1;
 }
 
-static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
+static int
+nv17_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg)
 {
 	int i;
 	for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
 		if (nv17_graph_ctx_regs[i] == reg)
 			return i;
 	}
-	NV_ERROR(dev, "unknow offset nv17_ctx_regs %d\n", reg);
+	nv_error(priv, "unknown offset nv17_ctx_regs %d\n", reg);
 	return -1;
 }
 
-static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
-				       uint32_t inst)
+static void
+nv10_graph_load_dma_vtxbuf(struct nv10_graph_chan *chan, int chid, u32 inst)
 {
-	struct drm_device *dev = chan->dev;
-	uint32_t st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
-	uint32_t ctx_user, ctx_switch[5];
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
+	u32 ctx_user, ctx_switch[5];
 	int i, subchan = -1;
 
 	/* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
@@ -671,7 +879,7 @@ static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
 
 	/* Look for a celsius object */
 	for (i = 0; i < 8; i++) {
-		int class = nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
+		int class = nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
 
 		if (class == 0x56 || class == 0x96 || class == 0x99) {
 			subchan = i;
@@ -683,168 +891,158 @@ static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
 		return;
 
 	/* Save the current ctx object */
-	ctx_user = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
+	ctx_user = nv_rd32(priv, NV10_PGRAPH_CTX_USER);
 	for (i = 0; i < 5; i++)
-		ctx_switch[i] = nv_rd32(dev, NV10_PGRAPH_CTX_SWITCH(i));
+		ctx_switch[i] = nv_rd32(priv, NV10_PGRAPH_CTX_SWITCH(i));
 
 	/* Save the FIFO state */
-	st2 = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
-	st2_dl = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DL);
-	st2_dh = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DH);
-	fifo_ptr = nv_rd32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR);
+	st2 = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2);
+	st2_dl = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DL);
+	st2_dh = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DH);
+	fifo_ptr = nv_rd32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR);
 
 	for (i = 0; i < ARRAY_SIZE(fifo); i++)
-		fifo[i] = nv_rd32(dev, 0x4007a0 + 4 * i);
+		fifo[i] = nv_rd32(priv, 0x4007a0 + 4 * i);
 
 	/* Switch to the celsius subchannel */
 	for (i = 0; i < 5; i++)
-		nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i),
-			nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(subchan, i)));
-	nv_mask(dev, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
+		nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i),
+			nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(subchan, i)));
+	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
 
 	/* Inject NV10TCL_DMA_VTXBUF */
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2,
-		0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
-	nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
-	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2,
+		0x2c000000 | chid << 20 | subchan << 16 | 0x18c);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
+	nv_mask(priv, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
+	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
 
 	/* Restore the FIFO state */
 	for (i = 0; i < ARRAY_SIZE(fifo); i++)
-		nv_wr32(dev, 0x4007a0 + 4 * i, fifo[i]);
+		nv_wr32(priv, 0x4007a0 + 4 * i, fifo[i]);
 
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, st2);
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, st2);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
 
 	/* Restore the current ctx object */
 	for (i = 0; i < 5; i++)
-		nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
-	nv_wr32(dev, NV10_PGRAPH_CTX_USER, ctx_user);
+		nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
+	nv_wr32(priv, NV10_PGRAPH_CTX_USER, ctx_user);
 }
 
 static int
-nv10_graph_load_context(struct nouveau_channel *chan)
+nv10_graph_load_context(struct nv10_graph_chan *chan, int chid)
 {
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
-	uint32_t tmp;
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	u32 inst;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
-		nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
-	if (dev_priv->chipset >= 0x17) {
+		nv_wr32(priv, nv10_graph_ctx_regs[i], chan->nv10[i]);
+
+	if (nv_device(priv)->chipset >= 0x17) {
 		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
-			nv_wr32(dev, nv17_graph_ctx_regs[i],
-				pgraph_ctx->nv17[i]);
+			nv_wr32(priv, nv17_graph_ctx_regs[i], chan->nv17[i]);
 	}
 
 	nv10_graph_load_pipe(chan);
-	nv10_graph_load_dma_vtxbuf(chan, (nv_rd32(dev, NV10_PGRAPH_GLOBALSTATE1)
-				   & 0xffff));
-
-	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
-	tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
-	nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24);
-	tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff);
+
+	inst = nv_rd32(priv, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
+	nv10_graph_load_dma_vtxbuf(chan, chid, inst);
+
+	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
+	nv_mask(priv, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
 	return 0;
 }
 
 static int
-nv10_graph_unload_context(struct drm_device *dev)
+nv10_graph_unload_context(struct nv10_graph_chan *chan)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan;
-	struct graph_state *ctx;
-	uint32_t tmp;
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
 	int i;
 
-	chan = nv10_graph_channel(dev);
-	if (!chan)
-		return 0;
-	ctx = chan->engctx[NVOBJ_ENGINE_GR];
-
 	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
-		ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);
+		chan->nv10[i] = nv_rd32(priv, nv10_graph_ctx_regs[i]);
 
-	if (dev_priv->chipset >= 0x17) {
+	if (nv_device(priv)->chipset >= 0x17) {
 		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
-			ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]);
+			chan->nv17[i] = nv_rd32(priv, nv17_graph_ctx_regs[i]);
 	}
 
 	nv10_graph_save_pipe(chan);
 
-	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
-	tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
-	tmp |= 31 << 24;
-	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
+	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
 	return 0;
 }
 
 static void
-nv10_graph_context_switch(struct drm_device *dev)
+nv10_graph_context_switch(struct nv10_graph_priv *priv)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = NULL;
+	struct nv10_graph_chan *prev = NULL;
+	struct nv10_graph_chan *next = NULL;
+	unsigned long flags;
 	int chid;
 
-	nouveau_wait_for_idle(dev);
+	spin_lock_irqsave(&priv->lock, flags);
+	nv04_graph_idle(priv);
 
 	/* If previous context is valid, we need to save it */
-	nv10_graph_unload_context(dev);
+	prev = nv10_graph_channel(priv);
+	if (prev)
+		nv10_graph_unload_context(prev);
+
+	/* load context for next channel */
+	chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+	next = priv->chan[chid];
+	if (next)
+		nv10_graph_load_context(next, chid);
 
-	/* Load context for next channel */
-	chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
-	chan = dev_priv->channels.ptr[chid];
-	if (chan && chan->engctx[NVOBJ_ENGINE_GR])
-		nv10_graph_load_context(chan);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 #define NV_WRITE_CTX(reg, val) do { \
-	int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
+	int offset = nv10_graph_ctx_regs_find_offset(priv, reg); \
 	if (offset > 0) \
-		pgraph_ctx->nv10[offset] = val; \
+		chan->nv10[offset] = val; \
 	} while (0)
 
 #define NV17_WRITE_CTX(reg, val) do { \
-	int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
+	int offset = nv17_graph_ctx_regs_find_offset(priv, reg); \
 	if (offset > 0) \
-		pgraph_ctx->nv17[offset] = val; \
+		chan->nv17[offset] = val; \
 	} while (0)
 
-struct nouveau_channel *
-nv10_graph_channel(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int chid = 31;
-
-	if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
-		chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
-
-	if (chid >= 31)
-		return NULL;
-
-	return dev_priv->channels.ptr[chid];
-}
-
 static int
-nv10_graph_context_new(struct nouveau_channel *chan, int engine)
+nv10_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
 {
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct graph_state *pgraph_ctx;
-
-	NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id);
+	struct nouveau_fifo_chan *fifo = (void *)parent;
+	struct nv10_graph_priv *priv = (void *)engine;
+	struct nv10_graph_chan *chan;
+	unsigned long flags;
+	int ret;
 
-	pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
-	if (pgraph_ctx == NULL)
-		return -ENOMEM;
-	chan->engctx[engine] = pgraph_ctx;
+	ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (priv->chan[fifo->chid]) {
+		*pobject = nv_object(priv->chan[fifo->chid]);
+		atomic_inc(&(*pobject)->refcount);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		nouveau_object_destroy(&chan->base);
+		return 1;
+	}
 
 	NV_WRITE_CTX(0x00400e88, 0x08000000);
 	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
@@ -853,212 +1051,91 @@ nv10_graph_context_new(struct nouveau_channel *chan, int engine)
 	NV_WRITE_CTX(0x00400e14, 0x00001000);
 	NV_WRITE_CTX(0x00400e30, 0x00080008);
 	NV_WRITE_CTX(0x00400e34, 0x00080008);
-	if (dev_priv->chipset >= 0x17) {
+	if (nv_device(priv)->chipset >= 0x17) {
 		/* is it really needed ??? */
 		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
-			       nv_rd32(dev, NV10_PGRAPH_DEBUG_4));
-		NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0));
+			       nv_rd32(priv, NV10_PGRAPH_DEBUG_4));
+		NV17_WRITE_CTX(0x004006b0, nv_rd32(priv, 0x004006b0));
 		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
 		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
 		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
 		NV17_WRITE_CTX(0x00400ed0, 0x00000080);
 	}
-	NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
+	NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->chid << 24);
 
 	nv10_graph_create_pipe(chan);
+
+	priv->chan[fifo->chid] = chan;
+	chan->chid = fifo->chid;
+	spin_unlock_irqrestore(&priv->lock, flags);
 	return 0;
 }
 
 static void
-nv10_graph_context_del(struct nouveau_channel *chan, int engine)
+nv10_graph_context_dtor(struct nouveau_object *object)
 {
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct graph_state *pgraph_ctx = chan->engctx[engine];
+	struct nv10_graph_priv *priv = (void *)object->engine;
+	struct nv10_graph_chan *chan = (void *)object;
 	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-
-	/* Unload the context if it's the currently active one */
-	if (nv10_graph_channel(dev) == chan)
-		nv10_graph_unload_context(dev);
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->chan[chan->chid] = NULL;
+	spin_unlock_irqrestore(&priv->lock, flags);
 
-	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	/* Free the context resources */
-	chan->engctx[engine] = NULL;
-	kfree(pgraph_ctx);
-}
-
-static void
-nv10_graph_set_tile_region(struct drm_device *dev, int i)
-{
-	struct nouveau_fb_tile *tile = nvfb_tile(dev, i);
-	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
-	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
-	nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
+	nouveau_object_destroy(&chan->base);
 }
 
 static int
-nv10_graph_init(struct drm_device *dev, int engine)
+nv10_graph_context_fini(struct nouveau_object *object, bool suspend)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 tmp;
-	int i;
-
-	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
-			~NV_PMC_ENABLE_PGRAPH);
-	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
-			 NV_PMC_ENABLE_PGRAPH);
-
-	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
-	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
-	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
-			(1<<29) |
-			(1<<31));
-	if (dev_priv->chipset >= 0x17) {
-		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
-		nv_wr32(dev, 0x400a10, 0x3ff3fb6);
-		nv_wr32(dev, 0x400838, 0x2f8684);
-		nv_wr32(dev, 0x40083c, 0x115f3f);
-		nv_wr32(dev, 0x004006b0, 0x40000020);
-	} else
-		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
+	struct nv10_graph_priv *priv = (void *)object->engine;
+	struct nv10_graph_chan *chan = (void *)object;
+	unsigned long flags;
 
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv10_graph_set_tile_region(dev, i);
-
-	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
-	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
-	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
-	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
-	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
-	nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
-
-	tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
-	tmp |= 31 << 24;
-	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
-	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
+	spin_lock_irqsave(&priv->lock, flags);
+	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+	if (nv10_graph_channel(priv) == chan)
+		nv10_graph_unload_context(chan);
+	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
-	return 0;
+	return nouveau_object_fini(&chan->base, suspend);
 }
 
-static int
-nv10_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-	if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
-		nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-		return -EBUSY;
-	}
-	nv10_graph_unload_context(dev);
-	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
-	return 0;
-}
-
-static int
-nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
-			   u32 class, u32 mthd, u32 data)
-{
-	struct graph_state *ctx = chan->engctx[NVOBJ_ENGINE_GR];
-	struct drm_device *dev = chan->dev;
-	struct pipe_state *pipe = &ctx->pipe_state;
-	uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
-	uint32_t xfmode0, xfmode1;
-	int i;
-
-	ctx->lma_window[(mthd - 0x1638) / 4] = data;
-
-	if (mthd != 0x1644)
-		return 0;
-
-	nouveau_wait_for_idle(dev);
-
-	PIPE_SAVE(dev, pipe_0x0040, 0x0040);
-	PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
-
-	PIPE_RESTORE(dev, ctx->lma_window, 0x6790);
-
-	nouveau_wait_for_idle(dev);
-
-	xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
-	xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
-
-	PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
-	PIPE_SAVE(dev, pipe_0x64c0, 0x64c0);
-	PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0);
-	PIPE_SAVE(dev, pipe_0x6a80, 0x6a80);
-
-	nouveau_wait_for_idle(dev);
-
-	nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
-	nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
-	for (i = 0; i < 4; i++)
-		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
-	for (i = 0; i < 4; i++)
-		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
-	for (i = 0; i < 3; i++)
-		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
-
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
-	for (i = 0; i < 3; i++)
-		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
-	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
-
-	PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
-
-	nouveau_wait_for_idle(dev);
-
-	PIPE_RESTORE(dev, pipe_0x0040, 0x0040);
-
-	nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
-	nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
-
-	PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0);
-	PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0);
-	PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80);
-	PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
-
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
-	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
-	nouveau_wait_for_idle(dev);
+static struct nouveau_oclass
+nv10_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_graph_context_ctor,
+		.dtor = nv10_graph_context_dtor,
+		.init = nouveau_object_init,
+		.fini = nv10_graph_context_fini,
+	},
+};
 
-	return 0;
-}
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
 
-static int
-nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
-			   u32 class, u32 mthd, u32 data)
+static void
+nv10_graph_tile_prog(struct nouveau_engine *engine, int i)
 {
-	struct drm_device *dev = chan->dev;
+	struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
+	struct nouveau_fifo *pfifo = nouveau_fifo(engine);
+	struct nv10_graph_priv *priv = (void *)engine;
+	unsigned long flags;
 
-	nouveau_wait_for_idle(dev);
+	pfifo->pause(pfifo, &flags);
+	nv04_graph_idle(priv);
 
-	nv_wr32(dev, NV10_PGRAPH_DEBUG_4,
-		nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8);
-	nv_wr32(dev, 0x004006b0,
-		nv_rd32(dev, 0x004006b0) | 0x8 << 24);
+	nv_wr32(priv, NV10_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(priv, NV10_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(priv, NV10_PGRAPH_TILE(i), tile->addr);
 
-	return 0;
+	pfifo->start(pfifo, &flags);
 }
 
-struct nouveau_bitfield nv10_graph_intr[] = {
+struct nouveau_bitfield nv10_graph_intr_name[] = {
 	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
 	{ NV_PGRAPH_INTR_ERROR,  "ERROR" },
 	{}
@@ -1073,115 +1150,165 @@ struct nouveau_bitfield nv10_graph_nstatus[] = {
1073}; 1150};
1074 1151
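A note on the tables just above: both the old and new interrupt paths decode status words through nouveau_bitfield arrays, {mask, name} pairs closed by an empty terminator entry. A minimal, self-contained sketch of the idiom, with illustrative values rather than the driver's real ones:

#include <stdio.h>
#include <stdint.h>

/* Illustrative {mask, name} table in the style of nv10_graph_intr_name. */
struct bitfield { uint32_t mask; const char *name; };

static const struct bitfield intr_names[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00100000, "ERROR" },
	{ 0, NULL }	/* terminator, like the "{}" entries above */
};

/* Walk the table and print the name of every bit set in "value",
 * roughly the job nouveau_bitfield_print() does in the handlers. */
static void bitfield_print(const struct bitfield *bf, uint32_t value)
{
	while (bf->mask) {
		if (value & bf->mask)
			printf(" %s", bf->name);
		bf++;
	}
}

int main(void)
{
	bitfield_print(intr_names, 0x00100001);	/* prints " NOTIFY ERROR" */
	printf("\n");
	return 0;
}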
1075static void 1152static void
1076nv10_graph_isr(struct drm_device *dev) 1153nv10_graph_intr(struct nouveau_subdev *subdev)
1077{ 1154{
1078 u32 stat; 1155 struct nv10_graph_priv *priv = (void *)subdev;
1079 1156 struct nv10_graph_chan *chan = NULL;
1080 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { 1157 struct nouveau_namedb *namedb = NULL;
1081 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); 1158 struct nouveau_handle *handle = NULL;
1082 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); 1159 u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
1083 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); 1160 u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
1084 u32 chid = (addr & 0x01f00000) >> 20; 1161 u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
1085 u32 subc = (addr & 0x00070000) >> 16; 1162 u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
1086 u32 mthd = (addr & 0x00001ffc); 1163 u32 chid = (addr & 0x01f00000) >> 20;
1087 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); 1164 u32 subc = (addr & 0x00070000) >> 16;
1088 u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff; 1165 u32 mthd = (addr & 0x00001ffc);
1089 u32 show = stat; 1166 u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
1090 1167 u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
1091 if (stat & NV_PGRAPH_INTR_ERROR) { 1168 u32 show = stat;
1092 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { 1169 unsigned long flags;
1093 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
1094 show &= ~NV_PGRAPH_INTR_ERROR;
1095 }
1096 }
1097 1170
1098 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) { 1171 spin_lock_irqsave(&priv->lock, flags);
1099 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); 1172 chan = priv->chan[chid];
1100 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 1173 if (chan)
1101 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 1174 namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
1102 nv10_graph_context_switch(dev); 1175 spin_unlock_irqrestore(&priv->lock, flags);
1176
1177 if (stat & NV_PGRAPH_INTR_ERROR) {
1178 if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
1179 handle = nouveau_namedb_get_class(namedb, class);
1180 if (handle && !nv_call(handle->object, mthd, data))
1181 show &= ~NV_PGRAPH_INTR_ERROR;
1103 } 1182 }
1183 }
1104 1184
1105 nv_wr32(dev, NV03_PGRAPH_INTR, stat); 1185 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
1106 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); 1186 nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
1107 1187 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1108 if (show && nouveau_ratelimit()) { 1188 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1109 NV_INFO(dev, "PGRAPH -"); 1189 nv10_graph_context_switch(priv);
1110 nouveau_bitfield_print(nv10_graph_intr, show); 1190 }
1111 printk(" nsource:"); 1191
1112 nouveau_bitfield_print(nv04_graph_nsource, nsource); 1192 nv_wr32(priv, NV03_PGRAPH_INTR, stat);
1113 printk(" nstatus:"); 1193 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
1114 nouveau_bitfield_print(nv10_graph_nstatus, nstatus); 1194
1115 printk("\n"); 1195 if (show) {
1116 NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x " 1196 nv_error(priv, "");
1117 "mthd 0x%04x data 0x%08x\n", 1197 nouveau_bitfield_print(nv10_graph_intr_name, show);
1118 chid, subc, class, mthd, data); 1198 printk(" nsource:");
1119 } 1199 nouveau_bitfield_print(nv04_graph_nsource, nsource);
1200 printk(" nstatus:");
1201 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
1202 printk("\n");
1203 nv_error(priv, "ch %d/%d class 0x%04x "
1204 "mthd 0x%04x data 0x%08x\n",
1205 chid, subc, class, mthd, data);
1120 } 1206 }
1207
1208 nouveau_namedb_put(handle);
1121} 1209}
1122 1210
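Both the old ISR and the new per-subdev handler pull channel, subchannel and method out of the same packed NV04_PGRAPH_TRAPPED_ADDR word. A standalone sketch of that decode, reusing the masks and shifts verbatim from the code above (the sample value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr = 0x01671ffc;	/* hypothetical raw register value */
	uint32_t chid = (addr & 0x01f00000) >> 20;	/* channel id */
	uint32_t subc = (addr & 0x00070000) >> 16;	/* subchannel */
	uint32_t mthd = (addr & 0x00001ffc);		/* method offset */

	printf("ch %u/%u mthd 0x%04x\n", chid, subc, mthd);	/* ch 22/7 mthd 0x1ffc */
	return 0;
}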
1123static void 1211static int
1124nv10_graph_destroy(struct drm_device *dev, int engine) 1212nv10_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1213 struct nouveau_oclass *oclass, void *data, u32 size,
1214 struct nouveau_object **pobject)
1125{ 1215{
1126 struct nv10_graph_engine *pgraph = nv_engine(dev, engine); 1216 struct nv10_graph_priv *priv;
1217 int ret;
1218
1219 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
1220 *pobject = nv_object(priv);
1221 if (ret)
1222 return ret;
1223
1224 nv_subdev(priv)->unit = 0x00001000;
1225 nv_subdev(priv)->intr = nv10_graph_intr;
1226 nv_engine(priv)->cclass = &nv10_graph_cclass;
1227
1228 if (nv_device(priv)->chipset <= 0x10)
1229 nv_engine(priv)->sclass = nv10_graph_sclass;
1230 else
1231 if (nv_device(priv)->chipset < 0x17 ||
1232 nv_device(priv)->chipset == 0x1a)
1233 nv_engine(priv)->sclass = nv15_graph_sclass;
1234 else
1235 nv_engine(priv)->sclass = nv17_graph_sclass;
1236
1237 nv_engine(priv)->tile_prog = nv10_graph_tile_prog;
1238 spin_lock_init(&priv->lock);
1239 return 0;
1240}
1127 1241
1128 nouveau_irq_unregister(dev, 12); 1242static void
1129 kfree(pgraph); 1243nv10_graph_dtor(struct nouveau_object *object)
1244{
1245 struct nv10_graph_priv *priv = (void *)object;
1246 nouveau_graph_destroy(&priv->base);
1130} 1247}
1131 1248
1132int 1249static int
1133nv10_graph_create(struct drm_device *dev) 1250nv10_graph_init(struct nouveau_object *object)
1134{ 1251{
1135 struct drm_nouveau_private *dev_priv = dev->dev_private; 1252 struct nouveau_engine *engine = nv_engine(object);
1136 struct nv10_graph_engine *pgraph; 1253 struct nouveau_fb *pfb = nouveau_fb(object);
1137 1254 struct nv10_graph_priv *priv = (void *)engine;
1138 pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL); 1255 int ret, i;
1139 if (!pgraph) 1256
1140 return -ENOMEM; 1257 ret = nouveau_graph_init(&priv->base);
1141 1258 if (ret)
1142 pgraph->base.destroy = nv10_graph_destroy; 1259 return ret;
1143 pgraph->base.init = nv10_graph_init; 1260
1144 pgraph->base.fini = nv10_graph_fini; 1261 nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
1145 pgraph->base.context_new = nv10_graph_context_new; 1262 nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
1146 pgraph->base.context_del = nv10_graph_context_del; 1263
1147 pgraph->base.object_new = nv04_graph_object_new; 1264 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
1148 pgraph->base.set_tile_region = nv10_graph_set_tile_region; 1265 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
1149 1266 nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
1150 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base); 1267 /* nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
1151 nouveau_irq_register(dev, 12, nv10_graph_isr); 1268 nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
1152 1269 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
1153 NVOBJ_CLASS(dev, 0x0030, GR); /* null */ 1270
1154 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ 1271 if (nv_device(priv)->chipset >= 0x17) {
1155 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ 1272 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000);
1156 NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */ 1273 nv_wr32(priv, 0x400a10, 0x03ff3fb6);
1157 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ 1274 nv_wr32(priv, 0x400838, 0x002f8684);
1158 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ 1275 nv_wr32(priv, 0x40083c, 0x00115f3f);
1159 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ 1276 nv_wr32(priv, 0x4006b0, 0x40000020);
1160 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
1161 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
1162 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
1163 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
1164 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
1165 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
1166 NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
1167 NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
1168 NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
1169 NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
1170
1171 /* celcius */
1172 if (dev_priv->chipset <= 0x10) {
1173 NVOBJ_CLASS(dev, 0x0056, GR);
1174 } else
1175 if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
1176 NVOBJ_CLASS(dev, 0x0096, GR);
1177 } else { 1277 } else {
1178 NVOBJ_CLASS(dev, 0x0099, GR); 1278 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
1179 NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
1180 NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
1181 NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
1182 NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
1183 NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
1184 } 1279 }
1185 1280
1281 /* Turn all the tiling regions off. */
1282 for (i = 0; i < pfb->tile.regions; i++)
1283 engine->tile_prog(engine, i);
1284
1285 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
1286 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
1287 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
1288 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
1289 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
1290 nv_wr32(priv, NV10_PGRAPH_STATE, 0xFFFFFFFF);
1291
1292 nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
1293 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
1294 nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
1186 return 0; 1295 return 0;
1187} 1296}
1297
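One small cleanup visible in nv10_graph_init() above: the old explicit read/or/write sequence on NV10_PGRAPH_CTX_USER becomes a single nv_mask() call. A plausible, simplified model of such a read-modify-write helper; the plain variable stands in for the MMIO register, and this is a sketch, not the core's actual implementation:

#include <stdint.h>
#include <stdio.h>

static uint32_t ctx_user;	/* stand-in for the CTX_USER register */

static uint32_t rd32(void) { return ctx_user; }
static void wr32(uint32_t v) { ctx_user = v; }

/* Clear the bits covered by "mask", then OR in "value":
 * one bus read and one bus write instead of open-coded rd/or/wr. */
static void mask32(uint32_t mask, uint32_t value)
{
	wr32((rd32() & ~mask) | value);
}

int main(void)
{
	ctx_user = 0xdeadbeef;
	/* matches: nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000) */
	mask32(0xff000000, 0x1f000000);
	printf("0x%08x\n", ctx_user);	/* 0x1fadbeef */
	return 0;
}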
1298static int
1299nv10_graph_fini(struct nouveau_object *object, bool suspend)
1300{
1301 struct nv10_graph_priv *priv = (void *)object;
1302 return nouveau_graph_fini(&priv->base, suspend);
1303}
1304
1305struct nouveau_oclass
1306nv10_graph_oclass = {
1307 .handle = NV_ENGINE(GR, 0x10),
1308 .ofuncs = &(struct nouveau_ofuncs) {
1309 .ctor = nv10_graph_ctor,
1310 .dtor = nv10_graph_dtor,
1311 .init = nv10_graph_init,
1312 .fini = nv10_graph_fini,
1313 },
1314};
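nv10_graph_oclass above is the shape every ported engine now takes: a handle plus an ofuncs vtable of ctor/dtor/init/fini, letting the core construct, initialise and tear down any engine without knowing its concrete type. A minimal sketch of that pattern in plain C; all names here are illustrative, not the nouveau core's real types:

#include <stdio.h>
#include <stdlib.h>

struct obj;

struct ofuncs {
	struct obj *(*ctor)(void);
	void (*dtor)(struct obj *);
	int (*init)(struct obj *);
	int (*fini)(struct obj *, int suspend);
};

struct oclass {
	unsigned handle;		/* engine type + chipset, e.g. 0x10 */
	const struct ofuncs *ofuncs;
};

struct obj { const struct oclass *oclass; };

static struct obj *demo_ctor(void) { return calloc(1, sizeof(struct obj)); }
static void demo_dtor(struct obj *o) { free(o); }
static int demo_init(struct obj *o) { (void)o; puts("init"); return 0; }
static int demo_fini(struct obj *o, int suspend)
{
	(void)o; (void)suspend;
	puts("fini");
	return 0;
}

static const struct ofuncs demo_funcs = {
	.ctor = demo_ctor, .dtor = demo_dtor,
	.init = demo_init, .fini = demo_fini,
};
static const struct oclass demo_oclass = { 0x10, &demo_funcs };

/* The "core": drives the object purely through its class, never
 * touching engine-specific code. */
int main(void)
{
	struct obj *o = demo_oclass.ofuncs->ctor();
	o->oclass = &demo_oclass;
	o->oclass->ofuncs->init(o);
	o->oclass->ofuncs->fini(o, 0);
	o->oclass->ofuncs->dtor(o);
	return 0;
}

The payoff is visible throughout this commit: adding a chipset mostly means writing one static oclass and pointing its unchanged slots at shared functions.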
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index 0d874b8b18e5..61faef976aee 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -1,836 +1,378 @@
1#include "drmP.h" 1#include <core/os.h>
2#include "drm.h" 2#include <core/class.h>
3#include "nouveau_drv.h" 3#include <core/engctx.h>
4#include <nouveau_drm.h> 4#include <core/handle.h>
5 5#include <core/enum.h>
6/* 6
7 * NV20 7#include <subdev/timer.h>
8 * ----- 8#include <subdev/fb.h>
9 * There are 3 families : 9
10 * NV20 is 0x10de:0x020* 10#include <engine/graph.h>
11 * NV25/28 is 0x10de:0x025* / 0x10de:0x028* 11#include <engine/fifo.h>
12 * NV2A is 0x10de:0x02A0 12
13 * 13#include "nv20.h"
14 * NV30 14#include "regs.h"
15 * ----- 15
16 * There are 3 families : 16/*******************************************************************************
17 * NV30/31 is 0x10de:0x030* / 0x10de:0x031* 17 * Graphics object classes
18 * NV34 is 0x10de:0x032* 18 ******************************************************************************/
19 * NV35/36 is 0x10de:0x033* / 0x10de:0x034* 19
20 * 20static struct nouveau_oclass
21 * Not seen in the wild, no dumps (probably NV35) : 21nv20_graph_sclass[] = {
22 * NV37 is 0x10de:0x00fc, 0x10de:0x00fd 22 { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
23 * NV38 is 0x10de:0x0333, 0x10de:0x00fe 23 { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
24 * 24 { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
25 */ 25 { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
26 26 { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
27struct nv20_graph_engine { 27 { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
28 struct nouveau_exec_engine base; 28 { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
29 struct nouveau_gpuobj *ctxtab; 29 { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
30 void (*grctx_init)(struct nouveau_gpuobj *); 30 { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
31 u32 grctx_size; 31 { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
32 u32 grctx_user; 32 { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
33 { 0x0096, &nv04_graph_ofuncs, NULL }, /* celcius */
34 { 0x0097, &nv04_graph_ofuncs, NULL }, /* kelvin */
35 { 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */
36 { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
37 {},
33}; 38};
34 39
35#define NV20_GRCTX_SIZE (3580*4) 40/*******************************************************************************
36#define NV25_GRCTX_SIZE (3529*4) 41 * PGRAPH context
37#define NV2A_GRCTX_SIZE (3500*4) 42 ******************************************************************************/
38 43
39#define NV30_31_GRCTX_SIZE (24392) 44static int
40#define NV34_GRCTX_SIZE (18140) 45nv20_graph_context_ctor(struct nouveau_object *parent,
41#define NV35_36_GRCTX_SIZE (22396) 46 struct nouveau_object *engine,
42 47 struct nouveau_oclass *oclass, void *data, u32 size,
43int 48 struct nouveau_object **pobject)
44nv20_graph_unload_context(struct drm_device *dev)
45{ 49{
46 struct nouveau_channel *chan; 50 struct nv20_graph_chan *chan;
47 struct nouveau_gpuobj *grctx; 51 int ret, i;
48 u32 tmp;
49
50 chan = nv10_graph_channel(dev);
51 if (!chan)
52 return 0;
53 grctx = chan->engctx[NVOBJ_ENGINE_GR];
54
55 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->addr >> 4);
56 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
57 NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
58
59 nouveau_wait_for_idle(dev);
60
61 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
62 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
63 tmp |= 31 << 24;
64 nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
65 return 0;
66}
67 52
68static void 53 ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
69nv20_graph_rdi(struct drm_device *dev) 54 0x37f0, 16, NVOBJ_FLAG_ZERO_ALLOC,
70{ 55 &chan);
71 struct drm_nouveau_private *dev_priv = dev->dev_private; 56 *pobject = nv_object(chan);
72 int i, writecount = 32; 57 if (ret)
73 uint32_t rdi_index = 0x2c80000; 58 return ret;
74
75 if (dev_priv->chipset == 0x20) {
76 rdi_index = 0x3d0000;
77 writecount = 15;
78 }
79
80 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
81 for (i = 0; i < writecount; i++)
82 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
83 59
84 nouveau_wait_for_idle(dev); 60 chan->chid = nouveau_fifo_chan(parent)->chid;
85}
86 61
87static void 62 nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
88nv20_graph_context_init(struct nouveau_gpuobj *ctx) 63 nv_wo32(chan, 0x033c, 0xffff0000);
89{ 64 nv_wo32(chan, 0x03a0, 0x0fff0000);
90 int i; 65 nv_wo32(chan, 0x03a4, 0x0fff0000);
91 66 nv_wo32(chan, 0x047c, 0x00000101);
92 nv_wo32(ctx, 0x033c, 0xffff0000); 67 nv_wo32(chan, 0x0490, 0x00000111);
93 nv_wo32(ctx, 0x03a0, 0x0fff0000); 68 nv_wo32(chan, 0x04a8, 0x44400000);
94 nv_wo32(ctx, 0x03a4, 0x0fff0000);
95 nv_wo32(ctx, 0x047c, 0x00000101);
96 nv_wo32(ctx, 0x0490, 0x00000111);
97 nv_wo32(ctx, 0x04a8, 0x44400000);
98 for (i = 0x04d4; i <= 0x04e0; i += 4) 69 for (i = 0x04d4; i <= 0x04e0; i += 4)
99 nv_wo32(ctx, i, 0x00030303); 70 nv_wo32(chan, i, 0x00030303);
100 for (i = 0x04f4; i <= 0x0500; i += 4) 71 for (i = 0x04f4; i <= 0x0500; i += 4)
101 nv_wo32(ctx, i, 0x00080000); 72 nv_wo32(chan, i, 0x00080000);
102 for (i = 0x050c; i <= 0x0518; i += 4) 73 for (i = 0x050c; i <= 0x0518; i += 4)
103 nv_wo32(ctx, i, 0x01012000); 74 nv_wo32(chan, i, 0x01012000);
104 for (i = 0x051c; i <= 0x0528; i += 4) 75 for (i = 0x051c; i <= 0x0528; i += 4)
105 nv_wo32(ctx, i, 0x000105b8); 76 nv_wo32(chan, i, 0x000105b8);
106 for (i = 0x052c; i <= 0x0538; i += 4) 77 for (i = 0x052c; i <= 0x0538; i += 4)
107 nv_wo32(ctx, i, 0x00080008); 78 nv_wo32(chan, i, 0x00080008);
108 for (i = 0x055c; i <= 0x0598; i += 4) 79 for (i = 0x055c; i <= 0x0598; i += 4)
109 nv_wo32(ctx, i, 0x07ff0000); 80 nv_wo32(chan, i, 0x07ff0000);
110 nv_wo32(ctx, 0x05a4, 0x4b7fffff); 81 nv_wo32(chan, 0x05a4, 0x4b7fffff);
111 nv_wo32(ctx, 0x05fc, 0x00000001); 82 nv_wo32(chan, 0x05fc, 0x00000001);
112 nv_wo32(ctx, 0x0604, 0x00004000); 83 nv_wo32(chan, 0x0604, 0x00004000);
113 nv_wo32(ctx, 0x0610, 0x00000001); 84 nv_wo32(chan, 0x0610, 0x00000001);
114 nv_wo32(ctx, 0x0618, 0x00040000); 85 nv_wo32(chan, 0x0618, 0x00040000);
115 nv_wo32(ctx, 0x061c, 0x00010000); 86 nv_wo32(chan, 0x061c, 0x00010000);
116 for (i = 0x1c1c; i <= 0x248c; i += 16) { 87 for (i = 0x1c1c; i <= 0x248c; i += 16) {
117 nv_wo32(ctx, (i + 0), 0x10700ff9); 88 nv_wo32(chan, (i + 0), 0x10700ff9);
118 nv_wo32(ctx, (i + 4), 0x0436086c); 89 nv_wo32(chan, (i + 4), 0x0436086c);
119 nv_wo32(ctx, (i + 8), 0x000c001b); 90 nv_wo32(chan, (i + 8), 0x000c001b);
120 } 91 }
121 nv_wo32(ctx, 0x281c, 0x3f800000); 92 nv_wo32(chan, 0x281c, 0x3f800000);
122 nv_wo32(ctx, 0x2830, 0x3f800000); 93 nv_wo32(chan, 0x2830, 0x3f800000);
123 nv_wo32(ctx, 0x285c, 0x40000000); 94 nv_wo32(chan, 0x285c, 0x40000000);
124 nv_wo32(ctx, 0x2860, 0x3f800000); 95 nv_wo32(chan, 0x2860, 0x3f800000);
125 nv_wo32(ctx, 0x2864, 0x3f000000); 96 nv_wo32(chan, 0x2864, 0x3f000000);
126 nv_wo32(ctx, 0x286c, 0x40000000); 97 nv_wo32(chan, 0x286c, 0x40000000);
127 nv_wo32(ctx, 0x2870, 0x3f800000); 98 nv_wo32(chan, 0x2870, 0x3f800000);
128 nv_wo32(ctx, 0x2878, 0xbf800000); 99 nv_wo32(chan, 0x2878, 0xbf800000);
129 nv_wo32(ctx, 0x2880, 0xbf800000); 100 nv_wo32(chan, 0x2880, 0xbf800000);
130 nv_wo32(ctx, 0x34a4, 0x000fe000); 101 nv_wo32(chan, 0x34a4, 0x000fe000);
131 nv_wo32(ctx, 0x3530, 0x000003f8); 102 nv_wo32(chan, 0x3530, 0x000003f8);
132 nv_wo32(ctx, 0x3540, 0x002fe000); 103 nv_wo32(chan, 0x3540, 0x002fe000);
133 for (i = 0x355c; i <= 0x3578; i += 4) 104 for (i = 0x355c; i <= 0x3578; i += 4)
134 nv_wo32(ctx, i, 0x001c527c); 105 nv_wo32(chan, i, 0x001c527c);
106 return 0;
135} 107}
136 108
137static void 109int
138nv25_graph_context_init(struct nouveau_gpuobj *ctx) 110nv20_graph_context_init(struct nouveau_object *object)
139{ 111{
140 int i; 112 struct nv20_graph_priv *priv = (void *)object->engine;
141 113 struct nv20_graph_chan *chan = (void *)object;
142 nv_wo32(ctx, 0x035c, 0xffff0000); 114 int ret;
143 nv_wo32(ctx, 0x03c0, 0x0fff0000);
144 nv_wo32(ctx, 0x03c4, 0x0fff0000);
145 nv_wo32(ctx, 0x049c, 0x00000101);
146 nv_wo32(ctx, 0x04b0, 0x00000111);
147 nv_wo32(ctx, 0x04c8, 0x00000080);
148 nv_wo32(ctx, 0x04cc, 0xffff0000);
149 nv_wo32(ctx, 0x04d0, 0x00000001);
150 nv_wo32(ctx, 0x04e4, 0x44400000);
151 nv_wo32(ctx, 0x04fc, 0x4b800000);
152 for (i = 0x0510; i <= 0x051c; i += 4)
153 nv_wo32(ctx, i, 0x00030303);
154 for (i = 0x0530; i <= 0x053c; i += 4)
155 nv_wo32(ctx, i, 0x00080000);
156 for (i = 0x0548; i <= 0x0554; i += 4)
157 nv_wo32(ctx, i, 0x01012000);
158 for (i = 0x0558; i <= 0x0564; i += 4)
159 nv_wo32(ctx, i, 0x000105b8);
160 for (i = 0x0568; i <= 0x0574; i += 4)
161 nv_wo32(ctx, i, 0x00080008);
162 for (i = 0x0598; i <= 0x05d4; i += 4)
163 nv_wo32(ctx, i, 0x07ff0000);
164 nv_wo32(ctx, 0x05e0, 0x4b7fffff);
165 nv_wo32(ctx, 0x0620, 0x00000080);
166 nv_wo32(ctx, 0x0624, 0x30201000);
167 nv_wo32(ctx, 0x0628, 0x70605040);
168 nv_wo32(ctx, 0x062c, 0xb0a09080);
169 nv_wo32(ctx, 0x0630, 0xf0e0d0c0);
170 nv_wo32(ctx, 0x0664, 0x00000001);
171 nv_wo32(ctx, 0x066c, 0x00004000);
172 nv_wo32(ctx, 0x0678, 0x00000001);
173 nv_wo32(ctx, 0x0680, 0x00040000);
174 nv_wo32(ctx, 0x0684, 0x00010000);
175 for (i = 0x1b04; i <= 0x2374; i += 16) {
176 nv_wo32(ctx, (i + 0), 0x10700ff9);
177 nv_wo32(ctx, (i + 4), 0x0436086c);
178 nv_wo32(ctx, (i + 8), 0x000c001b);
179 }
180 nv_wo32(ctx, 0x2704, 0x3f800000);
181 nv_wo32(ctx, 0x2718, 0x3f800000);
182 nv_wo32(ctx, 0x2744, 0x40000000);
183 nv_wo32(ctx, 0x2748, 0x3f800000);
184 nv_wo32(ctx, 0x274c, 0x3f000000);
185 nv_wo32(ctx, 0x2754, 0x40000000);
186 nv_wo32(ctx, 0x2758, 0x3f800000);
187 nv_wo32(ctx, 0x2760, 0xbf800000);
188 nv_wo32(ctx, 0x2768, 0xbf800000);
189 nv_wo32(ctx, 0x308c, 0x000fe000);
190 nv_wo32(ctx, 0x3108, 0x000003f8);
191 nv_wo32(ctx, 0x3468, 0x002fe000);
192 for (i = 0x3484; i <= 0x34a0; i += 4)
193 nv_wo32(ctx, i, 0x001c527c);
194}
195 115
196static void 116 ret = nouveau_graph_context_init(&chan->base);
197nv2a_graph_context_init(struct nouveau_gpuobj *ctx) 117 if (ret)
198{ 118 return ret;
199 int i;
200
201 nv_wo32(ctx, 0x033c, 0xffff0000);
202 nv_wo32(ctx, 0x03a0, 0x0fff0000);
203 nv_wo32(ctx, 0x03a4, 0x0fff0000);
204 nv_wo32(ctx, 0x047c, 0x00000101);
205 nv_wo32(ctx, 0x0490, 0x00000111);
206 nv_wo32(ctx, 0x04a8, 0x44400000);
207 for (i = 0x04d4; i <= 0x04e0; i += 4)
208 nv_wo32(ctx, i, 0x00030303);
209 for (i = 0x04f4; i <= 0x0500; i += 4)
210 nv_wo32(ctx, i, 0x00080000);
211 for (i = 0x050c; i <= 0x0518; i += 4)
212 nv_wo32(ctx, i, 0x01012000);
213 for (i = 0x051c; i <= 0x0528; i += 4)
214 nv_wo32(ctx, i, 0x000105b8);
215 for (i = 0x052c; i <= 0x0538; i += 4)
216 nv_wo32(ctx, i, 0x00080008);
217 for (i = 0x055c; i <= 0x0598; i += 4)
218 nv_wo32(ctx, i, 0x07ff0000);
219 nv_wo32(ctx, 0x05a4, 0x4b7fffff);
220 nv_wo32(ctx, 0x05fc, 0x00000001);
221 nv_wo32(ctx, 0x0604, 0x00004000);
222 nv_wo32(ctx, 0x0610, 0x00000001);
223 nv_wo32(ctx, 0x0618, 0x00040000);
224 nv_wo32(ctx, 0x061c, 0x00010000);
225 for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
226 nv_wo32(ctx, (i + 0), 0x10700ff9);
227 nv_wo32(ctx, (i + 4), 0x0436086c);
228 nv_wo32(ctx, (i + 8), 0x000c001b);
229 }
230 nv_wo32(ctx, 0x269c, 0x3f800000);
231 nv_wo32(ctx, 0x26b0, 0x3f800000);
232 nv_wo32(ctx, 0x26dc, 0x40000000);
233 nv_wo32(ctx, 0x26e0, 0x3f800000);
234 nv_wo32(ctx, 0x26e4, 0x3f000000);
235 nv_wo32(ctx, 0x26ec, 0x40000000);
236 nv_wo32(ctx, 0x26f0, 0x3f800000);
237 nv_wo32(ctx, 0x26f8, 0xbf800000);
238 nv_wo32(ctx, 0x2700, 0xbf800000);
239 nv_wo32(ctx, 0x3024, 0x000fe000);
240 nv_wo32(ctx, 0x30a0, 0x000003f8);
241 nv_wo32(ctx, 0x33fc, 0x002fe000);
242 for (i = 0x341c; i <= 0x3438; i += 4)
243 nv_wo32(ctx, i, 0x001c527c);
244}
245 119
246static void 120 nv_wo32(priv->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
247nv30_31_graph_context_init(struct nouveau_gpuobj *ctx) 121 return 0;
248{
249 int i;
250
251 nv_wo32(ctx, 0x0410, 0x00000101);
252 nv_wo32(ctx, 0x0424, 0x00000111);
253 nv_wo32(ctx, 0x0428, 0x00000060);
254 nv_wo32(ctx, 0x0444, 0x00000080);
255 nv_wo32(ctx, 0x0448, 0xffff0000);
256 nv_wo32(ctx, 0x044c, 0x00000001);
257 nv_wo32(ctx, 0x0460, 0x44400000);
258 nv_wo32(ctx, 0x048c, 0xffff0000);
259 for (i = 0x04e0; i < 0x04e8; i += 4)
260 nv_wo32(ctx, i, 0x0fff0000);
261 nv_wo32(ctx, 0x04ec, 0x00011100);
262 for (i = 0x0508; i < 0x0548; i += 4)
263 nv_wo32(ctx, i, 0x07ff0000);
264 nv_wo32(ctx, 0x0550, 0x4b7fffff);
265 nv_wo32(ctx, 0x058c, 0x00000080);
266 nv_wo32(ctx, 0x0590, 0x30201000);
267 nv_wo32(ctx, 0x0594, 0x70605040);
268 nv_wo32(ctx, 0x0598, 0xb8a89888);
269 nv_wo32(ctx, 0x059c, 0xf8e8d8c8);
270 nv_wo32(ctx, 0x05b0, 0xb0000000);
271 for (i = 0x0600; i < 0x0640; i += 4)
272 nv_wo32(ctx, i, 0x00010588);
273 for (i = 0x0640; i < 0x0680; i += 4)
274 nv_wo32(ctx, i, 0x00030303);
275 for (i = 0x06c0; i < 0x0700; i += 4)
276 nv_wo32(ctx, i, 0x0008aae4);
277 for (i = 0x0700; i < 0x0740; i += 4)
278 nv_wo32(ctx, i, 0x01012000);
279 for (i = 0x0740; i < 0x0780; i += 4)
280 nv_wo32(ctx, i, 0x00080008);
281 nv_wo32(ctx, 0x085c, 0x00040000);
282 nv_wo32(ctx, 0x0860, 0x00010000);
283 for (i = 0x0864; i < 0x0874; i += 4)
284 nv_wo32(ctx, i, 0x00040004);
285 for (i = 0x1f18; i <= 0x3088 ; i += 16) {
286 nv_wo32(ctx, i + 0, 0x10700ff9);
287 nv_wo32(ctx, i + 1, 0x0436086c);
288 nv_wo32(ctx, i + 2, 0x000c001b);
289 }
290 for (i = 0x30b8; i < 0x30c8; i += 4)
291 nv_wo32(ctx, i, 0x0000ffff);
292 nv_wo32(ctx, 0x344c, 0x3f800000);
293 nv_wo32(ctx, 0x3808, 0x3f800000);
294 nv_wo32(ctx, 0x381c, 0x3f800000);
295 nv_wo32(ctx, 0x3848, 0x40000000);
296 nv_wo32(ctx, 0x384c, 0x3f800000);
297 nv_wo32(ctx, 0x3850, 0x3f000000);
298 nv_wo32(ctx, 0x3858, 0x40000000);
299 nv_wo32(ctx, 0x385c, 0x3f800000);
300 nv_wo32(ctx, 0x3864, 0xbf800000);
301 nv_wo32(ctx, 0x386c, 0xbf800000);
302} 122}
303 123
304static void 124int
305nv34_graph_context_init(struct nouveau_gpuobj *ctx) 125nv20_graph_context_fini(struct nouveau_object *object, bool suspend)
306{ 126{
307 int i; 127 struct nv20_graph_priv *priv = (void *)object->engine;
308 128 struct nv20_graph_chan *chan = (void *)object;
309 nv_wo32(ctx, 0x040c, 0x01000101); 129 int chid = -1;
310 nv_wo32(ctx, 0x0420, 0x00000111); 130
311 nv_wo32(ctx, 0x0424, 0x00000060); 131 nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
312 nv_wo32(ctx, 0x0440, 0x00000080); 132 if (nv_rd32(priv, 0x400144) & 0x00010000)
313 nv_wo32(ctx, 0x0444, 0xffff0000); 133 chid = (nv_rd32(priv, 0x400148) & 0x1f000000) >> 24;
314 nv_wo32(ctx, 0x0448, 0x00000001); 134 if (chan->chid == chid) {
315 nv_wo32(ctx, 0x045c, 0x44400000); 135 nv_wr32(priv, 0x400784, nv_gpuobj(chan)->addr >> 4);
316 nv_wo32(ctx, 0x0480, 0xffff0000); 136 nv_wr32(priv, 0x400788, 0x00000002);
317 for (i = 0x04d4; i < 0x04dc; i += 4) 137 nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
318 nv_wo32(ctx, i, 0x0fff0000); 138 nv_wr32(priv, 0x400144, 0x10000000);
319 nv_wo32(ctx, 0x04e0, 0x00011100); 139 nv_mask(priv, 0x400148, 0xff000000, 0x1f000000);
320 for (i = 0x04fc; i < 0x053c; i += 4)
321 nv_wo32(ctx, i, 0x07ff0000);
322 nv_wo32(ctx, 0x0544, 0x4b7fffff);
323 nv_wo32(ctx, 0x057c, 0x00000080);
324 nv_wo32(ctx, 0x0580, 0x30201000);
325 nv_wo32(ctx, 0x0584, 0x70605040);
326 nv_wo32(ctx, 0x0588, 0xb8a89888);
327 nv_wo32(ctx, 0x058c, 0xf8e8d8c8);
328 nv_wo32(ctx, 0x05a0, 0xb0000000);
329 for (i = 0x05f0; i < 0x0630; i += 4)
330 nv_wo32(ctx, i, 0x00010588);
331 for (i = 0x0630; i < 0x0670; i += 4)
332 nv_wo32(ctx, i, 0x00030303);
333 for (i = 0x06b0; i < 0x06f0; i += 4)
334 nv_wo32(ctx, i, 0x0008aae4);
335 for (i = 0x06f0; i < 0x0730; i += 4)
336 nv_wo32(ctx, i, 0x01012000);
337 for (i = 0x0730; i < 0x0770; i += 4)
338 nv_wo32(ctx, i, 0x00080008);
339 nv_wo32(ctx, 0x0850, 0x00040000);
340 nv_wo32(ctx, 0x0854, 0x00010000);
341 for (i = 0x0858; i < 0x0868; i += 4)
342 nv_wo32(ctx, i, 0x00040004);
343 for (i = 0x15ac; i <= 0x271c ; i += 16) {
344 nv_wo32(ctx, i + 0, 0x10700ff9);
345 nv_wo32(ctx, i + 1, 0x0436086c);
346 nv_wo32(ctx, i + 2, 0x000c001b);
347 } 140 }
348 for (i = 0x274c; i < 0x275c; i += 4) 141 nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
349 nv_wo32(ctx, i, 0x0000ffff);
350 nv_wo32(ctx, 0x2ae0, 0x3f800000);
351 nv_wo32(ctx, 0x2e9c, 0x3f800000);
352 nv_wo32(ctx, 0x2eb0, 0x3f800000);
353 nv_wo32(ctx, 0x2edc, 0x40000000);
354 nv_wo32(ctx, 0x2ee0, 0x3f800000);
355 nv_wo32(ctx, 0x2ee4, 0x3f000000);
356 nv_wo32(ctx, 0x2eec, 0x40000000);
357 nv_wo32(ctx, 0x2ef0, 0x3f800000);
358 nv_wo32(ctx, 0x2ef8, 0xbf800000);
359 nv_wo32(ctx, 0x2f00, 0xbf800000);
360}
361 142
362static void 143 nv_wo32(priv->ctxtab, chan->chid * 4, 0x00000000);
363nv35_36_graph_context_init(struct nouveau_gpuobj *ctx) 144 return nouveau_graph_context_fini(&chan->base, suspend);
364{
365 int i;
366
367 nv_wo32(ctx, 0x040c, 0x00000101);
368 nv_wo32(ctx, 0x0420, 0x00000111);
369 nv_wo32(ctx, 0x0424, 0x00000060);
370 nv_wo32(ctx, 0x0440, 0x00000080);
371 nv_wo32(ctx, 0x0444, 0xffff0000);
372 nv_wo32(ctx, 0x0448, 0x00000001);
373 nv_wo32(ctx, 0x045c, 0x44400000);
374 nv_wo32(ctx, 0x0488, 0xffff0000);
375 for (i = 0x04dc; i < 0x04e4; i += 4)
376 nv_wo32(ctx, i, 0x0fff0000);
377 nv_wo32(ctx, 0x04e8, 0x00011100);
378 for (i = 0x0504; i < 0x0544; i += 4)
379 nv_wo32(ctx, i, 0x07ff0000);
380 nv_wo32(ctx, 0x054c, 0x4b7fffff);
381 nv_wo32(ctx, 0x0588, 0x00000080);
382 nv_wo32(ctx, 0x058c, 0x30201000);
383 nv_wo32(ctx, 0x0590, 0x70605040);
384 nv_wo32(ctx, 0x0594, 0xb8a89888);
385 nv_wo32(ctx, 0x0598, 0xf8e8d8c8);
386 nv_wo32(ctx, 0x05ac, 0xb0000000);
387 for (i = 0x0604; i < 0x0644; i += 4)
388 nv_wo32(ctx, i, 0x00010588);
389 for (i = 0x0644; i < 0x0684; i += 4)
390 nv_wo32(ctx, i, 0x00030303);
391 for (i = 0x06c4; i < 0x0704; i += 4)
392 nv_wo32(ctx, i, 0x0008aae4);
393 for (i = 0x0704; i < 0x0744; i += 4)
394 nv_wo32(ctx, i, 0x01012000);
395 for (i = 0x0744; i < 0x0784; i += 4)
396 nv_wo32(ctx, i, 0x00080008);
397 nv_wo32(ctx, 0x0860, 0x00040000);
398 nv_wo32(ctx, 0x0864, 0x00010000);
399 for (i = 0x0868; i < 0x0878; i += 4)
400 nv_wo32(ctx, i, 0x00040004);
401 for (i = 0x1f1c; i <= 0x308c ; i += 16) {
402 nv_wo32(ctx, i + 0, 0x10700ff9);
403 nv_wo32(ctx, i + 4, 0x0436086c);
404 nv_wo32(ctx, i + 8, 0x000c001b);
405 }
406 for (i = 0x30bc; i < 0x30cc; i += 4)
407 nv_wo32(ctx, i, 0x0000ffff);
408 nv_wo32(ctx, 0x3450, 0x3f800000);
409 nv_wo32(ctx, 0x380c, 0x3f800000);
410 nv_wo32(ctx, 0x3820, 0x3f800000);
411 nv_wo32(ctx, 0x384c, 0x40000000);
412 nv_wo32(ctx, 0x3850, 0x3f800000);
413 nv_wo32(ctx, 0x3854, 0x3f000000);
414 nv_wo32(ctx, 0x385c, 0x40000000);
415 nv_wo32(ctx, 0x3860, 0x3f800000);
416 nv_wo32(ctx, 0x3868, 0xbf800000);
417 nv_wo32(ctx, 0x3870, 0xbf800000);
418} 145}
419 146
420int 147static struct nouveau_oclass
421nv20_graph_context_new(struct nouveau_channel *chan, int engine) 148nv20_graph_cclass = {
422{ 149 .handle = NV_ENGCTX(GR, 0x20),
423 struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine); 150 .ofuncs = &(struct nouveau_ofuncs) {
424 struct nouveau_gpuobj *grctx = NULL; 151 .ctor = nv20_graph_context_ctor,
425 struct drm_device *dev = chan->dev; 152 .dtor = _nouveau_graph_context_dtor,
426 int ret; 153 .init = nv20_graph_context_init,
427 154 .fini = nv20_graph_context_fini,
428 ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16, 155 .rd32 = _nouveau_graph_context_rd32,
429 NVOBJ_FLAG_ZERO_ALLOC, &grctx); 156 .wr32 = _nouveau_graph_context_wr32,
430 if (ret) 157 },
431 return ret; 158};
432
433 /* Initialise default context values */
434 pgraph->grctx_init(grctx);
435
436 /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
437 /* CTX_USER */
438 nv_wo32(grctx, pgraph->grctx_user, (chan->id << 24) | 0x1);
439 159
440 nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->addr >> 4); 160/*******************************************************************************
441 chan->engctx[engine] = grctx; 161 * PGRAPH engine/subdev functions
442 return 0; 162 ******************************************************************************/
443}
444 163
445void 164void
446nv20_graph_context_del(struct nouveau_channel *chan, int engine) 165nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
447{ 166{
448 struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine); 167 struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
449 struct nouveau_gpuobj *grctx = chan->engctx[engine]; 168 struct nouveau_fifo *pfifo = nouveau_fifo(engine);
450 struct drm_device *dev = chan->dev; 169 struct nv20_graph_priv *priv = (void *)engine;
451 struct drm_nouveau_private *dev_priv = dev->dev_private;
452 unsigned long flags; 170 unsigned long flags;
453 171
454 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 172 pfifo->pause(pfifo, &flags);
455 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); 173 nv04_graph_idle(priv);
456 174
457 /* Unload the context if it's the currently active one */ 175 nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
458 if (nv10_graph_channel(dev) == chan) 176 nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
459 nv20_graph_unload_context(dev); 177 nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
460 178
461 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); 179 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
462 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 180 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->limit);
181 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
182 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->pitch);
183 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
184 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
463 185
464 /* Free the context resources */ 186 if (nv_device(engine)->card_type == NV_20) {
465 nv_wo32(pgraph->ctxtab, chan->id * 4, 0); 187 nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
188 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
189 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
190 }
466 191
467 nouveau_gpuobj_ref(NULL, &grctx); 192 pfifo->start(pfifo, &flags);
468 chan->engctx[engine] = NULL;
469} 193}
470 194
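nv20_graph_tile_prog() above programs tile state twice, once through the direct TLIMIT/TSIZE/TILE registers and once through the RDI window, where a write to RDI_INDEX selects a location and the next write to RDI_DATA stores into it. A toy model of that indexed-access idiom (plain arrays stand in for MMIO, and the values are hypothetical):

#include <stdint.h>
#include <stdio.h>

static uint32_t rdi_index;
static uint32_t rdi_space[0x100];	/* pretend indexed register space */

static void wr_index(uint32_t v) { rdi_index = v; }
static void wr_data(uint32_t v) { rdi_space[rdi_index & 0xff] = v; }

int main(void)
{
	/* mirrors the pairing above:
	 *   nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
	 *   nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->limit);        */
	wr_index(0x38);		/* 0x30 + 4 * i for tile region i = 2 */
	wr_data(0x0003ffff);	/* hypothetical tile limit */

	printf("rdi_space[0x38] = 0x%08x\n", rdi_space[0x38]);
	return 0;
}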
471static void 195void
472nv20_graph_set_tile_region(struct drm_device *dev, int i) 196nv20_graph_intr(struct nouveau_subdev *subdev)
473{ 197{
474 struct drm_nouveau_private *dev_priv = dev->dev_private; 198 struct nv20_graph_priv *priv = (void *)subdev;
475 struct nouveau_fb_tile *tile = nvfb_tile(dev, i); 199 struct nouveau_engine *engine = nv_engine(subdev);
476 200 struct nouveau_handle *handle = NULL;
477 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); 201 u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
478 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); 202 u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
479 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); 203 u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
480 204 u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
481 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i); 205 u32 chid = (addr & 0x01f00000) >> 20;
482 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit); 206 u32 subc = (addr & 0x00070000) >> 16;
483 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i); 207 u32 mthd = (addr & 0x00001ffc);
484 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch); 208 u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
485 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); 209 u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
486 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr); 210 u32 inst = nv_ro32(priv->ctxtab, (chid * 4)) << 4;
487 211 u32 show = stat;
488 if (dev_priv->card_type == NV_20) { 212
489 nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp); 213 if (stat & NV_PGRAPH_INTR_ERROR) {
490 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i); 214 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
491 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp); 215 handle = nouveau_engctx_lookup_class(engine, inst, class);
216 if (handle && !nv_call(handle->object, mthd, data))
217 show &= ~NV_PGRAPH_INTR_ERROR;
218 nouveau_engctx_handle_put(handle);
219 }
492 } 220 }
493}
494
495int
496nv20_graph_init(struct drm_device *dev, int engine)
497{
498 struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
499 struct drm_nouveau_private *dev_priv = dev->dev_private;
500 uint32_t tmp, vramsz;
501 int i;
502
503 nv_wr32(dev, NV03_PMC_ENABLE,
504 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
505 nv_wr32(dev, NV03_PMC_ENABLE,
506 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
507
508 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->addr >> 4);
509
510 nv20_graph_rdi(dev);
511
512 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
513 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
514
515 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
516 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
517 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
518 nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
519 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
520 nv_wr32(dev, 0x40009C , 0x00000040);
521
522 if (dev_priv->chipset >= 0x25) {
523 nv_wr32(dev, 0x400890, 0x00a8cfff);
524 nv_wr32(dev, 0x400610, 0x304B1FB6);
525 nv_wr32(dev, 0x400B80, 0x1cbd3883);
526 nv_wr32(dev, 0x400B84, 0x44000000);
527 nv_wr32(dev, 0x400098, 0x40000080);
528 nv_wr32(dev, 0x400B88, 0x000000ff);
529 221
530 } else { 222 nv_wr32(priv, NV03_PGRAPH_INTR, stat);
531 nv_wr32(dev, 0x400880, 0x0008c7df); 223 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
532 nv_wr32(dev, 0x400094, 0x00000005); 224
533 nv_wr32(dev, 0x400B80, 0x45eae20e); 225 if (show) {
534 nv_wr32(dev, 0x400B84, 0x24000000); 226 nv_info(priv, "");
535 nv_wr32(dev, 0x400098, 0x00000040); 227 nouveau_bitfield_print(nv10_graph_intr_name, show);
536 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038); 228 printk(" nsource:");
537 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030); 229 nouveau_bitfield_print(nv04_graph_nsource, nsource);
538 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038); 230 printk(" nstatus:");
539 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030); 231 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
232 printk("\n");
233 nv_info(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
234 chid, subc, class, mthd, data);
540 } 235 }
236}
541 237
542 /* Turn all the tiling regions off. */ 238static int
543 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) 239nv20_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
544 nv20_graph_set_tile_region(dev, i); 240 struct nouveau_oclass *oclass, void *data, u32 size,
545 241 struct nouveau_object **pobject)
546 nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324)); 242{
547 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C); 243 struct nv20_graph_priv *priv;
548 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324)); 244 int ret;
549
550 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
551 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
552
553 tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
554 nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
555 tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
556 nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
557
558 /* begin RAM config */
559 vramsz = pci_resource_len(dev->pdev, 0) - 1;
560 nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
561 nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
562 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
563 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0));
564 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
565 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1));
566 nv_wr32(dev, 0x400820, 0);
567 nv_wr32(dev, 0x400824, 0);
568 nv_wr32(dev, 0x400864, vramsz - 1);
569 nv_wr32(dev, 0x400868, vramsz - 1);
570 245
571 /* interesting.. the below overwrites some of the tile setup above.. */ 246 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
572 nv_wr32(dev, 0x400B20, 0x00000000); 247 *pobject = nv_object(priv);
573 nv_wr32(dev, 0x400B04, 0xFFFFFFFF); 248 if (ret)
249 return ret;
574 250
575 nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0); 251 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
576 nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0); 252 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
577 nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); 253 if (ret)
578 nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); 254 return ret;
579 255
256 nv_subdev(priv)->unit = 0x00001000;
257 nv_subdev(priv)->intr = nv20_graph_intr;
258 nv_engine(priv)->cclass = &nv20_graph_cclass;
259 nv_engine(priv)->sclass = nv20_graph_sclass;
260 nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
580 return 0; 261 return 0;
581} 262}
582 263
583int 264void
584nv30_graph_init(struct drm_device *dev, int engine) 265nv20_graph_dtor(struct nouveau_object *object)
585{ 266{
586 struct nv20_graph_engine *pgraph = nv_engine(dev, engine); 267 struct nv20_graph_priv *priv = (void *)object;
587 struct drm_nouveau_private *dev_priv = dev->dev_private; 268 nouveau_gpuobj_ref(NULL, &priv->ctxtab);
588 int i; 269 nouveau_graph_destroy(&priv->base);
589 270}
590 nv_wr32(dev, NV03_PMC_ENABLE,
591 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
592 nv_wr32(dev, NV03_PMC_ENABLE,
593 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
594
595 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->addr >> 4);
596
597 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
598 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
599
600 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
601 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
602 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
603 nv_wr32(dev, 0x400890, 0x01b463ff);
604 nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
605 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
606 nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
607 nv_wr32(dev, 0x400B80, 0x1003d888);
608 nv_wr32(dev, 0x400B84, 0x0c000000);
609 nv_wr32(dev, 0x400098, 0x00000000);
610 nv_wr32(dev, 0x40009C, 0x0005ad00);
611 nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
612 nv_wr32(dev, 0x4000a0, 0x00000000);
613 nv_wr32(dev, 0x4000a4, 0x00000008);
614 nv_wr32(dev, 0x4008a8, 0xb784a400);
615 nv_wr32(dev, 0x400ba0, 0x002f8685);
616 nv_wr32(dev, 0x400ba4, 0x00231f3f);
617 nv_wr32(dev, 0x4008a4, 0x40000020);
618
619 if (dev_priv->chipset == 0x34) {
620 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
621 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201);
622 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
623 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008);
624 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
625 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032);
626 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
627 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002);
628 }
629 271
630 nv_wr32(dev, 0x4000c0, 0x00000016); 272int
273nv20_graph_init(struct nouveau_object *object)
274{
275 struct nouveau_engine *engine = nv_engine(object);
276 struct nv20_graph_priv *priv = (void *)engine;
277 struct nouveau_fb *pfb = nouveau_fb(object);
278 u32 tmp, vramsz;
279 int ret, i;
631 280
632 /* Turn all the tiling regions off. */ 281 ret = nouveau_graph_init(&priv->base);
633 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) 282 if (ret)
634 nv20_graph_set_tile_region(dev, i); 283 return ret;
635 284
636 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); 285 nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
637 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
638 nv_wr32(dev, 0x0040075c , 0x00000001);
639 286
640 /* begin RAM config */ 287 if (nv_device(priv)->chipset == 0x20) {
641 /* vramsz = pci_resource_len(dev->pdev, 0) - 1; */ 288 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
642 nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0)); 289 for (i = 0; i < 15; i++)
643 nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1)); 290 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
644 if (dev_priv->chipset != 0x34) { 291 nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
645 nv_wr32(dev, 0x400750, 0x00EA0000); 292 } else {
646 nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0)); 293 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
647 nv_wr32(dev, 0x400750, 0x00EA0004); 294 for (i = 0; i < 32; i++)
648 nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1)); 295 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
296 nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
649 } 297 }
650 298
651 return 0; 299 nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
652} 300 nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
653 301
654int 302 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
655nv20_graph_fini(struct drm_device *dev, int engine, bool suspend) 303 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
656{ 304 nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
657 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); 305 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
658 if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) { 306 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
659 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); 307 nv_wr32(priv, 0x40009C , 0x00000040);
660 return -EBUSY;
661 }
662 nv20_graph_unload_context(dev);
663 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
664 return 0;
665}
666 308
667static void 309 if (nv_device(priv)->chipset >= 0x25) {
668nv20_graph_isr(struct drm_device *dev) 310 nv_wr32(priv, 0x400890, 0x00a8cfff);
669{ 311 nv_wr32(priv, 0x400610, 0x304B1FB6);
670 u32 stat; 312 nv_wr32(priv, 0x400B80, 0x1cbd3883);
671 313 nv_wr32(priv, 0x400B84, 0x44000000);
672 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { 314 nv_wr32(priv, 0x400098, 0x40000080);
673 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); 315 nv_wr32(priv, 0x400B88, 0x000000ff);
674 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
675 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
676 u32 chid = (addr & 0x01f00000) >> 20;
677 u32 subc = (addr & 0x00070000) >> 16;
678 u32 mthd = (addr & 0x00001ffc);
679 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
680 u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
681 u32 show = stat;
682
683 if (stat & NV_PGRAPH_INTR_ERROR) {
684 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
685 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
686 show &= ~NV_PGRAPH_INTR_ERROR;
687 }
688 }
689 316
690 nv_wr32(dev, NV03_PGRAPH_INTR, stat); 317 } else {
691 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); 318 nv_wr32(priv, 0x400880, 0x0008c7df);
692 319 nv_wr32(priv, 0x400094, 0x00000005);
693 if (show && nouveau_ratelimit()) { 320 nv_wr32(priv, 0x400B80, 0x45eae20e);
694 NV_INFO(dev, "PGRAPH -"); 321 nv_wr32(priv, 0x400B84, 0x24000000);
695 nouveau_bitfield_print(nv10_graph_intr, show); 322 nv_wr32(priv, 0x400098, 0x00000040);
696 printk(" nsource:"); 323 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
697 nouveau_bitfield_print(nv04_graph_nsource, nsource); 324 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
698 printk(" nstatus:"); 325 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
699 nouveau_bitfield_print(nv10_graph_nstatus, nstatus); 326 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
700 printk("\n");
701 NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
702 "mthd 0x%04x data 0x%08x\n",
703 chid, subc, class, mthd, data);
704 }
705 } 327 }
706}
707
708static void
709nv20_graph_destroy(struct drm_device *dev, int engine)
710{
711 struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
712 328
713 nouveau_irq_unregister(dev, 12); 329 /* Turn all the tiling regions off. */
714 nouveau_gpuobj_ref(NULL, &pgraph->ctxtab); 330 for (i = 0; i < pfb->tile.regions; i++)
331 engine->tile_prog(engine, i);
715 332
716 NVOBJ_ENGINE_DEL(dev, GR); 333 nv_wr32(priv, 0x4009a0, nv_rd32(priv, 0x100324));
717 kfree(pgraph); 334 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
718} 335 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, nv_rd32(priv, 0x100324));
719 336
720int 337 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
721nv20_graph_create(struct drm_device *dev) 338 nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF);
722{
723 struct drm_nouveau_private *dev_priv = dev->dev_private;
724 struct nv20_graph_engine *pgraph;
725 int ret;
726 339
727 pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL); 340 tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) & 0x0007ff00;
728 if (!pgraph) 341 nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
729 return -ENOMEM; 342 tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) | 0x00020100;
730 343 nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
731 pgraph->base.destroy = nv20_graph_destroy;
732 pgraph->base.fini = nv20_graph_fini;
733 pgraph->base.context_new = nv20_graph_context_new;
734 pgraph->base.context_del = nv20_graph_context_del;
735 pgraph->base.object_new = nv04_graph_object_new;
736 pgraph->base.set_tile_region = nv20_graph_set_tile_region;
737
738 pgraph->grctx_user = 0x0028;
739 if (dev_priv->card_type == NV_20) {
740 pgraph->base.init = nv20_graph_init;
741 switch (dev_priv->chipset) {
742 case 0x20:
743 pgraph->grctx_init = nv20_graph_context_init;
744 pgraph->grctx_size = NV20_GRCTX_SIZE;
745 pgraph->grctx_user = 0x0000;
746 break;
747 case 0x25:
748 case 0x28:
749 pgraph->grctx_init = nv25_graph_context_init;
750 pgraph->grctx_size = NV25_GRCTX_SIZE;
751 break;
752 case 0x2a:
753 pgraph->grctx_init = nv2a_graph_context_init;
754 pgraph->grctx_size = NV2A_GRCTX_SIZE;
755 pgraph->grctx_user = 0x0000;
756 break;
757 default:
758 NV_ERROR(dev, "PGRAPH: unknown chipset\n");
759 kfree(pgraph);
760 return 0;
761 }
762 } else {
763 pgraph->base.init = nv30_graph_init;
764 switch (dev_priv->chipset) {
765 case 0x30:
766 case 0x31:
767 pgraph->grctx_init = nv30_31_graph_context_init;
768 pgraph->grctx_size = NV30_31_GRCTX_SIZE;
769 break;
770 case 0x34:
771 pgraph->grctx_init = nv34_graph_context_init;
772 pgraph->grctx_size = NV34_GRCTX_SIZE;
773 break;
774 case 0x35:
775 case 0x36:
776 pgraph->grctx_init = nv35_36_graph_context_init;
777 pgraph->grctx_size = NV35_36_GRCTX_SIZE;
778 break;
779 default:
780 NV_ERROR(dev, "PGRAPH: unknown chipset\n");
781 kfree(pgraph);
782 return 0;
783 }
784 }
785 344
786 /* Create Context Pointer Table */ 345 /* begin RAM config */
787 ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC, 346 vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
788 &pgraph->ctxtab); 347 nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
789 if (ret) { 348 nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
790 kfree(pgraph); 349 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
791 return ret; 350 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100200));
792 } 351 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
352 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100204));
353 nv_wr32(priv, 0x400820, 0);
354 nv_wr32(priv, 0x400824, 0);
355 nv_wr32(priv, 0x400864, vramsz - 1);
356 nv_wr32(priv, 0x400868, vramsz - 1);
793 357
794 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base); 358 /* interesting.. the below overwrites some of the tile setup above.. */
795 nouveau_irq_register(dev, 12, nv20_graph_isr); 359 nv_wr32(priv, 0x400B20, 0x00000000);
796 360 nv_wr32(priv, 0x400B04, 0xFFFFFFFF);
797 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
798 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
799 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
800 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
801 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
802 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
803 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
804 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
805 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
806 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
807 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
808 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
809 if (dev_priv->card_type == NV_20) {
810 NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
811 NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */
812
813 /* kelvin */
814 if (dev_priv->chipset < 0x25)
815 NVOBJ_CLASS(dev, 0x0097, GR);
816 else
817 NVOBJ_CLASS(dev, 0x0597, GR);
818 } else {
819 NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
820 NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
821 NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
822 NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
823
824 /* rankine */
825 if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
826 NVOBJ_CLASS(dev, 0x0397, GR);
827 else
828 if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
829 NVOBJ_CLASS(dev, 0x0697, GR);
830 else
831 if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
832 NVOBJ_CLASS(dev, 0x0497, GR);
833 }
834 361
362 nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
363 nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
364 nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
365 nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
835 return 0; 366 return 0;
836} 367}
368
369struct nouveau_oclass
370nv20_graph_oclass = {
371 .handle = NV_ENGINE(GR, 0x20),
372 .ofuncs = &(struct nouveau_ofuncs) {
373 .ctor = nv20_graph_ctor,
374 .dtor = nv20_graph_dtor,
375 .init = nv20_graph_init,
376 .fini = _nouveau_graph_fini,
377 },
378};
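The defining nv20 addition over nv10 is priv->ctxtab: a 32-slot table where slot chid holds a channel's context address shifted right by four (the context objects are 16-byte aligned), written on context init and cleared again on fini. A small sketch of that bookkeeping, with a plain array standing in for the gpuobj:

#include <stdint.h>
#include <stdio.h>

#define NV20_CHANNELS 32

static uint32_t ctxtab[NV20_CHANNELS];	/* stand-in for priv->ctxtab */

/* mirrors: nv_wo32(priv->ctxtab, chan->chid * 4, addr >> 4); */
static void ctx_bind(int chid, uint64_t addr)
{
	ctxtab[chid] = (uint32_t)(addr >> 4);
}

/* mirrors the clear done in nv20_graph_context_fini() */
static void ctx_unbind(int chid)
{
	ctxtab[chid] = 0;
}

int main(void)
{
	ctx_bind(3, 0x12340);			/* 16-byte-aligned address */
	printf("slot 3 = 0x%08x\n", ctxtab[3]);	/* 0x00001234 */
	ctx_unbind(3);
	return 0;
}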
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h
new file mode 100644
index 000000000000..2bea7313e03f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h
@@ -0,0 +1,31 @@
1#ifndef __NV20_GRAPH_H__
2#define __NV20_GRAPH_H__
3
4#include <core/enum.h>
5
6#include <engine/graph.h>
7#include <engine/fifo.h>
8
9struct nv20_graph_priv {
10 struct nouveau_graph base;
11 struct nouveau_gpuobj *ctxtab;
12};
13
14struct nv20_graph_chan {
15 struct nouveau_graph_chan base;
16 int chid;
17};
18
19extern struct nouveau_oclass nv25_graph_sclass[];
20int nv20_graph_context_init(struct nouveau_object *);
21int nv20_graph_context_fini(struct nouveau_object *, bool);
22
23void nv20_graph_tile_prog(struct nouveau_engine *, int);
24void nv20_graph_intr(struct nouveau_subdev *);
25
26void nv20_graph_dtor(struct nouveau_object *);
27int nv20_graph_init(struct nouveau_object *);
28
29int nv30_graph_init(struct nouveau_object *);
30
31#endif
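The header also explains the casts scattered through these files, such as "struct nv20_graph_priv *priv = (void *)engine;": each derived struct embeds its base object as the first member, so a pointer to the base is also a valid pointer to the whole object. A standalone illustration with made-up types:

#include <assert.h>
#include <stdio.h>

struct base { int id; };

struct derived {
	struct base base;	/* must be the first member */
	int extra;
};

int main(void)
{
	struct derived d = { .base = { .id = 7 }, .extra = 42 };
	struct base *b = &d.base;

	/* the equivalent of "(void *)engine" in the code above */
	struct derived *back = (void *)b;
	assert(back == &d);

	printf("id=%d extra=%d\n", back->base.id, back->extra);
	return 0;
}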
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
new file mode 100644
index 000000000000..b2b650dd8b28
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
@@ -0,0 +1,167 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/enum.h>
5
6#include <subdev/timer.h>
7#include <subdev/fb.h>
8
9#include <engine/graph.h>
10
11#include "nv20.h"
12#include "regs.h"
13
14/*******************************************************************************
15 * Graphics object classes
16 ******************************************************************************/
17
18struct nouveau_oclass
19nv25_graph_sclass[] = {
20 { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
21 { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
22 { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
23 { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
24 { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
25 { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
26 { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
27 { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
28 { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
29 { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
30 { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
31 { 0x0096, &nv04_graph_ofuncs, NULL }, /* celcius */
32 { 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */
33 { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
34 { 0x0597, &nv04_graph_ofuncs, NULL }, /* kelvin */
35 {},
36};
37
38/*******************************************************************************
39 * PGRAPH context
40 ******************************************************************************/
41
42static int
43nv25_graph_context_ctor(struct nouveau_object *parent,
44 struct nouveau_object *engine,
45 struct nouveau_oclass *oclass, void *data, u32 size,
46 struct nouveau_object **pobject)
47{
48 struct nv20_graph_chan *chan;
49 int ret, i;
50
51 ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x3724,
52 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
53 *pobject = nv_object(chan);
54 if (ret)
55 return ret;
56
57 chan->chid = nouveau_fifo_chan(parent)->chid;
58
59 nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
60 nv_wo32(chan, 0x035c, 0xffff0000);
61 nv_wo32(chan, 0x03c0, 0x0fff0000);
62 nv_wo32(chan, 0x03c4, 0x0fff0000);
63 nv_wo32(chan, 0x049c, 0x00000101);
64 nv_wo32(chan, 0x04b0, 0x00000111);
65 nv_wo32(chan, 0x04c8, 0x00000080);
66 nv_wo32(chan, 0x04cc, 0xffff0000);
67 nv_wo32(chan, 0x04d0, 0x00000001);
68 nv_wo32(chan, 0x04e4, 0x44400000);
69 nv_wo32(chan, 0x04fc, 0x4b800000);
70 for (i = 0x0510; i <= 0x051c; i += 4)
71 nv_wo32(chan, i, 0x00030303);
72 for (i = 0x0530; i <= 0x053c; i += 4)
73 nv_wo32(chan, i, 0x00080000);
74 for (i = 0x0548; i <= 0x0554; i += 4)
75 nv_wo32(chan, i, 0x01012000);
76 for (i = 0x0558; i <= 0x0564; i += 4)
77 nv_wo32(chan, i, 0x000105b8);
78 for (i = 0x0568; i <= 0x0574; i += 4)
79 nv_wo32(chan, i, 0x00080008);
80 for (i = 0x0598; i <= 0x05d4; i += 4)
81 nv_wo32(chan, i, 0x07ff0000);
82 nv_wo32(chan, 0x05e0, 0x4b7fffff);
83 nv_wo32(chan, 0x0620, 0x00000080);
84 nv_wo32(chan, 0x0624, 0x30201000);
85 nv_wo32(chan, 0x0628, 0x70605040);
86 nv_wo32(chan, 0x062c, 0xb0a09080);
87 nv_wo32(chan, 0x0630, 0xf0e0d0c0);
88 nv_wo32(chan, 0x0664, 0x00000001);
89 nv_wo32(chan, 0x066c, 0x00004000);
90 nv_wo32(chan, 0x0678, 0x00000001);
91 nv_wo32(chan, 0x0680, 0x00040000);
92 nv_wo32(chan, 0x0684, 0x00010000);
93 for (i = 0x1b04; i <= 0x2374; i += 16) {
94 nv_wo32(chan, (i + 0), 0x10700ff9);
95 nv_wo32(chan, (i + 4), 0x0436086c);
96 nv_wo32(chan, (i + 8), 0x000c001b);
97 }
98 nv_wo32(chan, 0x2704, 0x3f800000);
99 nv_wo32(chan, 0x2718, 0x3f800000);
100 nv_wo32(chan, 0x2744, 0x40000000);
101 nv_wo32(chan, 0x2748, 0x3f800000);
102 nv_wo32(chan, 0x274c, 0x3f000000);
103 nv_wo32(chan, 0x2754, 0x40000000);
104 nv_wo32(chan, 0x2758, 0x3f800000);
105 nv_wo32(chan, 0x2760, 0xbf800000);
106 nv_wo32(chan, 0x2768, 0xbf800000);
107 nv_wo32(chan, 0x308c, 0x000fe000);
108 nv_wo32(chan, 0x3108, 0x000003f8);
109 nv_wo32(chan, 0x3468, 0x002fe000);
110 for (i = 0x3484; i <= 0x34a0; i += 4)
111 nv_wo32(chan, i, 0x001c527c);
112 return 0;
113}
114
115static struct nouveau_oclass
116nv25_graph_cclass = {
117 .handle = NV_ENGCTX(GR, 0x25),
118 .ofuncs = &(struct nouveau_ofuncs) {
119 .ctor = nv25_graph_context_ctor,
120 .dtor = _nouveau_graph_context_dtor,
121 .init = nv20_graph_context_init,
122 .fini = nv20_graph_context_fini,
123 .rd32 = _nouveau_graph_context_rd32,
124 .wr32 = _nouveau_graph_context_wr32,
125 },
126};
127
128/*******************************************************************************
129 * PGRAPH engine/subdev functions
130 ******************************************************************************/
131
132static int
133nv25_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
134 struct nouveau_oclass *oclass, void *data, u32 size,
135 struct nouveau_object **pobject)
136{
137 struct nv20_graph_priv *priv;
138 int ret;
139
140 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
141 *pobject = nv_object(priv);
142 if (ret)
143 return ret;
144
145 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
146 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
147 if (ret)
148 return ret;
149
150 nv_subdev(priv)->unit = 0x00001000;
151 nv_subdev(priv)->intr = nv20_graph_intr;
152 nv_engine(priv)->cclass = &nv25_graph_cclass;
153 nv_engine(priv)->sclass = nv25_graph_sclass;
154 nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
155 return 0;
156}
157
158struct nouveau_oclass
159nv25_graph_oclass = {
160 .handle = NV_ENGINE(GR, 0x25),
161 .ofuncs = &(struct nouveau_ofuncs) {
162 .ctor = nv25_graph_ctor,
163 .dtor = nv20_graph_dtor,
164 .init = nv20_graph_init,
165 .fini = _nouveau_graph_fini,
166 },
167};
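
The constructor above stamps the owning FIFO channel id into word 0x28 of the context image, and nv25_graph_ctor allocates a 32-entry context table (32 * 4 bytes, one 32-bit slot per channel) that the init path points PGRAPH at via NV20_PGRAPH_CHANNEL_CTX_TABLE (see nv30_graph_init below). A minimal sketch of how the shared nv20 context-init path would be expected to publish a context into that table; the helper name is hypothetical, the real logic lives in nv20_graph_context_init, which is not part of this hunk:

	/* hypothetical sketch: tie channel 'chid' to its PGRAPH context by
	 * writing the context's instance address (in 16-byte units, matching
	 * the addr >> 4 convention used elsewhere) into ctxtab slot chid */
	static void
	nv2x_graph_ctxtab_set_sketch(struct nv20_graph_priv *priv,
				     struct nv20_graph_chan *chan)
	{
		nv_wo32(priv->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
	}
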
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
new file mode 100644
index 000000000000..700462fa0ae0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
@@ -0,0 +1,134 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/enum.h>
5
6#include <subdev/timer.h>
7#include <subdev/fb.h>
8
9#include <engine/graph.h>
10
11#include "nv20.h"
12#include "regs.h"
13
14/*******************************************************************************
15 * PGRAPH context
16 ******************************************************************************/
17
18static int
19nv2a_graph_context_ctor(struct nouveau_object *parent,
20 struct nouveau_object *engine,
21 struct nouveau_oclass *oclass, void *data, u32 size,
22 struct nouveau_object **pobject)
23{
24 struct nv20_graph_chan *chan;
25 int ret, i;
26
27 ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x36b0,
28 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
29 *pobject = nv_object(chan);
30 if (ret)
31 return ret;
32
33 chan->chid = nouveau_fifo_chan(parent)->chid;
34
35 nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
36 nv_wo32(chan, 0x033c, 0xffff0000);
37 nv_wo32(chan, 0x03a0, 0x0fff0000);
38 nv_wo32(chan, 0x03a4, 0x0fff0000);
39 nv_wo32(chan, 0x047c, 0x00000101);
40 nv_wo32(chan, 0x0490, 0x00000111);
41 nv_wo32(chan, 0x04a8, 0x44400000);
42 for (i = 0x04d4; i <= 0x04e0; i += 4)
43 nv_wo32(chan, i, 0x00030303);
44 for (i = 0x04f4; i <= 0x0500; i += 4)
45 nv_wo32(chan, i, 0x00080000);
46 for (i = 0x050c; i <= 0x0518; i += 4)
47 nv_wo32(chan, i, 0x01012000);
48 for (i = 0x051c; i <= 0x0528; i += 4)
49 nv_wo32(chan, i, 0x000105b8);
50 for (i = 0x052c; i <= 0x0538; i += 4)
51 nv_wo32(chan, i, 0x00080008);
52 for (i = 0x055c; i <= 0x0598; i += 4)
53 nv_wo32(chan, i, 0x07ff0000);
54 nv_wo32(chan, 0x05a4, 0x4b7fffff);
55 nv_wo32(chan, 0x05fc, 0x00000001);
56 nv_wo32(chan, 0x0604, 0x00004000);
57 nv_wo32(chan, 0x0610, 0x00000001);
58 nv_wo32(chan, 0x0618, 0x00040000);
59 nv_wo32(chan, 0x061c, 0x00010000);
60 for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
61 nv_wo32(chan, (i + 0), 0x10700ff9);
62 nv_wo32(chan, (i + 4), 0x0436086c);
63 nv_wo32(chan, (i + 8), 0x000c001b);
64 }
65 nv_wo32(chan, 0x269c, 0x3f800000);
66 nv_wo32(chan, 0x26b0, 0x3f800000);
67 nv_wo32(chan, 0x26dc, 0x40000000);
68 nv_wo32(chan, 0x26e0, 0x3f800000);
69 nv_wo32(chan, 0x26e4, 0x3f000000);
70 nv_wo32(chan, 0x26ec, 0x40000000);
71 nv_wo32(chan, 0x26f0, 0x3f800000);
72 nv_wo32(chan, 0x26f8, 0xbf800000);
73 nv_wo32(chan, 0x2700, 0xbf800000);
74 nv_wo32(chan, 0x3024, 0x000fe000);
75 nv_wo32(chan, 0x30a0, 0x000003f8);
76 nv_wo32(chan, 0x33fc, 0x002fe000);
77 for (i = 0x341c; i <= 0x3438; i += 4)
78 nv_wo32(chan, i, 0x001c527c);
79 return 0;
80}
81
82static struct nouveau_oclass
83nv2a_graph_cclass = {
84 .handle = NV_ENGCTX(GR, 0x2a),
85 .ofuncs = &(struct nouveau_ofuncs) {
86 .ctor = nv2a_graph_context_ctor,
87 .dtor = _nouveau_graph_context_dtor,
88 .init = nv20_graph_context_init,
89 .fini = nv20_graph_context_fini,
90 .rd32 = _nouveau_graph_context_rd32,
91 .wr32 = _nouveau_graph_context_wr32,
92 },
93};
94
95/*******************************************************************************
96 * PGRAPH engine/subdev functions
97 ******************************************************************************/
98
99static int
100nv2a_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
101 struct nouveau_oclass *oclass, void *data, u32 size,
102 struct nouveau_object **pobject)
103{
104 struct nv20_graph_priv *priv;
105 int ret;
106
107 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
108 *pobject = nv_object(priv);
109 if (ret)
110 return ret;
111
112 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
113 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
114 if (ret)
115 return ret;
116
117 nv_subdev(priv)->unit = 0x00001000;
118 nv_subdev(priv)->intr = nv20_graph_intr;
119 nv_engine(priv)->cclass = &nv2a_graph_cclass;
120 nv_engine(priv)->sclass = nv25_graph_sclass;
121 nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
122 return 0;
123}
124
125struct nouveau_oclass
126nv2a_graph_oclass = {
127 .handle = NV_ENGINE(GR, 0x2a),
128 .ofuncs = &(struct nouveau_ofuncs) {
129 .ctor = nv2a_graph_ctor,
130 .dtor = nv20_graph_dtor,
131 .init = nv20_graph_init,
132 .fini = _nouveau_graph_fini,
133 },
134};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
new file mode 100644
index 000000000000..cedadaa92d3f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
@@ -0,0 +1,238 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/enum.h>
5
6#include <subdev/timer.h>
7#include <subdev/fb.h>
8
9#include <engine/graph.h>
10
11#include "nv20.h"
12#include "regs.h"
13
14/*******************************************************************************
15 * Graphics object classes
16 ******************************************************************************/
17
18static struct nouveau_oclass
19nv30_graph_sclass[] = {
20 { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
21 { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
22 { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
23 { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
24 { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
25 { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
26 { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
27 { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
28 { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
29 { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
30 { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
31 { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
32 { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
33 { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
34 { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
35 { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
36 { 0x0397, &nv04_graph_ofuncs, NULL }, /* rankine */
37 {},
38};
39
40/*******************************************************************************
41 * PGRAPH context
42 ******************************************************************************/
43
44static int
45nv30_graph_context_ctor(struct nouveau_object *parent,
46 struct nouveau_object *engine,
47 struct nouveau_oclass *oclass, void *data, u32 size,
48 struct nouveau_object **pobject)
49{
50 struct nv20_graph_chan *chan;
51 int ret, i;
52
53 ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x5f48,
54 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
55 *pobject = nv_object(chan);
56 if (ret)
57 return ret;
58
59 chan->chid = nouveau_fifo_chan(parent)->chid;
60
61 nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
62 nv_wo32(chan, 0x0410, 0x00000101);
63 nv_wo32(chan, 0x0424, 0x00000111);
64 nv_wo32(chan, 0x0428, 0x00000060);
65 nv_wo32(chan, 0x0444, 0x00000080);
66 nv_wo32(chan, 0x0448, 0xffff0000);
67 nv_wo32(chan, 0x044c, 0x00000001);
68 nv_wo32(chan, 0x0460, 0x44400000);
69 nv_wo32(chan, 0x048c, 0xffff0000);
70 for (i = 0x04e0; i < 0x04e8; i += 4)
71 nv_wo32(chan, i, 0x0fff0000);
72 nv_wo32(chan, 0x04ec, 0x00011100);
73 for (i = 0x0508; i < 0x0548; i += 4)
74 nv_wo32(chan, i, 0x07ff0000);
75 nv_wo32(chan, 0x0550, 0x4b7fffff);
76 nv_wo32(chan, 0x058c, 0x00000080);
77 nv_wo32(chan, 0x0590, 0x30201000);
78 nv_wo32(chan, 0x0594, 0x70605040);
79 nv_wo32(chan, 0x0598, 0xb8a89888);
80 nv_wo32(chan, 0x059c, 0xf8e8d8c8);
81 nv_wo32(chan, 0x05b0, 0xb0000000);
82 for (i = 0x0600; i < 0x0640; i += 4)
83 nv_wo32(chan, i, 0x00010588);
84 for (i = 0x0640; i < 0x0680; i += 4)
85 nv_wo32(chan, i, 0x00030303);
86 for (i = 0x06c0; i < 0x0700; i += 4)
87 nv_wo32(chan, i, 0x0008aae4);
88 for (i = 0x0700; i < 0x0740; i += 4)
89 nv_wo32(chan, i, 0x01012000);
90 for (i = 0x0740; i < 0x0780; i += 4)
91 nv_wo32(chan, i, 0x00080008);
92 nv_wo32(chan, 0x085c, 0x00040000);
93 nv_wo32(chan, 0x0860, 0x00010000);
94 for (i = 0x0864; i < 0x0874; i += 4)
95 nv_wo32(chan, i, 0x00040004);
96 for (i = 0x1f18; i <= 0x3088 ; i += 16) {
97 nv_wo32(chan, i + 0, 0x10700ff9);
98		nv_wo32(chan, i + 4, 0x0436086c);
99		nv_wo32(chan, i + 8, 0x000c001b);
100 }
101 for (i = 0x30b8; i < 0x30c8; i += 4)
102 nv_wo32(chan, i, 0x0000ffff);
103 nv_wo32(chan, 0x344c, 0x3f800000);
104 nv_wo32(chan, 0x3808, 0x3f800000);
105 nv_wo32(chan, 0x381c, 0x3f800000);
106 nv_wo32(chan, 0x3848, 0x40000000);
107 nv_wo32(chan, 0x384c, 0x3f800000);
108 nv_wo32(chan, 0x3850, 0x3f000000);
109 nv_wo32(chan, 0x3858, 0x40000000);
110 nv_wo32(chan, 0x385c, 0x3f800000);
111 nv_wo32(chan, 0x3864, 0xbf800000);
112 nv_wo32(chan, 0x386c, 0xbf800000);
113 return 0;
114}
115
116static struct nouveau_oclass
117nv30_graph_cclass = {
118 .handle = NV_ENGCTX(GR, 0x30),
119 .ofuncs = &(struct nouveau_ofuncs) {
120 .ctor = nv30_graph_context_ctor,
121 .dtor = _nouveau_graph_context_dtor,
122 .init = nv20_graph_context_init,
123 .fini = nv20_graph_context_fini,
124 .rd32 = _nouveau_graph_context_rd32,
125 .wr32 = _nouveau_graph_context_wr32,
126 },
127};
128
129/*******************************************************************************
130 * PGRAPH engine/subdev functions
131 ******************************************************************************/
132
133static int
134nv30_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
135 struct nouveau_oclass *oclass, void *data, u32 size,
136 struct nouveau_object **pobject)
137{
138 struct nv20_graph_priv *priv;
139 int ret;
140
141 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
142 *pobject = nv_object(priv);
143 if (ret)
144 return ret;
145
146 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
147 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
148 if (ret)
149 return ret;
150
151 nv_subdev(priv)->unit = 0x00001000;
152 nv_subdev(priv)->intr = nv20_graph_intr;
153 nv_engine(priv)->cclass = &nv30_graph_cclass;
154 nv_engine(priv)->sclass = nv30_graph_sclass;
155 nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
156 return 0;
157}
158
159int
160nv30_graph_init(struct nouveau_object *object)
161{
162 struct nouveau_engine *engine = nv_engine(object);
163 struct nv20_graph_priv *priv = (void *)engine;
164 struct nouveau_fb *pfb = nouveau_fb(object);
165 int ret, i;
166
167 ret = nouveau_graph_init(&priv->base);
168 if (ret)
169 return ret;
170
171 nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
172
173 nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
174 nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
175
176 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
177 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
178 nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
179 nv_wr32(priv, 0x400890, 0x01b463ff);
180 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
181 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
182 nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
183 nv_wr32(priv, 0x400B80, 0x1003d888);
184 nv_wr32(priv, 0x400B84, 0x0c000000);
185 nv_wr32(priv, 0x400098, 0x00000000);
186 nv_wr32(priv, 0x40009C, 0x0005ad00);
187 nv_wr32(priv, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
188 nv_wr32(priv, 0x4000a0, 0x00000000);
189 nv_wr32(priv, 0x4000a4, 0x00000008);
190 nv_wr32(priv, 0x4008a8, 0xb784a400);
191 nv_wr32(priv, 0x400ba0, 0x002f8685);
192 nv_wr32(priv, 0x400ba4, 0x00231f3f);
193 nv_wr32(priv, 0x4008a4, 0x40000020);
194
195 if (nv_device(priv)->chipset == 0x34) {
196 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
197 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00200201);
198 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
199 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000008);
200 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
201 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000032);
202 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
203 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000002);
204 }
205
206 nv_wr32(priv, 0x4000c0, 0x00000016);
207
208 /* Turn all the tiling regions off. */
209 for (i = 0; i < pfb->tile.regions; i++)
210 engine->tile_prog(engine, i);
211
212 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
213 nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF);
214 nv_wr32(priv, 0x0040075c , 0x00000001);
215
216 /* begin RAM config */
217 /* vramsz = pci_resource_len(priv->dev->pdev, 0) - 1; */
218 nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
219 nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
220 if (nv_device(priv)->chipset != 0x34) {
221 nv_wr32(priv, 0x400750, 0x00EA0000);
222 nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100200));
223 nv_wr32(priv, 0x400750, 0x00EA0004);
224 nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100204));
225 }
226 return 0;
227}
228
229struct nouveau_oclass
230nv30_graph_oclass = {
231 .handle = NV_ENGINE(GR, 0x30),
232 .ofuncs = &(struct nouveau_ofuncs) {
233 .ctor = nv30_graph_ctor,
234 .dtor = nv20_graph_dtor,
235 .init = nv30_graph_init,
236 .fini = _nouveau_graph_fini,
237 },
238};
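
Two patterns in nv30_graph_init above are worth calling out: the chipset-0x34 writes go through the RDI index/data pair, an indirect window into PGRAPH-internal state, and the RAM config mirrors the PFB configuration registers 0x100200/0x100204 into PGRAPH. A sketch of the RDI access pattern as a helper; the function name is an assumption for illustration, the init code simply open-codes the register pairs:

	/* assumed illustration: each RDI write is an index select followed
	 * by a data write through the same two registers */
	static void
	nv30_graph_rdi_wr32_sketch(struct nv20_graph_priv *priv,
				   u32 index, u32 data)
	{
		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, index);
		nv_wr32(priv, NV10_PGRAPH_RDI_DATA, data);
	}
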
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
new file mode 100644
index 000000000000..273f6320027b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
@@ -0,0 +1,168 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/enum.h>
5
6#include <subdev/timer.h>
7#include <subdev/fb.h>
8
9#include <engine/graph.h>
10
11#include "nv20.h"
12#include "regs.h"
13
14/*******************************************************************************
15 * Graphics object classes
16 ******************************************************************************/
17
18static struct nouveau_oclass
19nv34_graph_sclass[] = {
20 { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
21 { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
22 { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
23 { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
24 { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
25 { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
26 { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
27 { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
28 { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
29 { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
30 { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
31 { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
32 { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
33 { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
34 { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
35 { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
36 { 0x0697, &nv04_graph_ofuncs, NULL }, /* rankine */
37 {},
38};
39
40/*******************************************************************************
41 * PGRAPH context
42 ******************************************************************************/
43
44static int
45nv34_graph_context_ctor(struct nouveau_object *parent,
46 struct nouveau_object *engine,
47 struct nouveau_oclass *oclass, void *data, u32 size,
48 struct nouveau_object **pobject)
49{
50 struct nv20_graph_chan *chan;
51 int ret, i;
52
53 ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x46dc,
54 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
55 *pobject = nv_object(chan);
56 if (ret)
57 return ret;
58
59 chan->chid = nouveau_fifo_chan(parent)->chid;
60
61 nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
62 nv_wo32(chan, 0x040c, 0x01000101);
63 nv_wo32(chan, 0x0420, 0x00000111);
64 nv_wo32(chan, 0x0424, 0x00000060);
65 nv_wo32(chan, 0x0440, 0x00000080);
66 nv_wo32(chan, 0x0444, 0xffff0000);
67 nv_wo32(chan, 0x0448, 0x00000001);
68 nv_wo32(chan, 0x045c, 0x44400000);
69 nv_wo32(chan, 0x0480, 0xffff0000);
70 for (i = 0x04d4; i < 0x04dc; i += 4)
71 nv_wo32(chan, i, 0x0fff0000);
72 nv_wo32(chan, 0x04e0, 0x00011100);
73 for (i = 0x04fc; i < 0x053c; i += 4)
74 nv_wo32(chan, i, 0x07ff0000);
75 nv_wo32(chan, 0x0544, 0x4b7fffff);
76 nv_wo32(chan, 0x057c, 0x00000080);
77 nv_wo32(chan, 0x0580, 0x30201000);
78 nv_wo32(chan, 0x0584, 0x70605040);
79 nv_wo32(chan, 0x0588, 0xb8a89888);
80 nv_wo32(chan, 0x058c, 0xf8e8d8c8);
81 nv_wo32(chan, 0x05a0, 0xb0000000);
82 for (i = 0x05f0; i < 0x0630; i += 4)
83 nv_wo32(chan, i, 0x00010588);
84 for (i = 0x0630; i < 0x0670; i += 4)
85 nv_wo32(chan, i, 0x00030303);
86 for (i = 0x06b0; i < 0x06f0; i += 4)
87 nv_wo32(chan, i, 0x0008aae4);
88 for (i = 0x06f0; i < 0x0730; i += 4)
89 nv_wo32(chan, i, 0x01012000);
90 for (i = 0x0730; i < 0x0770; i += 4)
91 nv_wo32(chan, i, 0x00080008);
92 nv_wo32(chan, 0x0850, 0x00040000);
93 nv_wo32(chan, 0x0854, 0x00010000);
94 for (i = 0x0858; i < 0x0868; i += 4)
95 nv_wo32(chan, i, 0x00040004);
96 for (i = 0x15ac; i <= 0x271c ; i += 16) {
97 nv_wo32(chan, i + 0, 0x10700ff9);
98		nv_wo32(chan, i + 4, 0x0436086c);
99		nv_wo32(chan, i + 8, 0x000c001b);
100 }
101 for (i = 0x274c; i < 0x275c; i += 4)
102 nv_wo32(chan, i, 0x0000ffff);
103 nv_wo32(chan, 0x2ae0, 0x3f800000);
104 nv_wo32(chan, 0x2e9c, 0x3f800000);
105 nv_wo32(chan, 0x2eb0, 0x3f800000);
106 nv_wo32(chan, 0x2edc, 0x40000000);
107 nv_wo32(chan, 0x2ee0, 0x3f800000);
108 nv_wo32(chan, 0x2ee4, 0x3f000000);
109 nv_wo32(chan, 0x2eec, 0x40000000);
110 nv_wo32(chan, 0x2ef0, 0x3f800000);
111 nv_wo32(chan, 0x2ef8, 0xbf800000);
112 nv_wo32(chan, 0x2f00, 0xbf800000);
113 return 0;
114}
115
116static struct nouveau_oclass
117nv34_graph_cclass = {
118 .handle = NV_ENGCTX(GR, 0x34),
119 .ofuncs = &(struct nouveau_ofuncs) {
120 .ctor = nv34_graph_context_ctor,
121 .dtor = _nouveau_graph_context_dtor,
122 .init = nv20_graph_context_init,
123 .fini = nv20_graph_context_fini,
124 .rd32 = _nouveau_graph_context_rd32,
125 .wr32 = _nouveau_graph_context_wr32,
126 },
127};
128
129/*******************************************************************************
130 * PGRAPH engine/subdev functions
131 ******************************************************************************/
132
133static int
134nv34_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
135 struct nouveau_oclass *oclass, void *data, u32 size,
136 struct nouveau_object **pobject)
137{
138 struct nv20_graph_priv *priv;
139 int ret;
140
141 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
142 *pobject = nv_object(priv);
143 if (ret)
144 return ret;
145
146 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
147 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
148 if (ret)
149 return ret;
150
151 nv_subdev(priv)->unit = 0x00001000;
152 nv_subdev(priv)->intr = nv20_graph_intr;
153 nv_engine(priv)->cclass = &nv34_graph_cclass;
154 nv_engine(priv)->sclass = nv34_graph_sclass;
155 nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
156 return 0;
157}
158
159struct nouveau_oclass
160nv34_graph_oclass = {
161 .handle = NV_ENGINE(GR, 0x34),
162 .ofuncs = &(struct nouveau_ofuncs) {
163 .ctor = nv34_graph_ctor,
164 .dtor = nv20_graph_dtor,
165 .init = nv30_graph_init,
166 .fini = _nouveau_graph_fini,
167 },
168};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
new file mode 100644
index 000000000000..f40ee2116ee1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
@@ -0,0 +1,166 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/enum.h>
5
6#include <subdev/timer.h>
7#include <subdev/fb.h>
8
9#include "nv20.h"
10#include "regs.h"
11
12/*******************************************************************************
13 * Graphics object classes
14 ******************************************************************************/
15
16static struct nouveau_oclass
17nv35_graph_sclass[] = {
18 { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
19 { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
20 { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
21 { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
22 { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
23 { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
24 { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
25 { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
26 { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
27 { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
28 { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
29 { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
30 { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
31 { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
32 { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
33 { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
34 { 0x0497, &nv04_graph_ofuncs, NULL }, /* rankine */
35 {},
36};
37
38/*******************************************************************************
39 * PGRAPH context
40 ******************************************************************************/
41
42static int
43nv35_graph_context_ctor(struct nouveau_object *parent,
44 struct nouveau_object *engine,
45 struct nouveau_oclass *oclass, void *data, u32 size,
46 struct nouveau_object **pobject)
47{
48 struct nv20_graph_chan *chan;
49 int ret, i;
50
51 ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x577c,
52 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
53 *pobject = nv_object(chan);
54 if (ret)
55 return ret;
56
57 chan->chid = nouveau_fifo_chan(parent)->chid;
58
59 nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
60 nv_wo32(chan, 0x040c, 0x00000101);
61 nv_wo32(chan, 0x0420, 0x00000111);
62 nv_wo32(chan, 0x0424, 0x00000060);
63 nv_wo32(chan, 0x0440, 0x00000080);
64 nv_wo32(chan, 0x0444, 0xffff0000);
65 nv_wo32(chan, 0x0448, 0x00000001);
66 nv_wo32(chan, 0x045c, 0x44400000);
67 nv_wo32(chan, 0x0488, 0xffff0000);
68 for (i = 0x04dc; i < 0x04e4; i += 4)
69 nv_wo32(chan, i, 0x0fff0000);
70 nv_wo32(chan, 0x04e8, 0x00011100);
71 for (i = 0x0504; i < 0x0544; i += 4)
72 nv_wo32(chan, i, 0x07ff0000);
73 nv_wo32(chan, 0x054c, 0x4b7fffff);
74 nv_wo32(chan, 0x0588, 0x00000080);
75 nv_wo32(chan, 0x058c, 0x30201000);
76 nv_wo32(chan, 0x0590, 0x70605040);
77 nv_wo32(chan, 0x0594, 0xb8a89888);
78 nv_wo32(chan, 0x0598, 0xf8e8d8c8);
79 nv_wo32(chan, 0x05ac, 0xb0000000);
80 for (i = 0x0604; i < 0x0644; i += 4)
81 nv_wo32(chan, i, 0x00010588);
82 for (i = 0x0644; i < 0x0684; i += 4)
83 nv_wo32(chan, i, 0x00030303);
84 for (i = 0x06c4; i < 0x0704; i += 4)
85 nv_wo32(chan, i, 0x0008aae4);
86 for (i = 0x0704; i < 0x0744; i += 4)
87 nv_wo32(chan, i, 0x01012000);
88 for (i = 0x0744; i < 0x0784; i += 4)
89 nv_wo32(chan, i, 0x00080008);
90 nv_wo32(chan, 0x0860, 0x00040000);
91 nv_wo32(chan, 0x0864, 0x00010000);
92 for (i = 0x0868; i < 0x0878; i += 4)
93 nv_wo32(chan, i, 0x00040004);
94 for (i = 0x1f1c; i <= 0x308c ; i += 16) {
95 nv_wo32(chan, i + 0, 0x10700ff9);
96 nv_wo32(chan, i + 4, 0x0436086c);
97 nv_wo32(chan, i + 8, 0x000c001b);
98 }
99 for (i = 0x30bc; i < 0x30cc; i += 4)
100 nv_wo32(chan, i, 0x0000ffff);
101 nv_wo32(chan, 0x3450, 0x3f800000);
102 nv_wo32(chan, 0x380c, 0x3f800000);
103 nv_wo32(chan, 0x3820, 0x3f800000);
104 nv_wo32(chan, 0x384c, 0x40000000);
105 nv_wo32(chan, 0x3850, 0x3f800000);
106 nv_wo32(chan, 0x3854, 0x3f000000);
107 nv_wo32(chan, 0x385c, 0x40000000);
108 nv_wo32(chan, 0x3860, 0x3f800000);
109 nv_wo32(chan, 0x3868, 0xbf800000);
110 nv_wo32(chan, 0x3870, 0xbf800000);
111 return 0;
112}
113
114static struct nouveau_oclass
115nv35_graph_cclass = {
116 .handle = NV_ENGCTX(GR, 0x35),
117 .ofuncs = &(struct nouveau_ofuncs) {
118 .ctor = nv35_graph_context_ctor,
119 .dtor = _nouveau_graph_context_dtor,
120 .init = nv20_graph_context_init,
121 .fini = nv20_graph_context_fini,
122 .rd32 = _nouveau_graph_context_rd32,
123 .wr32 = _nouveau_graph_context_wr32,
124 },
125};
126
127/*******************************************************************************
128 * PGRAPH engine/subdev functions
129 ******************************************************************************/
130
131static int
132nv35_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
133 struct nouveau_oclass *oclass, void *data, u32 size,
134 struct nouveau_object **pobject)
135{
136 struct nv20_graph_priv *priv;
137 int ret;
138
139 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
140 *pobject = nv_object(priv);
141 if (ret)
142 return ret;
143
144 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
145 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
146 if (ret)
147 return ret;
148
149 nv_subdev(priv)->unit = 0x00001000;
150 nv_subdev(priv)->intr = nv20_graph_intr;
151 nv_engine(priv)->cclass = &nv35_graph_cclass;
152 nv_engine(priv)->sclass = nv35_graph_sclass;
153 nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
154 return 0;
155}
156
157struct nouveau_oclass
158nv35_graph_oclass = {
159 .handle = NV_ENGINE(GR, 0x35),
160 .ofuncs = &(struct nouveau_ofuncs) {
161 .ctor = nv35_graph_ctor,
162 .dtor = nv20_graph_dtor,
163 .init = nv30_graph_init,
164 .fini = _nouveau_graph_fini,
165 },
166};
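
nv30, nv34 and nv35 differ only in context image layout and in their rankine 3D class (0x0397, 0x0697 and 0x0497 respectively); all three share nv30_graph_init and the nv20 context init/fini hooks. A rough sketch of how a 16-bit object class would be matched against one of these zero-terminated sclass tables; the lookup helper and the handle encoding are assumptions, not code from this patch:

	/* assumed sketch: walk the sclass table until the {} terminator,
	 * comparing the low 16 bits of each handle against the class id */
	static struct nouveau_oclass *
	graph_sclass_find_sketch(struct nouveau_oclass *sclass, u16 class)
	{
		while (sclass->ofuncs) {
			if ((sclass->handle & 0xffff) == class)
				return sclass;
			sclass++;
		}
		return NULL; /* unknown class; object creation fails */
	}
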
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 466d21514b2c..2f9f2c69d1e3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -1,151 +1,238 @@
1/* 1/*
2 * Copyright (C) 2007 Ben Skeggs. 2 * Copyright 2012 Red Hat Inc.
3 * All Rights Reserved.
4 * 3 *
5 * Permission is hereby granted, free of charge, to any person obtaining 4 * Permission is hereby granted, free of charge, to any person obtaining a
6 * a copy of this software and associated documentation files (the 5 * copy of this software and associated documentation files (the "Software"),
7 * "Software"), to deal in the Software without restriction, including 6 * to deal in the Software without restriction, including without limitation
8 * without limitation the rights to use, copy, modify, merge, publish, 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * distribute, sublicense, and/or sell copies of the Software, and to 8 * and/or sell copies of the Software, and to permit persons to whom the
10 * permit persons to whom the Software is furnished to do so, subject to 9 * Software is furnished to do so, subject to the following conditions:
11 * the following conditions:
12 * 10 *
13 * The above copyright notice and this permission notice (including the 11 * The above copyright notice and this permission notice shall be included in
14 * next paragraph) shall be included in all copies or substantial 12 * all copies or substantial portions of the Software.
15 * portions of the Software.
16 * 13 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
24 * 21 *
22 * Authors: Ben Skeggs
25 */ 23 */
26 24
27#include "drmP.h" 25#include <core/os.h>
28#include "drm.h" 26#include <core/class.h>
29#include "nouveau_drv.h" 27#include <core/handle.h>
28#include <core/engctx.h>
29
30#include <subdev/fb.h>
31#include <subdev/timer.h>
32
33#include <engine/graph.h>
30#include <engine/fifo.h> 34#include <engine/fifo.h>
31#include <core/ramht.h>
32 35
33struct nv40_graph_engine { 36#include "nv40.h"
34 struct nouveau_exec_engine base; 37#include "regs.h"
35 u32 grctx_size; 38
39struct nv40_graph_priv {
40 struct nouveau_graph base;
41 u32 size;
36}; 42};
37 43
44struct nv40_graph_chan {
45 struct nouveau_graph_chan base;
46};
47
48/*******************************************************************************
49 * Graphics object classes
50 ******************************************************************************/
51
38static int 52static int
39nv40_graph_context_new(struct nouveau_channel *chan, int engine) 53nv40_graph_object_ctor(struct nouveau_object *parent,
54 struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
40{ 57{
41 struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine); 58 struct nouveau_gpuobj *obj;
42 struct drm_device *dev = chan->dev;
43 struct drm_nouveau_private *dev_priv = dev->dev_private;
44 struct nouveau_gpuobj *grctx = NULL;
45 unsigned long flags;
46 int ret; 59 int ret;
47 60
48 ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16, 61 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
49 NVOBJ_FLAG_ZERO_ALLOC, &grctx); 62 20, 16, 0, &obj);
63 *pobject = nv_object(obj);
50 if (ret) 64 if (ret)
51 return ret; 65 return ret;
52 66
53 /* Initialise default context values */ 67 nv_wo32(obj, 0x00, nv_mclass(obj));
54 nv40_grctx_fill(dev, grctx); 68 nv_wo32(obj, 0x04, 0x00000000);
55 nv_wo32(grctx, 0, grctx->addr); 69 nv_wo32(obj, 0x08, 0x00000000);
56 70#ifdef __BIG_ENDIAN
57 /* init grctx pointer in ramfc, and on PFIFO if channel is 71 nv_mo32(obj, 0x08, 0x01000000, 0x01000000);
58 * already active there 72#endif
59 */ 73 nv_wo32(obj, 0x0c, 0x00000000);
60 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 74 nv_wo32(obj, 0x10, 0x00000000);
61 nv_wo32(chan->ramfc, 0x38, grctx->addr >> 4);
62 nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
63 if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
64 nv_wr32(dev, 0x0032e0, grctx->addr >> 4);
65 nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
66 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
67
68 chan->engctx[engine] = grctx;
69 return 0; 75 return 0;
70} 76}
71 77
72static void 78struct nouveau_ofuncs
73nv40_graph_context_del(struct nouveau_channel *chan, int engine) 79nv40_graph_ofuncs = {
74{ 80 .ctor = nv40_graph_object_ctor,
75 struct nouveau_gpuobj *grctx = chan->engctx[engine]; 81 .dtor = _nouveau_gpuobj_dtor,
76 struct drm_device *dev = chan->dev; 82 .init = _nouveau_gpuobj_init,
77 struct drm_nouveau_private *dev_priv = dev->dev_private; 83 .fini = _nouveau_gpuobj_fini,
78 u32 inst = 0x01000000 | (grctx->addr >> 4); 84 .rd32 = _nouveau_gpuobj_rd32,
79 unsigned long flags; 85 .wr32 = _nouveau_gpuobj_wr32,
86};
80 87
81 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 88static struct nouveau_oclass
82 nv_mask(dev, 0x400720, 0x00000000, 0x00000001); 89nv40_graph_sclass[] = {
83 if (nv_rd32(dev, 0x40032c) == inst) 90 { 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
84 nv_mask(dev, 0x40032c, 0x01000000, 0x00000000); 91 { 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
85 if (nv_rd32(dev, 0x400330) == inst) 92 { 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
86 nv_mask(dev, 0x400330, 0x01000000, 0x00000000); 93 { 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
87 nv_mask(dev, 0x400720, 0x00000001, 0x00000001); 94 { 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
88 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 95 { 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
89 96 { 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
90 /* Free the context resources */ 97 { 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
91 nouveau_gpuobj_ref(NULL, &grctx); 98 { 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
92 chan->engctx[engine] = NULL; 99 { 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
93} 100 { 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
101 { 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
102 { 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
103 { 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
104 { 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
105 { 0x4097, &nv40_graph_ofuncs, NULL }, /* curie */
106 {},
107};
108
109static struct nouveau_oclass
110nv44_graph_sclass[] = {
111 { 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
112 { 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
113 { 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
114 { 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
115 { 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
116 { 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
117 { 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
118 { 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
119 { 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
120 { 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
121 { 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
122 { 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
123 { 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
124 { 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
125 { 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
126 { 0x4497, &nv40_graph_ofuncs, NULL }, /* curie */
127 {},
128};
129
130/*******************************************************************************
131 * PGRAPH context
132 ******************************************************************************/
94 133
95int 134static int
96nv40_graph_object_new(struct nouveau_channel *chan, int engine, 135nv40_graph_context_ctor(struct nouveau_object *parent,
97 u32 handle, u16 class) 136 struct nouveau_object *engine,
137 struct nouveau_oclass *oclass, void *data, u32 size,
138 struct nouveau_object **pobject)
98{ 139{
99 struct drm_device *dev = chan->dev; 140 struct nv40_graph_priv *priv = (void *)engine;
100 struct nouveau_gpuobj *obj = NULL; 141 struct nv40_graph_chan *chan;
101 int ret; 142 int ret;
102 143
103 ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_FREE, &obj); 144 ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
145 priv->size, 16,
146 NVOBJ_FLAG_ZERO_ALLOC, &chan);
147 *pobject = nv_object(chan);
104 if (ret) 148 if (ret)
105 return ret; 149 return ret;
106 obj->engine = 1;
107 obj->class = class;
108 150
109 nv_wo32(obj, 0x00, class); 151 nv40_grctx_fill(nv_device(priv), nv_gpuobj(chan));
110 nv_wo32(obj, 0x04, 0x00000000); 152 nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4);
111#ifndef __BIG_ENDIAN 153 return 0;
112 nv_wo32(obj, 0x08, 0x00000000); 154}
113#else 155
114 nv_wo32(obj, 0x08, 0x01000000); 156static int
115#endif 157nv40_graph_context_fini(struct nouveau_object *object, bool suspend)
116 nv_wo32(obj, 0x0c, 0x00000000); 158{
117 nv_wo32(obj, 0x10, 0x00000000); 159 struct nv04_graph_priv *priv = (void *)object->engine;
160 struct nv04_graph_chan *chan = (void *)object;
161 u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
162 int ret = 0;
163
164 nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
165
166 if (nv_rd32(priv, 0x40032c) == inst) {
167 if (suspend) {
168 nv_wr32(priv, 0x400720, 0x00000000);
169 nv_wr32(priv, 0x400784, inst);
170 nv_mask(priv, 0x400310, 0x00000020, 0x00000020);
171 nv_mask(priv, 0x400304, 0x00000001, 0x00000001);
172 if (!nv_wait(priv, 0x400300, 0x00000001, 0x00000000)) {
173 u32 insn = nv_rd32(priv, 0x400308);
174 nv_warn(priv, "ctxprog timeout 0x%08x\n", insn);
175 ret = -EBUSY;
176 }
177 }
118 178
119 ret = nouveau_ramht_insert(chan, handle, obj); 179 nv_mask(priv, 0x40032c, 0x01000000, 0x00000000);
120 nouveau_gpuobj_ref(NULL, &obj); 180 }
181
182 if (nv_rd32(priv, 0x400330) == inst)
183 nv_mask(priv, 0x400330, 0x01000000, 0x00000000);
184
185 nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
121 return ret; 186 return ret;
122} 187}
123 188
189static struct nouveau_oclass
190nv40_graph_cclass = {
191 .handle = NV_ENGCTX(GR, 0x40),
192 .ofuncs = &(struct nouveau_ofuncs) {
193 .ctor = nv40_graph_context_ctor,
194 .dtor = _nouveau_graph_context_dtor,
195 .init = _nouveau_graph_context_init,
196 .fini = nv40_graph_context_fini,
197 .rd32 = _nouveau_graph_context_rd32,
198 .wr32 = _nouveau_graph_context_wr32,
199 },
200};
201
202/*******************************************************************************
203 * PGRAPH engine/subdev functions
204 ******************************************************************************/
205
124static void 206static void
125nv40_graph_set_tile_region(struct drm_device *dev, int i) 207nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
126{ 208{
127 struct drm_nouveau_private *dev_priv = dev->dev_private; 209 struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
128 struct nouveau_fb_tile *tile = nvfb_tile(dev, i); 210 struct nouveau_fifo *pfifo = nouveau_fifo(engine);
211 struct nv40_graph_priv *priv = (void *)engine;
212 unsigned long flags;
213
214 pfifo->pause(pfifo, &flags);
215 nv04_graph_idle(priv);
129 216
130 switch (dev_priv->chipset) { 217 switch (nv_device(priv)->chipset) {
131 case 0x40: 218 case 0x40:
132 case 0x41: /* guess */ 219 case 0x41: /* guess */
133 case 0x42: 220 case 0x42:
134 case 0x43: 221 case 0x43:
135 case 0x45: /* guess */ 222 case 0x45: /* guess */
136 case 0x4e: 223 case 0x4e:
137 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); 224 nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
138 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); 225 nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
139 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); 226 nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
140 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); 227 nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
141 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); 228 nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
142 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); 229 nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
143 break; 230 break;
144 case 0x44: 231 case 0x44:
145 case 0x4a: 232 case 0x4a:
146 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); 233 nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
147 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); 234 nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
148 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); 235 nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
149 break; 236 break;
150 case 0x46: 237 case 0x46:
151 case 0x47: 238 case 0x47:
@@ -154,149 +241,213 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
154 case 0x4c: 241 case 0x4c:
155 case 0x67: 242 case 0x67:
156 default: 243 default:
157 nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch); 244 nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
158 nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit); 245 nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
159 nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr); 246 nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
160 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); 247 nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
161 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); 248 nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
162 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); 249 nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
163 break; 250 break;
164 } 251 }
252
253 pfifo->start(pfifo, &flags);
165} 254}
166 255
167/* 256static void
168 * G70 0x47 257nv40_graph_intr(struct nouveau_subdev *subdev)
169 * G71 0x49 258{
170 * NV45 0x48 259 struct nv40_graph_priv *priv = (void *)subdev;
171 * G72[M] 0x46 260 struct nouveau_engine *engine = nv_engine(subdev);
172 * G73 0x4b 261 struct nouveau_handle *handle = NULL;
173 * C51_G7X 0x4c 262 u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
174 * C51 0x4e 263 u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
175 */ 264 u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
176int 265 u32 inst = (nv_rd32(priv, 0x40032c) & 0x000fffff) << 4;
177nv40_graph_init(struct drm_device *dev, int engine) 266 u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
267 u32 subc = (addr & 0x00070000) >> 16;
268 u32 mthd = (addr & 0x00001ffc);
269 u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
270 u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xffff;
271 u32 show = stat;
272
273 if (stat & NV_PGRAPH_INTR_ERROR) {
274 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
275 handle = nouveau_engctx_lookup_class(engine, inst, class);
276 if (handle && !nv_call(handle->object, mthd, data))
277 show &= ~NV_PGRAPH_INTR_ERROR;
278 nouveau_engctx_handle_put(handle);
279 }
280
281 if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
282 nv_mask(priv, 0x402000, 0, 0);
283 }
284 }
285
286 nv_wr32(priv, NV03_PGRAPH_INTR, stat);
287 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
288
289 if (show) {
290 nv_info(priv, "");
291 nouveau_bitfield_print(nv10_graph_intr_name, show);
292 printk(" nsource:");
293 nouveau_bitfield_print(nv04_graph_nsource, nsource);
294 printk(" nstatus:");
295 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
296 printk("\n");
297 nv_error(priv, "ch 0x%08x subc %d class 0x%04x "
298 "mthd 0x%04x data 0x%08x\n",
299 inst, subc, class, mthd, data);
300 }
301}
302
303static int
304nv40_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
305 struct nouveau_oclass *oclass, void *data, u32 size,
306 struct nouveau_object **pobject)
178{ 307{
179 struct nv40_graph_engine *pgraph = nv_engine(dev, engine); 308 struct nv40_graph_priv *priv;
180 struct drm_nouveau_private *dev_priv = dev->dev_private; 309 int ret;
181 uint32_t vramsz; 310
182 int i, j; 311 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
312 *pobject = nv_object(priv);
313 if (ret)
314 return ret;
183 315
184 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & 316 nv_subdev(priv)->unit = 0x00001000;
185 ~NV_PMC_ENABLE_PGRAPH); 317 nv_subdev(priv)->intr = nv40_graph_intr;
186 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | 318 nv_engine(priv)->cclass = &nv40_graph_cclass;
187 NV_PMC_ENABLE_PGRAPH); 319 if (nv44_graph_class(priv))
320 nv_engine(priv)->sclass = nv44_graph_sclass;
321 else
322 nv_engine(priv)->sclass = nv40_graph_sclass;
323 nv_engine(priv)->tile_prog = nv40_graph_tile_prog;
324 return 0;
325}
326
327static int
328nv40_graph_init(struct nouveau_object *object)
329{
330 struct nouveau_engine *engine = nv_engine(object);
331 struct nouveau_fb *pfb = nouveau_fb(object);
332 struct nv40_graph_priv *priv = (void *)engine;
333 int ret, i, j;
334 u32 vramsz;
335
336 ret = nouveau_graph_init(&priv->base);
337 if (ret)
338 return ret;
188 339
189 /* generate and upload context program */ 340 /* generate and upload context program */
190 nv40_grctx_init(dev, &pgraph->grctx_size); 341 nv40_grctx_init(nv_device(priv), &priv->size);
191 342
192 /* No context present currently */ 343 /* No context present currently */
193 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); 344 nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
194 345
195 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 346 nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
196 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); 347 nv_wr32(priv, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
197 348
198 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); 349 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
199 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000); 350 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
200 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0); 351 nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
201 nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055); 352 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
202 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000); 353 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
203 nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f); 354 nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
204 355
205 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100); 356 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
206 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); 357 nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF);
207 358
208 j = nv_rd32(dev, 0x1540) & 0xff; 359 j = nv_rd32(priv, 0x1540) & 0xff;
209 if (j) { 360 if (j) {
210 for (i = 0; !(j & 1); j >>= 1, i++) 361 for (i = 0; !(j & 1); j >>= 1, i++)
211 ; 362 ;
212 nv_wr32(dev, 0x405000, i); 363 nv_wr32(priv, 0x405000, i);
213 } 364 }
214 365
215 if (dev_priv->chipset == 0x40) { 366 if (nv_device(priv)->chipset == 0x40) {
216 nv_wr32(dev, 0x4009b0, 0x83280fff); 367 nv_wr32(priv, 0x4009b0, 0x83280fff);
217 nv_wr32(dev, 0x4009b4, 0x000000a0); 368 nv_wr32(priv, 0x4009b4, 0x000000a0);
218 } else { 369 } else {
219 nv_wr32(dev, 0x400820, 0x83280eff); 370 nv_wr32(priv, 0x400820, 0x83280eff);
220 nv_wr32(dev, 0x400824, 0x000000a0); 371 nv_wr32(priv, 0x400824, 0x000000a0);
221 } 372 }
222 373
223 switch (dev_priv->chipset) { 374 switch (nv_device(priv)->chipset) {
224 case 0x40: 375 case 0x40:
225 case 0x45: 376 case 0x45:
226 nv_wr32(dev, 0x4009b8, 0x0078e366); 377 nv_wr32(priv, 0x4009b8, 0x0078e366);
227 nv_wr32(dev, 0x4009bc, 0x0000014c); 378 nv_wr32(priv, 0x4009bc, 0x0000014c);
228 break; 379 break;
229 case 0x41: 380 case 0x41:
230 case 0x42: /* pciid also 0x00Cx */ 381 case 0x42: /* pciid also 0x00Cx */
231 /* case 0x0120: XXX (pciid) */ 382 /* case 0x0120: XXX (pciid) */
232 nv_wr32(dev, 0x400828, 0x007596ff); 383 nv_wr32(priv, 0x400828, 0x007596ff);
233 nv_wr32(dev, 0x40082c, 0x00000108); 384 nv_wr32(priv, 0x40082c, 0x00000108);
234 break; 385 break;
235 case 0x43: 386 case 0x43:
236 nv_wr32(dev, 0x400828, 0x0072cb77); 387 nv_wr32(priv, 0x400828, 0x0072cb77);
237 nv_wr32(dev, 0x40082c, 0x00000108); 388 nv_wr32(priv, 0x40082c, 0x00000108);
238 break; 389 break;
239 case 0x44: 390 case 0x44:
240 case 0x46: /* G72 */ 391 case 0x46: /* G72 */
241 case 0x4a: 392 case 0x4a:
242 case 0x4c: /* G7x-based C51 */ 393 case 0x4c: /* G7x-based C51 */
243 case 0x4e: 394 case 0x4e:
244 nv_wr32(dev, 0x400860, 0); 395 nv_wr32(priv, 0x400860, 0);
245 nv_wr32(dev, 0x400864, 0); 396 nv_wr32(priv, 0x400864, 0);
246 break; 397 break;
247 case 0x47: /* G70 */ 398 case 0x47: /* G70 */
248 case 0x49: /* G71 */ 399 case 0x49: /* G71 */
249 case 0x4b: /* G73 */ 400 case 0x4b: /* G73 */
250 nv_wr32(dev, 0x400828, 0x07830610); 401 nv_wr32(priv, 0x400828, 0x07830610);
251 nv_wr32(dev, 0x40082c, 0x0000016A); 402 nv_wr32(priv, 0x40082c, 0x0000016A);
252 break; 403 break;
253 default: 404 default:
254 break; 405 break;
255 } 406 }
256 407
257 nv_wr32(dev, 0x400b38, 0x2ffff800); 408 nv_wr32(priv, 0x400b38, 0x2ffff800);
258 nv_wr32(dev, 0x400b3c, 0x00006000); 409 nv_wr32(priv, 0x400b3c, 0x00006000);
259 410
260 /* Tiling related stuff. */ 411 /* Tiling related stuff. */
261 switch (dev_priv->chipset) { 412 switch (nv_device(priv)->chipset) {
262 case 0x44: 413 case 0x44:
263 case 0x4a: 414 case 0x4a:
264 nv_wr32(dev, 0x400bc4, 0x1003d888); 415 nv_wr32(priv, 0x400bc4, 0x1003d888);
265 nv_wr32(dev, 0x400bbc, 0xb7a7b500); 416 nv_wr32(priv, 0x400bbc, 0xb7a7b500);
266 break; 417 break;
267 case 0x46: 418 case 0x46:
268 nv_wr32(dev, 0x400bc4, 0x0000e024); 419 nv_wr32(priv, 0x400bc4, 0x0000e024);
269 nv_wr32(dev, 0x400bbc, 0xb7a7b520); 420 nv_wr32(priv, 0x400bbc, 0xb7a7b520);
270 break; 421 break;
271 case 0x4c: 422 case 0x4c:
272 case 0x4e: 423 case 0x4e:
273 case 0x67: 424 case 0x67:
274 nv_wr32(dev, 0x400bc4, 0x1003d888); 425 nv_wr32(priv, 0x400bc4, 0x1003d888);
275 nv_wr32(dev, 0x400bbc, 0xb7a7b540); 426 nv_wr32(priv, 0x400bbc, 0xb7a7b540);
276 break; 427 break;
277 default: 428 default:
278 break; 429 break;
279 } 430 }
280 431
281 /* Turn all the tiling regions off. */ 432 /* Turn all the tiling regions off. */
282 for (i = 0; i < nvfb_tile_nr(dev); i++) 433 for (i = 0; i < pfb->tile.regions; i++)
283 nv40_graph_set_tile_region(dev, i); 434 engine->tile_prog(engine, i);
284 435
285 /* begin RAM config */ 436 /* begin RAM config */
286 vramsz = pci_resource_len(dev->pdev, 0) - 1; 437 vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
287 switch (dev_priv->chipset) { 438 switch (nv_device(priv)->chipset) {
288 case 0x40: 439 case 0x40:
289 nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0)); 440 nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
290 nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1)); 441 nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
291 nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0)); 442 nv_wr32(priv, 0x4069A4, nv_rd32(priv, 0x100200));
292 nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1)); 443 nv_wr32(priv, 0x4069A8, nv_rd32(priv, 0x100204));
293 nv_wr32(dev, 0x400820, 0); 444 nv_wr32(priv, 0x400820, 0);
294 nv_wr32(dev, 0x400824, 0); 445 nv_wr32(priv, 0x400824, 0);
295 nv_wr32(dev, 0x400864, vramsz); 446 nv_wr32(priv, 0x400864, vramsz);
296 nv_wr32(dev, 0x400868, vramsz); 447 nv_wr32(priv, 0x400868, vramsz);
297 break; 448 break;
298 default: 449 default:
299 switch (dev_priv->chipset) { 450 switch (nv_device(priv)->chipset) {
300 case 0x41: 451 case 0x41:
301 case 0x42: 452 case 0x42:
302 case 0x43: 453 case 0x43:
@@ -304,163 +455,33 @@ nv40_graph_init(struct drm_device *dev, int engine)
304 case 0x4e: 455 case 0x4e:
305 case 0x44: 456 case 0x44:
306 case 0x4a: 457 case 0x4a:
307 nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0)); 458 nv_wr32(priv, 0x4009F0, nv_rd32(priv, 0x100200));
308 nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1)); 459 nv_wr32(priv, 0x4009F4, nv_rd32(priv, 0x100204));
309 break; 460 break;
310 default: 461 default:
311 nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0)); 462 nv_wr32(priv, 0x400DF0, nv_rd32(priv, 0x100200));
312 nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1)); 463 nv_wr32(priv, 0x400DF4, nv_rd32(priv, 0x100204));
313 break; 464 break;
314 } 465 }
315 nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0)); 466 nv_wr32(priv, 0x4069F0, nv_rd32(priv, 0x100200));
316 nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1)); 467 nv_wr32(priv, 0x4069F4, nv_rd32(priv, 0x100204));
317 nv_wr32(dev, 0x400840, 0); 468 nv_wr32(priv, 0x400840, 0);
318 nv_wr32(dev, 0x400844, 0); 469 nv_wr32(priv, 0x400844, 0);
319 nv_wr32(dev, 0x4008A0, vramsz); 470 nv_wr32(priv, 0x4008A0, vramsz);
320 nv_wr32(dev, 0x4008A4, vramsz); 471 nv_wr32(priv, 0x4008A4, vramsz);
321 break; 472 break;
322 } 473 }
323 474
324 return 0; 475 return 0;
325} 476}
326 477
327static int 478struct nouveau_oclass
328nv40_graph_fini(struct drm_device *dev, int engine, bool suspend) 479nv40_graph_oclass = {
329{ 480 .handle = NV_ENGINE(GR, 0x40),
330 u32 inst = nv_rd32(dev, 0x40032c); 481 .ofuncs = &(struct nouveau_ofuncs) {
331 if (inst & 0x01000000) { 482 .ctor = nv40_graph_ctor,
332 nv_wr32(dev, 0x400720, 0x00000000); 483 .dtor = _nouveau_graph_dtor,
333 nv_wr32(dev, 0x400784, inst); 484 .init = nv40_graph_init,
334 nv_mask(dev, 0x400310, 0x00000020, 0x00000020); 485 .fini = _nouveau_graph_fini,
335 nv_mask(dev, 0x400304, 0x00000001, 0x00000001); 486 },
336 if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) { 487};
337 u32 insn = nv_rd32(dev, 0x400308);
338 NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
339 }
340 nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
341 }
342 return 0;
343}
344
345static int
346nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
347{
348 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
349 struct drm_nouveau_private *dev_priv = dev->dev_private;
350 struct nouveau_gpuobj *grctx;
351 unsigned long flags;
352 int i;
353
354 spin_lock_irqsave(&dev_priv->channels.lock, flags);
355 for (i = 0; i < pfifo->channels; i++) {
356 if (!dev_priv->channels.ptr[i])
357 continue;
358 grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
359
360 if (grctx && grctx->addr == inst)
361 break;
362 }
363 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
364 return i;
365}
366
367static void
368nv40_graph_isr(struct drm_device *dev)
369{
370 u32 stat;
371
372 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
373 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
374 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
375 u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
376 u32 chid = nv40_graph_isr_chid(dev, inst);
377 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
378 u32 subc = (addr & 0x00070000) >> 16;
379 u32 mthd = (addr & 0x00001ffc);
380 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
381 u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
382 u32 show = stat;
383
384 if (stat & NV_PGRAPH_INTR_ERROR) {
385 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
386 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
387 show &= ~NV_PGRAPH_INTR_ERROR;
388 } else
389 if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
390 nv_mask(dev, 0x402000, 0, 0);
391 }
392 }
393
394 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
395 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
396
397 if (show && nouveau_ratelimit()) {
398 NV_INFO(dev, "PGRAPH -");
399 nouveau_bitfield_print(nv10_graph_intr, show);
400 printk(" nsource:");
401 nouveau_bitfield_print(nv04_graph_nsource, nsource);
402 printk(" nstatus:");
403 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
404 printk("\n");
405 NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
406 "class 0x%04x mthd 0x%04x data 0x%08x\n",
407 chid, inst, subc, class, mthd, data);
408 }
409 }
410}
411
412static void
413nv40_graph_destroy(struct drm_device *dev, int engine)
414{
415 struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
416
417 nouveau_irq_unregister(dev, 12);
418
419 NVOBJ_ENGINE_DEL(dev, GR);
420 kfree(pgraph);
421}
422
423int
424nv40_graph_create(struct drm_device *dev)
425{
426 struct nv40_graph_engine *pgraph;
427
428 pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
429 if (!pgraph)
430 return -ENOMEM;
431
432 pgraph->base.destroy = nv40_graph_destroy;
433 pgraph->base.init = nv40_graph_init;
434 pgraph->base.fini = nv40_graph_fini;
435 pgraph->base.context_new = nv40_graph_context_new;
436 pgraph->base.context_del = nv40_graph_context_del;
437 pgraph->base.object_new = nv40_graph_object_new;
438 pgraph->base.set_tile_region = nv40_graph_set_tile_region;
439
440 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
441 nouveau_irq_register(dev, 12, nv40_graph_isr);
442
443 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
444 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
445 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
446 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
447 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
448 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
449 NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
450 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
451 NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
452 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
453 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
454 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
455 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
456 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
457 NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
458
459 /* curie */
460 if (nv44_graph_class(dev))
461 NVOBJ_CLASS(dev, 0x4497, GR);
462 else
463 NVOBJ_CLASS(dev, 0x4097, GR);
464
465 return 0;
466}
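A note on the interrupt decode above: nv40_graph_isr() unpacks NV04_PGRAPH_TRAPPED_ADDR into a subchannel (bits 18:16) and a method offset (bits 12:2). A minimal standalone sketch of that decode, with the masks copied from the handler and the helper name hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Decode a PGRAPH trapped-address word the way nv40_graph_isr() does:
 * bits 18:16 hold the subchannel, bits 12:2 the method offset. */
static void decode_trapped_addr(uint32_t addr)
{
	uint32_t subc = (addr & 0x00070000) >> 16;
	uint32_t mthd = (addr & 0x00001ffc);

	printf("subc %u mthd 0x%04x\n", subc, mthd);
}

int main(void)
{
	decode_trapped_addr(0x00031ffc); /* prints: subc 3 mthd 0x1ffc */
	return 0;
}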
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
new file mode 100644
index 000000000000..d2ac975afc2e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
@@ -0,0 +1,21 @@
1#ifndef __NV40_GRAPH_H__
2#define __NV40_GRAPH_H__
3
4/* returns 1 if the device is one of the nv4x chips using the 0x4497 object
5 * class; useful for determining a number of other hardware features
6 */
7static inline int
8nv44_graph_class(void *priv)
9{
10 struct nouveau_device *device = nv_device(priv);
11
12 if ((device->chipset & 0xf0) == 0x60)
13 return 1;
14
15 return !(0x0baf & (1 << (device->chipset & 0x0f)));
16}
17
18void nv40_grctx_init(struct nouveau_device *, u32 *size);
19void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
20
21#endif
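The 0x0baf constant above is a per-chipset bitmap: bit (chipset & 0x0f) is set for the nv4x parts that keep the older 0x4097 class, while clear bits (and all nv6x parts) select 0x4497. A small userspace sketch mirroring the same test; the chipset list is illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Mirror of nv44_graph_class(): nv6x is always nv44-class; otherwise
 * bit (chipset & 0x0f) of 0x0baf marks the nv40-class (0x4097) parts. */
static int nv44_class(uint32_t chipset)
{
	if ((chipset & 0xf0) == 0x60)
		return 1;
	return !(0x0baf & (1 << (chipset & 0x0f)));
}

int main(void)
{
	uint32_t chips[] = { 0x40, 0x41, 0x44, 0x4a, 0x63 };
	unsigned int i;

	for (i = 0; i < sizeof(chips) / sizeof(chips[0]); i++)
		printf("nv%02x -> %s\n", chips[i],
		       nv44_class(chips[i]) ? "0x4497" : "0x4097");
	return 0;
}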
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 28932c4662e9..8955bdd3551c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -1,266 +1,234 @@
1/* 1/*
2 * Copyright (C) 2007 Ben Skeggs. 2 * Copyright 2012 Red Hat Inc.
3 * All Rights Reserved.
4 * 3 *
5 * Permission is hereby granted, free of charge, to any person obtaining 4 * Permission is hereby granted, free of charge, to any person obtaining a
6 * a copy of this software and associated documentation files (the 5 * copy of this software and associated documentation files (the "Software"),
7 * "Software"), to deal in the Software without restriction, including 6 * to deal in the Software without restriction, including without limitation
8 * without limitation the rights to use, copy, modify, merge, publish, 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * distribute, sublicense, and/or sell copies of the Software, and to 8 * and/or sell copies of the Software, and to permit persons to whom the
10 * permit persons to whom the Software is furnished to do so, subject to 9 * Software is furnished to do so, subject to the following conditions:
11 * the following conditions:
12 * 10 *
13 * The above copyright notice and this permission notice (including the 11 * The above copyright notice and this permission notice shall be included in
14 * next paragraph) shall be included in all copies or substantial 12 * all copies or substantial portions of the Software.
15 * portions of the Software.
16 * 13 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
24 * 21 *
22 * Authors: Ben Skeggs
25 */ 23 */
26 24
27#include "drmP.h" 25#include <core/os.h>
28#include "drm.h" 26#include <core/class.h>
29#include "nouveau_drv.h" 27#include <core/handle.h>
30#include <engine/fifo.h> 28#include <core/engctx.h>
31#include <core/ramht.h> 29#include <core/enum.h>
32#include "nouveau_dma.h"
33#include "nv50_evo.h"
34
35struct nv50_graph_engine {
36 struct nouveau_exec_engine base;
37 u32 ctxprog[512];
38 u32 ctxprog_size;
39 u32 grctx_size;
40};
41
42static int
43nv50_graph_init(struct drm_device *dev, int engine)
44{
45 struct drm_nouveau_private *dev_priv = dev->dev_private;
46 struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
47 u32 units = nv_rd32(dev, 0x001540);
48 int i;
49 30
50 NV_DEBUG(dev, "\n"); 31#include <subdev/fb.h>
32#include <subdev/vm.h>
33#include <subdev/timer.h>
51 34
52 /* master reset */ 35#include <engine/graph.h>
53 nv_mask(dev, 0x000200, 0x00201000, 0x00000000);
54 nv_mask(dev, 0x000200, 0x00201000, 0x00201000);
55 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
56
57 /* reset/enable traps and interrupts */
58 nv_wr32(dev, 0x400804, 0xc0000000);
59 nv_wr32(dev, 0x406800, 0xc0000000);
60 nv_wr32(dev, 0x400c04, 0xc0000000);
61 nv_wr32(dev, 0x401800, 0xc0000000);
62 nv_wr32(dev, 0x405018, 0xc0000000);
63 nv_wr32(dev, 0x402000, 0xc0000000);
64 for (i = 0; i < 16; i++) {
65 if (!(units & (1 << i)))
66 continue;
67 36
68 if (dev_priv->chipset < 0xa0) { 37#include "nv50.h"
69 nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
70 nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
71 nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
72 } else {
73 nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
74 nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
75 nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
76 }
77 }
78 38
79 nv_wr32(dev, 0x400108, 0xffffffff); 39struct nv50_graph_priv {
80 nv_wr32(dev, 0x400138, 0xffffffff); 40 struct nouveau_graph base;
81 nv_wr32(dev, 0x400100, 0xffffffff); 41 spinlock_t lock;
82 nv_wr32(dev, 0x40013c, 0xffffffff); 42 u32 size;
83 nv_wr32(dev, 0x400500, 0x00010001); 43};
84 44
85 /* upload context program, initialise ctxctl defaults */ 45struct nv50_graph_chan {
86 nv_wr32(dev, 0x400324, 0x00000000); 46 struct nouveau_graph_chan base;
87 for (i = 0; i < pgraph->ctxprog_size; i++) 47};
88 nv_wr32(dev, 0x400328, pgraph->ctxprog[i]);
89 nv_wr32(dev, 0x400824, 0x00000000);
90 nv_wr32(dev, 0x400828, 0x00000000);
91 nv_wr32(dev, 0x40082c, 0x00000000);
92 nv_wr32(dev, 0x400830, 0x00000000);
93 nv_wr32(dev, 0x400724, 0x00000000);
94 nv_wr32(dev, 0x40032c, 0x00000000);
95 nv_wr32(dev, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */
96 48
97 /* some unknown zcull magic */ 49/*******************************************************************************
98 switch (dev_priv->chipset & 0xf0) { 50 * Graphics object classes
99 case 0x50: 51 ******************************************************************************/
100 case 0x80:
101 case 0x90:
102 nv_wr32(dev, 0x402ca8, 0x00000800);
103 break;
104 case 0xa0:
105 default:
106 nv_wr32(dev, 0x402cc0, 0x00000000);
107 if (dev_priv->chipset == 0xa0 ||
108 dev_priv->chipset == 0xaa ||
109 dev_priv->chipset == 0xac) {
110 nv_wr32(dev, 0x402ca8, 0x00000802);
111 } else {
112 nv_wr32(dev, 0x402cc0, 0x00000000);
113 nv_wr32(dev, 0x402ca8, 0x00000002);
114 }
115 52
116 break; 53static int
117 } 54nv50_graph_object_ctor(struct nouveau_object *parent,
55 struct nouveau_object *engine,
56 struct nouveau_oclass *oclass, void *data, u32 size,
57 struct nouveau_object **pobject)
58{
59 struct nouveau_gpuobj *obj;
60 int ret;
118 61
119 /* zero out zcull regions */ 62 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
120 for (i = 0; i < 8; i++) { 63 16, 16, 0, &obj);
121 nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000); 64 *pobject = nv_object(obj);
122 nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000); 65 if (ret)
123 nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000); 66 return ret;
124 nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
125 }
126 67
68 nv_wo32(obj, 0x00, nv_mclass(obj));
69 nv_wo32(obj, 0x04, 0x00000000);
70 nv_wo32(obj, 0x08, 0x00000000);
71 nv_wo32(obj, 0x0c, 0x00000000);
127 return 0; 72 return 0;
128} 73}
129 74
130static int 75struct nouveau_ofuncs
131nv50_graph_fini(struct drm_device *dev, int engine, bool suspend) 76nv50_graph_ofuncs = {
132{ 77 .ctor = nv50_graph_object_ctor,
133 nv_wr32(dev, 0x40013c, 0x00000000); 78 .dtor = _nouveau_gpuobj_dtor,
134 return 0; 79 .init = _nouveau_gpuobj_init,
135} 80 .fini = _nouveau_gpuobj_fini,
81 .rd32 = _nouveau_gpuobj_rd32,
82 .wr32 = _nouveau_gpuobj_wr32,
83};
136 84
137static int 85static struct nouveau_oclass
138nv50_graph_context_new(struct nouveau_channel *chan, int engine) 86nv50_graph_sclass[] = {
139{ 87 { 0x0030, &nv50_graph_ofuncs },
140 struct drm_device *dev = chan->dev; 88 { 0x502d, &nv50_graph_ofuncs },
141 struct drm_nouveau_private *dev_priv = dev->dev_private; 89 { 0x5039, &nv50_graph_ofuncs },
142 struct nouveau_gpuobj *ramin = chan->ramin; 90 { 0x5097, &nv50_graph_ofuncs },
143 struct nouveau_gpuobj *grctx = NULL; 91 { 0x50c0, &nv50_graph_ofuncs },
144 struct nv50_graph_engine *pgraph = nv_engine(dev, engine); 92 {}
145 int hdr, ret; 93};
146
147 NV_DEBUG(dev, "ch%d\n", chan->id);
148
149 ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
150 NVOBJ_FLAG_ZERO_ALLOC |
151 NVOBJ_FLAG_ZERO_FREE, &grctx);
152 if (ret)
153 return ret;
154 94
155 hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; 95static struct nouveau_oclass
156 nv_wo32(ramin, hdr + 0x00, 0x00190002); 96nv84_graph_sclass[] = {
157 nv_wo32(ramin, hdr + 0x04, grctx->addr + grctx->size - 1); 97 { 0x0030, &nv50_graph_ofuncs },
158 nv_wo32(ramin, hdr + 0x08, grctx->addr); 98 { 0x502d, &nv50_graph_ofuncs },
159 nv_wo32(ramin, hdr + 0x0c, 0); 99 { 0x5039, &nv50_graph_ofuncs },
160 nv_wo32(ramin, hdr + 0x10, 0); 100 { 0x50c0, &nv50_graph_ofuncs },
161 nv_wo32(ramin, hdr + 0x14, 0x00010000); 101 { 0x8297, &nv50_graph_ofuncs },
102 {}
103};
162 104
163 nv50_grctx_fill(dev, grctx); 105static struct nouveau_oclass
164 nv_wo32(grctx, 0x00000, chan->ramin->addr >> 12); 106nva0_graph_sclass[] = {
107 { 0x0030, &nv50_graph_ofuncs },
108 { 0x502d, &nv50_graph_ofuncs },
109 { 0x5039, &nv50_graph_ofuncs },
110 { 0x50c0, &nv50_graph_ofuncs },
111 { 0x8397, &nv50_graph_ofuncs },
112 {}
113};
165 114
166 nvimem_flush(dev); 115static struct nouveau_oclass
116nva3_graph_sclass[] = {
117 { 0x0030, &nv50_graph_ofuncs },
118 { 0x502d, &nv50_graph_ofuncs },
119 { 0x5039, &nv50_graph_ofuncs },
120 { 0x50c0, &nv50_graph_ofuncs },
121 { 0x8597, &nv50_graph_ofuncs },
122 { 0x85c0, &nv50_graph_ofuncs },
123 {}
124};
167 125
168 nvvm_engref(chan->vm, engine, 1); 126static struct nouveau_oclass
169 chan->engctx[NVOBJ_ENGINE_GR] = grctx; 127nvaf_graph_sclass[] = {
170 return 0; 128 { 0x0030, &nv50_graph_ofuncs },
171} 129 { 0x502d, &nv50_graph_ofuncs },
130 { 0x5039, &nv50_graph_ofuncs },
131 { 0x50c0, &nv50_graph_ofuncs },
132 { 0x85c0, &nv50_graph_ofuncs },
133 { 0x8697, &nv50_graph_ofuncs },
134 {}
135};
172 136
173static void 137/*******************************************************************************
174nv50_graph_context_del(struct nouveau_channel *chan, int engine) 138 * PGRAPH context
175{ 139 ******************************************************************************/
176 struct nouveau_gpuobj *grctx = chan->engctx[engine];
177 struct drm_device *dev = chan->dev;
178 struct drm_nouveau_private *dev_priv = dev->dev_private;
179 int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
180
181 for (i = hdr; i < hdr + 24; i += 4)
182 nv_wo32(chan->ramin, i, 0);
183 nvimem_flush(dev);
184
185 nvvm_engref(chan->vm, engine, -1);
186 nouveau_gpuobj_ref(NULL, &grctx);
187 chan->engctx[engine] = NULL;
188}
189 140
190static int 141static int
191nv50_graph_object_new(struct nouveau_channel *chan, int engine, 142nv50_graph_context_ctor(struct nouveau_object *parent,
192 u32 handle, u16 class) 143 struct nouveau_object *engine,
144 struct nouveau_oclass *oclass, void *data, u32 size,
145 struct nouveau_object **pobject)
193{ 146{
194 struct drm_device *dev = chan->dev; 147 struct nv50_graph_priv *priv = (void *)engine;
195 struct nouveau_gpuobj *obj = NULL; 148 struct nv50_graph_chan *chan;
196 int ret; 149 int ret;
197 150
198 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj); 151 ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
152 priv->size, 0,
153 NVOBJ_FLAG_ZERO_ALLOC, &chan);
154 *pobject = nv_object(chan);
199 if (ret) 155 if (ret)
200 return ret; 156 return ret;
201 obj->engine = 1;
202 obj->class = class;
203
204 nv_wo32(obj, 0x00, class);
205 nv_wo32(obj, 0x04, 0x00000000);
206 nv_wo32(obj, 0x08, 0x00000000);
207 nv_wo32(obj, 0x0c, 0x00000000);
208 nvimem_flush(dev);
209 157
210 ret = nouveau_ramht_insert(chan, handle, obj); 158 nv50_grctx_fill(nv_device(priv), nv_gpuobj(chan));
211 nouveau_gpuobj_ref(NULL, &obj); 159 return 0;
212 return ret;
213} 160}
214 161
215static void 162static struct nouveau_oclass
216nv50_graph_tlb_flush(struct drm_device *dev, int engine) 163nv50_graph_cclass = {
164 .handle = NV_ENGCTX(GR, 0x50),
165 .ofuncs = &(struct nouveau_ofuncs) {
166 .ctor = nv50_graph_context_ctor,
167 .dtor = _nouveau_graph_context_dtor,
168 .init = _nouveau_graph_context_init,
169 .fini = _nouveau_graph_context_fini,
170 .rd32 = _nouveau_graph_context_rd32,
171 .wr32 = _nouveau_graph_context_wr32,
172 },
173};
174
175/*******************************************************************************
176 * PGRAPH engine/subdev functions
177 ******************************************************************************/
178
179static int
180nv50_graph_tlb_flush(struct nouveau_engine *engine)
217{ 181{
218 nv50_vm_flush_engine(dev, 0); 182 nv50_vm_flush_engine(&engine->base, 0x00);
183 return 0;
219} 184}
220 185
221static void 186static int
222nv84_graph_tlb_flush(struct drm_device *dev, int engine) 187nv84_graph_tlb_flush(struct nouveau_engine *engine)
223{ 188{
224 struct drm_nouveau_private *dev_priv = dev->dev_private; 189 struct nouveau_timer *ptimer = nouveau_timer(engine);
190 struct nv50_graph_priv *priv = (void *)engine;
225 bool idle, timeout = false; 191 bool idle, timeout = false;
226 unsigned long flags; 192 unsigned long flags;
227 u64 start; 193 u64 start;
228 u32 tmp; 194 u32 tmp;
229 195
230 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 196 spin_lock_irqsave(&priv->lock, flags);
231 nv_mask(dev, 0x400500, 0x00000001, 0x00000000); 197 nv_mask(priv, 0x400500, 0x00000001, 0x00000000);
232 198
233 start = nv_timer_read(dev); 199 start = ptimer->read(ptimer);
234 do { 200 do {
235 idle = true; 201 idle = true;
236 202
237 for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) { 203 for (tmp = nv_rd32(priv, 0x400380); tmp && idle; tmp >>= 3) {
238 if ((tmp & 7) == 1) 204 if ((tmp & 7) == 1)
239 idle = false; 205 idle = false;
240 } 206 }
241 207
242 for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) { 208 for (tmp = nv_rd32(priv, 0x400384); tmp && idle; tmp >>= 3) {
243 if ((tmp & 7) == 1) 209 if ((tmp & 7) == 1)
244 idle = false; 210 idle = false;
245 } 211 }
246 212
247 for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) { 213 for (tmp = nv_rd32(priv, 0x400388); tmp && idle; tmp >>= 3) {
248 if ((tmp & 7) == 1) 214 if ((tmp & 7) == 1)
249 idle = false; 215 idle = false;
250 } 216 }
251 } while (!idle && !(timeout = nv_timer_read(dev) - start > 2000000000)); 217 } while (!idle &&
218 !(timeout = ptimer->read(ptimer) - start > 2000000000));
252 219
253 if (timeout) { 220 if (timeout) {
254 NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: " 221 nv_error(priv, "PGRAPH TLB flush idle timeout fail: "
255 "0x%08x 0x%08x 0x%08x 0x%08x\n", 222 "0x%08x 0x%08x 0x%08x 0x%08x\n",
256 nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380), 223 nv_rd32(priv, 0x400700), nv_rd32(priv, 0x400380),
257 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388)); 224 nv_rd32(priv, 0x400384), nv_rd32(priv, 0x400388));
258 } 225 }
259 226
260 nv50_vm_flush_engine(dev, 0); 227 nv50_vm_flush_engine(&engine->base, 0x00);
261 228
262 nv_mask(dev, 0x400500, 0x00000001, 0x00000001); 229 nv_mask(priv, 0x400500, 0x00000001, 0x00000001);
263 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 230 spin_unlock_irqrestore(&priv->lock, flags);
231 return timeout ? -EBUSY : 0;
264} 232}
265 233
266static struct nouveau_enum nv50_mp_exec_error_names[] = { 234static struct nouveau_enum nv50_mp_exec_error_names[] = {
@@ -341,7 +309,7 @@ struct nouveau_enum nv50_data_error_names[] = {
341 {} 309 {}
342}; 310};
343 311
344static struct nouveau_bitfield nv50_graph_intr[] = { 312static struct nouveau_bitfield nv50_graph_intr_name[] = {
345 { 0x00000001, "NOTIFY" }, 313 { 0x00000001, "NOTIFY" },
346 { 0x00000002, "COMPUTE_QUERY" }, 314 { 0x00000002, "COMPUTE_QUERY" },
347 { 0x00000010, "ILLEGAL_MTHD" }, 315 { 0x00000010, "ILLEGAL_MTHD" },
@@ -356,95 +324,93 @@ static struct nouveau_bitfield nv50_graph_intr[] = {
356}; 324};
357 325
358static void 326static void
359nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display) 327nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
360{ 328{
361 struct drm_nouveau_private *dev_priv = dev->dev_private; 329 u32 units = nv_rd32(priv, 0x1540);
362 uint32_t units = nv_rd32(dev, 0x1540); 330 u32 addr, mp10, status, pc, oplow, ophigh;
363 uint32_t addr, mp10, status, pc, oplow, ophigh;
364 int i; 331 int i;
365 int mps = 0; 332 int mps = 0;
366 for (i = 0; i < 4; i++) { 333 for (i = 0; i < 4; i++) {
367 if (!(units & 1 << (i+24))) 334 if (!(units & 1 << (i+24)))
368 continue; 335 continue;
369 if (dev_priv->chipset < 0xa0) 336 if (nv_device(priv)->chipset < 0xa0)
370 addr = 0x408200 + (tpid << 12) + (i << 7); 337 addr = 0x408200 + (tpid << 12) + (i << 7);
371 else 338 else
372 addr = 0x408100 + (tpid << 11) + (i << 7); 339 addr = 0x408100 + (tpid << 11) + (i << 7);
373 mp10 = nv_rd32(dev, addr + 0x10); 340 mp10 = nv_rd32(priv, addr + 0x10);
374 status = nv_rd32(dev, addr + 0x14); 341 status = nv_rd32(priv, addr + 0x14);
375 if (!status) 342 if (!status)
376 continue; 343 continue;
377 if (display) { 344 if (display) {
378 nv_rd32(dev, addr + 0x20); 345 nv_rd32(priv, addr + 0x20);
379 pc = nv_rd32(dev, addr + 0x24); 346 pc = nv_rd32(priv, addr + 0x24);
380 oplow = nv_rd32(dev, addr + 0x70); 347 oplow = nv_rd32(priv, addr + 0x70);
381 ophigh = nv_rd32(dev, addr + 0x74); 348 ophigh = nv_rd32(priv, addr + 0x74);
382 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " 349 nv_error(priv, "TRAP_MP_EXEC - "
383 "TP %d MP %d: ", tpid, i); 350 "TP %d MP %d: ", tpid, i);
384 nouveau_enum_print(nv50_mp_exec_error_names, status); 351 nouveau_enum_print(nv50_mp_exec_error_names, status);
385 printk(" at %06x warp %d, opcode %08x %08x\n", 352 printk(" at %06x warp %d, opcode %08x %08x\n",
386 pc&0xffffff, pc >> 24, 353 pc&0xffffff, pc >> 24,
387 oplow, ophigh); 354 oplow, ophigh);
388 } 355 }
389 nv_wr32(dev, addr + 0x10, mp10); 356 nv_wr32(priv, addr + 0x10, mp10);
390 nv_wr32(dev, addr + 0x14, 0); 357 nv_wr32(priv, addr + 0x14, 0);
391 mps++; 358 mps++;
392 } 359 }
393 if (!mps && display) 360 if (!mps && display)
394 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: " 361 nv_error(priv, "TRAP_MP_EXEC - TP %d: "
395 "No MPs claiming errors?\n", tpid); 362 "No MPs claiming errors?\n", tpid);
396} 363}
397 364
398static void 365static void
399nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, 366nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
400 uint32_t ustatus_new, int display, const char *name) 367 u32 ustatus_new, int display, const char *name)
401{ 368{
402 struct drm_nouveau_private *dev_priv = dev->dev_private;
403 int tps = 0; 369 int tps = 0;
404 uint32_t units = nv_rd32(dev, 0x1540); 370 u32 units = nv_rd32(priv, 0x1540);
405 int i, r; 371 int i, r;
406 uint32_t ustatus_addr, ustatus; 372 u32 ustatus_addr, ustatus;
407 for (i = 0; i < 16; i++) { 373 for (i = 0; i < 16; i++) {
408 if (!(units & (1 << i))) 374 if (!(units & (1 << i)))
409 continue; 375 continue;
410 if (dev_priv->chipset < 0xa0) 376 if (nv_device(priv)->chipset < 0xa0)
411 ustatus_addr = ustatus_old + (i << 12); 377 ustatus_addr = ustatus_old + (i << 12);
412 else 378 else
413 ustatus_addr = ustatus_new + (i << 11); 379 ustatus_addr = ustatus_new + (i << 11);
414 ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff; 380 ustatus = nv_rd32(priv, ustatus_addr) & 0x7fffffff;
415 if (!ustatus) 381 if (!ustatus)
416 continue; 382 continue;
417 tps++; 383 tps++;
418 switch (type) { 384 switch (type) {
419 case 6: /* texture error... unknown for now */ 385 case 6: /* texture error... unknown for now */
420 if (display) { 386 if (display) {
421 NV_ERROR(dev, "magic set %d:\n", i); 387 nv_error(priv, "magic set %d:\n", i);
422 for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) 388 for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
423 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, 389 nv_error(priv, "\t0x%08x: 0x%08x\n", r,
424 nv_rd32(dev, r)); 390 nv_rd32(priv, r));
425 } 391 }
426 break; 392 break;
427 case 7: /* MP error */ 393 case 7: /* MP error */
428 if (ustatus & 0x04030000) { 394 if (ustatus & 0x04030000) {
429 nv50_pgraph_mp_trap(dev, i, display); 395 nv50_priv_mp_trap(priv, i, display);
430 ustatus &= ~0x04030000; 396 ustatus &= ~0x04030000;
431 } 397 }
432 break; 398 break;
433 case 8: /* TPDMA error */ 399 case 8: /* TPDMA error */
434 { 400 {
435 uint32_t e0c = nv_rd32(dev, ustatus_addr + 4); 401 u32 e0c = nv_rd32(priv, ustatus_addr + 4);
436 uint32_t e10 = nv_rd32(dev, ustatus_addr + 8); 402 u32 e10 = nv_rd32(priv, ustatus_addr + 8);
437 uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc); 403 u32 e14 = nv_rd32(priv, ustatus_addr + 0xc);
438 uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10); 404 u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
439 uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14); 405 u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
440 uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18); 406 u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
441 uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c); 407 u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
442 /* 2d engine destination */ 408 /* 2d engine destination */
443 if (ustatus & 0x00000010) { 409 if (ustatus & 0x00000010) {
444 if (display) { 410 if (display) {
445 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n", 411 nv_error(priv, "TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
446 i, e14, e10); 412 i, e14, e10);
447 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", 413 nv_error(priv, "TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
448 i, e0c, e18, e1c, e20, e24); 414 i, e0c, e18, e1c, e20, e24);
449 } 415 }
450 ustatus &= ~0x00000010; 416 ustatus &= ~0x00000010;
@@ -452,9 +418,9 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
452 /* Render target */ 418 /* Render target */
453 if (ustatus & 0x00000040) { 419 if (ustatus & 0x00000040) {
454 if (display) { 420 if (display) {
455 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n", 421 nv_error(priv, "TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
456 i, e14, e10); 422 i, e14, e10);
457 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", 423 nv_error(priv, "TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
458 i, e0c, e18, e1c, e20, e24); 424 i, e0c, e18, e1c, e20, e24);
459 } 425 }
460 ustatus &= ~0x00000040; 426 ustatus &= ~0x00000040;
@@ -464,19 +430,19 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
464 if (display) { 430 if (display) {
465 if (e18 & 0x80000000) { 431 if (e18 & 0x80000000) {
466 /* g[] read fault? */ 432 /* g[] read fault? */
467 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n", 433 nv_error(priv, "TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
468 i, e14, e10 | ((e18 >> 24) & 0x1f)); 434 i, e14, e10 | ((e18 >> 24) & 0x1f));
469 e18 &= ~0x1f000000; 435 e18 &= ~0x1f000000;
470 } else if (e18 & 0xc) { 436 } else if (e18 & 0xc) {
471 /* g[] write fault? */ 437 /* g[] write fault? */
472 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n", 438 nv_error(priv, "TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
473 i, e14, e10 | ((e18 >> 7) & 0x1f)); 439 i, e14, e10 | ((e18 >> 7) & 0x1f));
474 e18 &= ~0x00000f80; 440 e18 &= ~0x00000f80;
475 } else { 441 } else {
476 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n", 442 nv_error(priv, "TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
477 i, e14, e10); 443 i, e14, e10);
478 } 444 }
479 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", 445 nv_error(priv, "TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
480 i, e0c, e18, e1c, e20, e24); 446 i, e0c, e18, e1c, e20, e24);
481 } 447 }
482 ustatus &= ~0x00000080; 448 ustatus &= ~0x00000080;
@@ -486,23 +452,23 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
486 } 452 }
487 if (ustatus) { 453 if (ustatus) {
488 if (display) 454 if (display)
489 NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); 455 nv_info(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
490 } 456 }
491 nv_wr32(dev, ustatus_addr, 0xc0000000); 457 nv_wr32(priv, ustatus_addr, 0xc0000000);
492 } 458 }
493 459
494 if (!tps && display) 460 if (!tps && display)
495 NV_INFO(dev, "%s - No TPs claiming errors?\n", name); 461 nv_info(priv, "%s - No TPs claiming errors?\n", name);
496} 462}
497 463
498static int 464static int
499nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid) 465nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display, u64 inst)
500{ 466{
501 u32 status = nv_rd32(dev, 0x400108); 467 u32 status = nv_rd32(priv, 0x400108);
502 u32 ustatus; 468 u32 ustatus;
503 469
504 if (!status && display) { 470 if (!status && display) {
505 NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n"); 471 nv_error(priv, "TRAP: no units reporting traps?\n");
506 return 1; 472 return 1;
507 } 473 }
508 474
@@ -510,72 +476,72 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
510 * COND, QUERY. If you get a trap from it, the command is still stuck 476 * COND, QUERY. If you get a trap from it, the command is still stuck
511 * in DISPATCH and you need to do something about it. */ 477 * in DISPATCH and you need to do something about it. */
512 if (status & 0x001) { 478 if (status & 0x001) {
513 ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff; 479 ustatus = nv_rd32(priv, 0x400804) & 0x7fffffff;
514 if (!ustatus && display) { 480 if (!ustatus && display) {
515 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n"); 481 nv_error(priv, "TRAP_DISPATCH - no ustatus?\n");
516 } 482 }
517 483
518 nv_wr32(dev, 0x400500, 0x00000000); 484 nv_wr32(priv, 0x400500, 0x00000000);
519 485
520 /* Known to be triggered by screwed up NOTIFY and COND... */ 486 /* Known to be triggered by screwed up NOTIFY and COND... */
521 if (ustatus & 0x00000001) { 487 if (ustatus & 0x00000001) {
522 u32 addr = nv_rd32(dev, 0x400808); 488 u32 addr = nv_rd32(priv, 0x400808);
523 u32 subc = (addr & 0x00070000) >> 16; 489 u32 subc = (addr & 0x00070000) >> 16;
524 u32 mthd = (addr & 0x00001ffc); 490 u32 mthd = (addr & 0x00001ffc);
525 u32 datal = nv_rd32(dev, 0x40080c); 491 u32 datal = nv_rd32(priv, 0x40080c);
526 u32 datah = nv_rd32(dev, 0x400810); 492 u32 datah = nv_rd32(priv, 0x400810);
527 u32 class = nv_rd32(dev, 0x400814); 493 u32 class = nv_rd32(priv, 0x400814);
528 u32 r848 = nv_rd32(dev, 0x400848); 494 u32 r848 = nv_rd32(priv, 0x400848);
529 495
530 NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n"); 496 nv_error(priv, "TRAP DISPATCH_FAULT\n");
531 if (display && (addr & 0x80000000)) { 497 if (display && (addr & 0x80000000)) {
532 NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) " 498 nv_error(priv, "ch 0x%010llx "
533 "subc %d class 0x%04x mthd 0x%04x " 499 "subc %d class 0x%04x mthd 0x%04x "
534 "data 0x%08x%08x " 500 "data 0x%08x%08x "
535 "400808 0x%08x 400848 0x%08x\n", 501 "400808 0x%08x 400848 0x%08x\n",
536 chid, inst, subc, class, mthd, datah, 502 inst, subc, class, mthd, datah,
537 datal, addr, r848); 503 datal, addr, r848);
538 } else 504 } else
539 if (display) { 505 if (display) {
540 NV_INFO(dev, "PGRAPH - no stuck command?\n"); 506 nv_error(priv, "no stuck command?\n");
541 } 507 }
542 508
543 nv_wr32(dev, 0x400808, 0); 509 nv_wr32(priv, 0x400808, 0);
544 nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3); 510 nv_wr32(priv, 0x4008e8, nv_rd32(priv, 0x4008e8) & 3);
545 nv_wr32(dev, 0x400848, 0); 511 nv_wr32(priv, 0x400848, 0);
546 ustatus &= ~0x00000001; 512 ustatus &= ~0x00000001;
547 } 513 }
548 514
549 if (ustatus & 0x00000002) { 515 if (ustatus & 0x00000002) {
550 u32 addr = nv_rd32(dev, 0x40084c); 516 u32 addr = nv_rd32(priv, 0x40084c);
551 u32 subc = (addr & 0x00070000) >> 16; 517 u32 subc = (addr & 0x00070000) >> 16;
552 u32 mthd = (addr & 0x00001ffc); 518 u32 mthd = (addr & 0x00001ffc);
553 u32 data = nv_rd32(dev, 0x40085c); 519 u32 data = nv_rd32(priv, 0x40085c);
554 u32 class = nv_rd32(dev, 0x400814); 520 u32 class = nv_rd32(priv, 0x400814);
555 521
556 NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n"); 522 nv_error(priv, "TRAP DISPATCH_QUERY\n");
557 if (display && (addr & 0x80000000)) { 523 if (display && (addr & 0x80000000)) {
558 NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) " 524 nv_error(priv, "ch 0x%010llx "
559 "subc %d class 0x%04x mthd 0x%04x " 525 "subc %d class 0x%04x mthd 0x%04x "
560 "data 0x%08x 40084c 0x%08x\n", 526 "data 0x%08x 40084c 0x%08x\n",
561 chid, inst, subc, class, mthd, 527 inst, subc, class, mthd,
562 data, addr); 528 data, addr);
563 } else 529 } else
564 if (display) { 530 if (display) {
565 NV_INFO(dev, "PGRAPH - no stuck command?\n"); 531 nv_error(priv, "no stuck command?\n");
566 } 532 }
567 533
568 nv_wr32(dev, 0x40084c, 0); 534 nv_wr32(priv, 0x40084c, 0);
569 ustatus &= ~0x00000002; 535 ustatus &= ~0x00000002;
570 } 536 }
571 537
572 if (ustatus && display) { 538 if (ustatus && display) {
573 NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown " 539 nv_error(priv, "TRAP_DISPATCH (unknown "
574 "0x%08x)\n", ustatus); 540 "0x%08x)\n", ustatus);
575 } 541 }
576 542
577 nv_wr32(dev, 0x400804, 0xc0000000); 543 nv_wr32(priv, 0x400804, 0xc0000000);
578 nv_wr32(dev, 0x400108, 0x001); 544 nv_wr32(priv, 0x400108, 0x001);
579 status &= ~0x001; 545 status &= ~0x001;
580 if (!status) 546 if (!status)
581 return 0; 547 return 0;
@@ -583,81 +549,81 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
583 549
584 /* M2MF: Memory to memory copy engine. */ 550 /* M2MF: Memory to memory copy engine. */
585 if (status & 0x002) { 551 if (status & 0x002) {
586 u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff; 552 u32 ustatus = nv_rd32(priv, 0x406800) & 0x7fffffff;
587 if (display) { 553 if (display) {
588 NV_INFO(dev, "PGRAPH - TRAP_M2MF"); 554 nv_error(priv, "TRAP_M2MF");
589 nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus); 555 nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
590 printk("\n"); 556 printk("\n");
591 NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n", 557 nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
592 nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808), 558 nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
593 nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810)); 559 nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810));
594 560
595 } 561 }
596 562
597 /* No sane way found yet -- just reset the bugger. */ 563 /* No sane way found yet -- just reset the bugger. */
598 nv_wr32(dev, 0x400040, 2); 564 nv_wr32(priv, 0x400040, 2);
599 nv_wr32(dev, 0x400040, 0); 565 nv_wr32(priv, 0x400040, 0);
600 nv_wr32(dev, 0x406800, 0xc0000000); 566 nv_wr32(priv, 0x406800, 0xc0000000);
601 nv_wr32(dev, 0x400108, 0x002); 567 nv_wr32(priv, 0x400108, 0x002);
602 status &= ~0x002; 568 status &= ~0x002;
603 } 569 }
604 570
605 /* VFETCH: Fetches data from vertex buffers. */ 571 /* VFETCH: Fetches data from vertex buffers. */
606 if (status & 0x004) { 572 if (status & 0x004) {
607 u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff; 573 u32 ustatus = nv_rd32(priv, 0x400c04) & 0x7fffffff;
608 if (display) { 574 if (display) {
609 NV_INFO(dev, "PGRAPH - TRAP_VFETCH"); 575 nv_error(priv, "TRAP_VFETCH");
610 nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus); 576 nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
611 printk("\n"); 577 printk("\n");
612 NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n", 578 nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
613 nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08), 579 nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
614 nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10)); 580 nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10));
615 } 581 }
616 582
617 nv_wr32(dev, 0x400c04, 0xc0000000); 583 nv_wr32(priv, 0x400c04, 0xc0000000);
618 nv_wr32(dev, 0x400108, 0x004); 584 nv_wr32(priv, 0x400108, 0x004);
619 status &= ~0x004; 585 status &= ~0x004;
620 } 586 }
621 587
622 /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ 588 /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
623 if (status & 0x008) { 589 if (status & 0x008) {
624 ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff; 590 ustatus = nv_rd32(priv, 0x401800) & 0x7fffffff;
625 if (display) { 591 if (display) {
626 NV_INFO(dev, "PGRAPH - TRAP_STRMOUT"); 592 nv_error(priv, "TRAP_STRMOUT");
627 nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus); 593 nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
628 printk("\n"); 594 printk("\n");
629 NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n", 595 nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
630 nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808), 596 nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
631 nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810)); 597 nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810));
632 598
633 } 599 }
634 600
635 /* No sane way found yet -- just reset the bugger. */ 601 /* No sane way found yet -- just reset the bugger. */
636 nv_wr32(dev, 0x400040, 0x80); 602 nv_wr32(priv, 0x400040, 0x80);
637 nv_wr32(dev, 0x400040, 0); 603 nv_wr32(priv, 0x400040, 0);
638 nv_wr32(dev, 0x401800, 0xc0000000); 604 nv_wr32(priv, 0x401800, 0xc0000000);
639 nv_wr32(dev, 0x400108, 0x008); 605 nv_wr32(priv, 0x400108, 0x008);
640 status &= ~0x008; 606 status &= ~0x008;
641 } 607 }
642 608
643 /* CCACHE: Handles code and c[] caches and fills them. */ 609 /* CCACHE: Handles code and c[] caches and fills them. */
644 if (status & 0x010) { 610 if (status & 0x010) {
645 ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff; 611 ustatus = nv_rd32(priv, 0x405018) & 0x7fffffff;
646 if (display) { 612 if (display) {
647 NV_INFO(dev, "PGRAPH - TRAP_CCACHE"); 613 nv_error(priv, "TRAP_CCACHE");
648 nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus); 614 nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
649 printk("\n"); 615 printk("\n");
650 NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x" 616 nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
651 " %08x %08x %08x\n", 617 " %08x %08x %08x\n",
652 nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004), 618 nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004),
653 nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c), 619 nv_rd32(priv, 0x405008), nv_rd32(priv, 0x40500c),
654 nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014), 620 nv_rd32(priv, 0x405010), nv_rd32(priv, 0x405014),
655 nv_rd32(dev, 0x40501c)); 621 nv_rd32(priv, 0x40501c));
656 622
657 } 623 }
658 624
659 nv_wr32(dev, 0x405018, 0xc0000000); 625 nv_wr32(priv, 0x405018, 0xc0000000);
660 nv_wr32(dev, 0x400108, 0x010); 626 nv_wr32(priv, 0x400108, 0x010);
661 status &= ~0x010; 627 status &= ~0x010;
662 } 628 }
663 629
@@ -665,201 +631,248 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
665 * remaining, so try to handle it anyway. Perhaps related to that 631 * remaining, so try to handle it anyway. Perhaps related to that
666 * unknown DMA slot on tesla? */ 632 * unknown DMA slot on tesla? */
667 if (status & 0x20) { 633 if (status & 0x20) {
668 ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff; 634 ustatus = nv_rd32(priv, 0x402000) & 0x7fffffff;
669 if (display) 635 if (display)
670 NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus); 636 nv_error(priv, "TRAP_UNKC04 0x%08x\n", ustatus);
671 nv_wr32(dev, 0x402000, 0xc0000000); 637 nv_wr32(priv, 0x402000, 0xc0000000);
672 /* no status modifiction on purpose */ 638 /* no status modifiction on purpose */
673 } 639 }
674 640
675 /* TEXTURE: CUDA texturing units */ 641 /* TEXTURE: CUDA texturing units */
676 if (status & 0x040) { 642 if (status & 0x040) {
677 nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display, 643 nv50_priv_tp_trap(priv, 6, 0x408900, 0x408600, display,
678 "PGRAPH - TRAP_TEXTURE"); 644 "TRAP_TEXTURE");
679 nv_wr32(dev, 0x400108, 0x040); 645 nv_wr32(priv, 0x400108, 0x040);
680 status &= ~0x040; 646 status &= ~0x040;
681 } 647 }
682 648
683 /* MP: CUDA execution engines. */ 649 /* MP: CUDA execution engines. */
684 if (status & 0x080) { 650 if (status & 0x080) {
685 nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display, 651 nv50_priv_tp_trap(priv, 7, 0x408314, 0x40831c, display,
686 "PGRAPH - TRAP_MP"); 652 "TRAP_MP");
687 nv_wr32(dev, 0x400108, 0x080); 653 nv_wr32(priv, 0x400108, 0x080);
688 status &= ~0x080; 654 status &= ~0x080;
689 } 655 }
690 656
691 /* TPDMA: Handles TP-initiated uncached memory accesses: 657 /* TPDMA: Handles TP-initiated uncached memory accesses:
692 * l[], g[], stack, 2d surfaces, render targets. */ 658 * l[], g[], stack, 2d surfaces, render targets. */
693 if (status & 0x100) { 659 if (status & 0x100) {
694 nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display, 660 nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display,
695 "PGRAPH - TRAP_TPDMA"); 661 "TRAP_TPDMA");
696 nv_wr32(dev, 0x400108, 0x100); 662 nv_wr32(priv, 0x400108, 0x100);
697 status &= ~0x100; 663 status &= ~0x100;
698 } 664 }
699 665
700 if (status) { 666 if (status) {
701 if (display) 667 if (display)
702 NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status); 668 nv_error(priv, "TRAP: unknown 0x%08x\n", status);
703 nv_wr32(dev, 0x400108, status); 669 nv_wr32(priv, 0x400108, status);
704 } 670 }
705 671
706 return 1; 672 return 1;
707} 673}
708 674
709int 675static void
710nv50_graph_isr_chid(struct drm_device *dev, u64 inst) 676nv50_graph_intr(struct nouveau_subdev *subdev)
711{ 677{
712 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); 678 struct nv50_graph_priv *priv = (void *)subdev;
713 struct drm_nouveau_private *dev_priv = dev->dev_private; 679 struct nouveau_engine *engine = nv_engine(subdev);
714 struct nouveau_channel *chan; 680 struct nouveau_handle *handle = NULL;
715 unsigned long flags; 681 u32 stat = nv_rd32(priv, 0x400100);
716 int i; 682 u64 inst = (u64)(nv_rd32(priv, 0x40032c) & 0x0fffffff) << 12;
683 u32 addr = nv_rd32(priv, 0x400704);
684 u32 subc = (addr & 0x00070000) >> 16;
685 u32 mthd = (addr & 0x00001ffc);
686 u32 data = nv_rd32(priv, 0x400708);
687 u32 class = nv_rd32(priv, 0x400814);
688 u32 show = stat;
689
690 if (stat & 0x00000010) {
691 handle = nouveau_engctx_lookup_class(engine, inst, class);
692 if (handle && !nv_call(handle->object, mthd, data))
693 show &= ~0x00000010;
694 nouveau_engctx_handle_put(handle);
695 }
717 696
718 spin_lock_irqsave(&dev_priv->channels.lock, flags); 697 if (show & 0x00100000) {
719 for (i = 0; i < pfifo->channels; i++) { 698 u32 ecode = nv_rd32(priv, 0x400110);
720 chan = dev_priv->channels.ptr[i]; 699 nv_error(priv, "DATA_ERROR ");
721 if (!chan || !chan->ramin) 700 nouveau_enum_print(nv50_data_error_names, ecode);
722 continue; 701 printk("\n");
702 }
723 703
724 if (inst == chan->ramin->addr) 704 if (stat & 0x00200000) {
725 break; 705 if (!nv50_graph_trap_handler(priv, show, inst))
706 show &= ~0x00200000;
707 }
708
709 nv_wr32(priv, 0x400100, stat);
710 nv_wr32(priv, 0x400500, 0x00010001);
711
712 if (show) {
713 nv_info(priv, "");
714 nouveau_bitfield_print(nv50_graph_intr_name, show);
715 printk("\n");
716 nv_error(priv, "ch 0x%010llx subc %d class 0x%04x "
717 "mthd 0x%04x data 0x%08x\n",
718 inst, subc, class, mthd, data);
719 nv50_fb_trap(nouveau_fb(priv), 1);
726 } 720 }
727 spin_unlock_irqrestore(&dev_priv->channels.lock, flags); 721
728 return i; 722 if (nv_rd32(priv, 0x400824) & (1 << 31))
723 nv_wr32(priv, 0x400824, nv_rd32(priv, 0x400824) & ~(1 << 31));
729} 724}
730 725
731static void 726static int
732nv50_graph_isr(struct drm_device *dev) 727nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
728 struct nouveau_oclass *oclass, void *data, u32 size,
729 struct nouveau_object **pobject)
733{ 730{
734 u32 stat; 731 struct nv50_graph_priv *priv;
735 732 int ret;
736 while ((stat = nv_rd32(dev, 0x400100))) {
737 u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
738 u32 chid = nv50_graph_isr_chid(dev, inst);
739 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
740 u32 subc = (addr & 0x00070000) >> 16;
741 u32 mthd = (addr & 0x00001ffc);
742 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
743 u32 class = nv_rd32(dev, 0x400814);
744 u32 show = stat;
745
746 if (stat & 0x00000010) {
747 if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
748 mthd, data))
749 show &= ~0x00000010;
750 }
751 733
752 show = (show && nouveau_ratelimit()) ? show : 0; 734 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
735 *pobject = nv_object(priv);
736 if (ret)
737 return ret;
753 738
754 if (show & 0x00100000) { 739 nv_subdev(priv)->unit = 0x00201000;
755 u32 ecode = nv_rd32(dev, 0x400110); 740 nv_subdev(priv)->intr = nv50_graph_intr;
756 NV_INFO(dev, "PGRAPH - DATA_ERROR "); 741 nv_engine(priv)->cclass = &nv50_graph_cclass;
757 nouveau_enum_print(nv50_data_error_names, ecode);
758 printk("\n");
759 }
760 742
761 if (stat & 0x00200000) { 743 switch (nv_device(priv)->chipset) {
762 if (!nv50_pgraph_trap_handler(dev, show, inst, chid)) 744 case 0x50:
763 show &= ~0x00200000; 745 nv_engine(priv)->sclass = nv50_graph_sclass;
764 } 746 break;
747 case 0x84:
748 case 0x86:
749 case 0x92:
750 case 0x94:
751 case 0x96:
752 case 0x98:
753 nv_engine(priv)->sclass = nv84_graph_sclass;
754 break;
755 case 0xa0:
756 case 0xaa:
757 case 0xac:
758 nv_engine(priv)->sclass = nva0_graph_sclass;
759 break;
760 case 0xa3:
761 case 0xa5:
762 case 0xa8:
763 nv_engine(priv)->sclass = nva3_graph_sclass;
764 break;
765 case 0xaf:
766 nv_engine(priv)->sclass = nvaf_graph_sclass;
767 break;
765 768
766 nv_wr32(dev, 0x400100, stat); 769 };
767 nv_wr32(dev, 0x400500, 0x00010001);
768 770
769 if (show) { 771 if (nv_device(priv)->chipset == 0x50 ||
770 NV_INFO(dev, "PGRAPH -"); 772 nv_device(priv)->chipset == 0xac)
771 nouveau_bitfield_print(nv50_graph_intr, show); 773 nv_engine(priv)->tlb_flush = nv50_graph_tlb_flush;
772 printk("\n"); 774 else
773 NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d " 775 nv_engine(priv)->tlb_flush = nv84_graph_tlb_flush;
774 "class 0x%04x mthd 0x%04x data 0x%08x\n",
775 chid, inst, subc, class, mthd, data);
776 nv50_fb_vm_trap(dev, 1);
777 }
778 }
779 776
780 if (nv_rd32(dev, 0x400824) & (1 << 31)) 777 spin_lock_init(&priv->lock);
781 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); 778 return 0;
782} 779}
783 780
784static void 781static int
785nv50_graph_destroy(struct drm_device *dev, int engine) 782nv50_graph_init(struct nouveau_object *object)
786{ 783{
787 struct nv50_graph_engine *pgraph = nv_engine(dev, engine); 784 struct nv50_graph_priv *priv = (void *)object;
785 int ret, units, i;
788 786
789 NVOBJ_ENGINE_DEL(dev, GR); 787 ret = nouveau_graph_init(&priv->base);
788 if (ret)
789 return ret;
790 790
791 nouveau_irq_unregister(dev, 12); 791 /* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
792 kfree(pgraph); 792 nv_wr32(priv, 0x40008c, 0x00000004);
793}
794 793
795int 794 /* reset/enable traps and interrupts */
796nv50_graph_create(struct drm_device *dev) 795 nv_wr32(priv, 0x400804, 0xc0000000);
797{ 796 nv_wr32(priv, 0x406800, 0xc0000000);
798 struct drm_nouveau_private *dev_priv = dev->dev_private; 797 nv_wr32(priv, 0x400c04, 0xc0000000);
799 struct nv50_graph_engine *pgraph; 798 nv_wr32(priv, 0x401800, 0xc0000000);
800 int ret; 799 nv_wr32(priv, 0x405018, 0xc0000000);
800 nv_wr32(priv, 0x402000, 0xc0000000);
801
802 units = nv_rd32(priv, 0x001540);
803 for (i = 0; i < 16; i++) {
804 if (!(units & (1 << i)))
805 continue;
801 806
802 pgraph = kzalloc(sizeof(*pgraph),GFP_KERNEL); 807 if (nv_device(priv)->chipset < 0xa0) {
803 if (!pgraph) 808 nv_wr32(priv, 0x408900 + (i << 12), 0xc0000000);
804 return -ENOMEM; 809 nv_wr32(priv, 0x408e08 + (i << 12), 0xc0000000);
805 810 nv_wr32(priv, 0x408314 + (i << 12), 0xc0000000);
806 ret = nv50_grctx_init(dev, pgraph->ctxprog, ARRAY_SIZE(pgraph->ctxprog), 811 } else {
807 &pgraph->ctxprog_size, 812 nv_wr32(priv, 0x408600 + (i << 11), 0xc0000000);
808 &pgraph->grctx_size); 813 nv_wr32(priv, 0x408708 + (i << 11), 0xc0000000);
809 if (ret) { 814 nv_wr32(priv, 0x40831c + (i << 11), 0xc0000000);
810 NV_ERROR(dev, "PGRAPH: ctxprog build failed\n"); 815 }
811 kfree(pgraph);
812 return 0;
813 } 816 }
814 817
815 pgraph->base.destroy = nv50_graph_destroy; 818 nv_wr32(priv, 0x400108, 0xffffffff);
816 pgraph->base.init = nv50_graph_init; 819 nv_wr32(priv, 0x400138, 0xffffffff);
817 pgraph->base.fini = nv50_graph_fini; 820 nv_wr32(priv, 0x400100, 0xffffffff);
818 pgraph->base.context_new = nv50_graph_context_new; 821 nv_wr32(priv, 0x40013c, 0xffffffff);
819 pgraph->base.context_del = nv50_graph_context_del; 822 nv_wr32(priv, 0x400500, 0x00010001);
820 pgraph->base.object_new = nv50_graph_object_new;
821 if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac)
822 pgraph->base.tlb_flush = nv50_graph_tlb_flush;
823 else
824 pgraph->base.tlb_flush = nv84_graph_tlb_flush;
825 823
826 nouveau_irq_register(dev, 12, nv50_graph_isr); 824 /* upload context program, initialise ctxctl defaults */
825 ret = nv50_grctx_init(nv_device(priv), &priv->size);
826 if (ret)
827 return ret;
827 828
828 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base); 829 nv_wr32(priv, 0x400824, 0x00000000);
829 NVOBJ_CLASS(dev, 0x0030, GR); /* null */ 830 nv_wr32(priv, 0x400828, 0x00000000);
830 NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */ 831 nv_wr32(priv, 0x40082c, 0x00000000);
831 NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */ 832 nv_wr32(priv, 0x400830, 0x00000000);
833 nv_wr32(priv, 0x400724, 0x00000000);
834 nv_wr32(priv, 0x40032c, 0x00000000);
835 nv_wr32(priv, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */
832 836
833 /* tesla */ 837 /* some unknown zcull magic */
834 if (dev_priv->chipset == 0x50) 838 switch (nv_device(priv)->chipset & 0xf0) {
835 NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */ 839 case 0x50:
836 else 840 case 0x80:
837 if (dev_priv->chipset < 0xa0) 841 case 0x90:
838 NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */ 842 nv_wr32(priv, 0x402ca8, 0x00000800);
839 else { 843 break;
840 switch (dev_priv->chipset) { 844 case 0xa0:
841 case 0xa0: 845 default:
842 case 0xaa: 846 nv_wr32(priv, 0x402cc0, 0x00000000);
843 case 0xac: 847 if (nv_device(priv)->chipset == 0xa0 ||
844 NVOBJ_CLASS(dev, 0x8397, GR); 848 nv_device(priv)->chipset == 0xaa ||
845 break; 849 nv_device(priv)->chipset == 0xac) {
846 case 0xa3: 850 nv_wr32(priv, 0x402ca8, 0x00000802);
847 case 0xa5: 851 } else {
848 case 0xa8: 852 nv_wr32(priv, 0x402cc0, 0x00000000);
849 NVOBJ_CLASS(dev, 0x8597, GR); 853 nv_wr32(priv, 0x402ca8, 0x00000002);
850 break;
851 case 0xaf:
852 NVOBJ_CLASS(dev, 0x8697, GR);
853 break;
854 } 854 }
855 }
856 855
857 /* compute */ 856 break;
858 NVOBJ_CLASS(dev, 0x50c0, GR); 857 }
859 if (dev_priv->chipset > 0xa0 &&
860 dev_priv->chipset != 0xaa &&
861 dev_priv->chipset != 0xac)
862 NVOBJ_CLASS(dev, 0x85c0, GR);
863 858
859 /* zero out zcull regions */
860 for (i = 0; i < 8; i++) {
861 nv_wr32(priv, 0x402c20 + (i * 8), 0x00000000);
862 nv_wr32(priv, 0x402c24 + (i * 8), 0x00000000);
863 nv_wr32(priv, 0x402c28 + (i * 8), 0x00000000);
864 nv_wr32(priv, 0x402c2c + (i * 8), 0x00000000);
865 }
864 return 0; 866 return 0;
865} 867}
868
869struct nouveau_oclass
870nv50_graph_oclass = {
871 .handle = NV_ENGINE(GR, 0x50),
872 .ofuncs = &(struct nouveau_ofuncs) {
873 .ctor = nv50_graph_ctor,
874 .dtor = _nouveau_graph_dtor,
875 .init = nv50_graph_init,
876 .fini = _nouveau_graph_fini,
877 },
878};
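One behavioural change worth noting in the rewrite above: nv84_graph_tlb_flush() still polls the PGRAPH status registers (0x400380/0x400384/0x400388) against a 2000000000ns (2 second) deadline, but now returns -EBUSY on expiry instead of only logging. A minimal sketch of the same poll-with-deadline shape, with a monotonic clock and a stubbed idle test standing in for ptimer and the register reads:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Hypothetical stand-in for scanning the three PGRAPH status words. */
static bool engine_idle(void)
{
	return true;
}

/* Same shape as nv84_graph_tlb_flush(): poll until idle or a 2s deadline. */
static int wait_idle(void)
{
	uint64_t start = now_ns();
	bool idle, timeout = false;

	do {
		idle = engine_idle();
	} while (!idle && !(timeout = now_ns() - start > 2000000000ull));

	return timeout ? -EBUSY : 0;
}

int main(void)
{
	printf("wait_idle: %d\n", wait_idle());
	return 0;
}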
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h
new file mode 100644
index 000000000000..0505fb419bde
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h
@@ -0,0 +1,7 @@
1#ifndef __NV50_GRAPH_H__
2#define __NV50_GRAPH_H__
3
4int nv50_grctx_init(struct nouveau_device *, u32 *size);
5void nv50_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
6
7#endif
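nv50.h splits context setup into two phases: nv50_grctx_init() computes the required context image size once at engine creation (nv50_graph_ctor() stores it in priv->size), and nv50_grctx_fill() later writes defaults into each channel's freshly allocated gpuobj. A generic sketch of that size-then-fill pattern; the names, size, and fill values here are hypothetical, not the real ctxprog output:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Phase 1: report how large the context image must be. */
static int grctx_init(uint32_t *size)
{
	*size = 0x1000; /* hypothetical; the real size depends on chipset */
	return 0;
}

/* Phase 2: write default values into a buffer the caller allocated. */
static void grctx_fill(void *image, uint32_t size)
{
	memset(image, 0, size); /* the real code uploads per-chipset defaults */
}

int main(void)
{
	uint32_t size = 0;
	void *image;

	if (grctx_init(&size))
		return 1;
	image = malloc(size);
	if (!image)
		return 1;
	grctx_fill(image, size);
	printf("grctx image: %u bytes\n", size);
	free(image);
	return 0;
}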
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index f994d2f7e8d5..db8aefc3cf3e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2010 Red Hat Inc. 2 * Copyright 2012 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -22,94 +22,92 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <linux/firmware.h>
26#include <linux/module.h>
27
28#include "drmP.h"
29
30#include "nouveau_drv.h"
31#include <core/mm.h>
32#include <engine/fifo.h>
33
34#include "nvc0.h" 25#include "nvc0.h"
35#include "fuc/hubnvc0.fuc.h" 26#include "fuc/hubnvc0.fuc.h"
36#include "fuc/gpcnvc0.fuc.h" 27#include "fuc/gpcnvc0.fuc.h"
37 28
38static void 29/*******************************************************************************
39nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base) 30 * Graphics object classes
40{ 31 ******************************************************************************/
41 NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base, 32
42 nv_rd32(dev, base + 0x400)); 33static struct nouveau_oclass
43 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base, 34nvc0_graph_sclass[] = {
44 nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804), 35 { 0x902d, &nouveau_object_ofuncs },
45 nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c)); 36 { 0x9039, &nouveau_object_ofuncs },
46 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base, 37 { 0x9097, &nouveau_object_ofuncs },
47 nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814), 38 { 0x90c0, &nouveau_object_ofuncs },
48 nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c)); 39 {}
49} 40};
50 41
51void 42static struct nouveau_oclass
52nvc0_graph_ctxctl_debug(struct drm_device *dev) 43nvc1_graph_sclass[] = {
53{ 44 { 0x902d, &nouveau_object_ofuncs },
54 u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff; 45 { 0x9039, &nouveau_object_ofuncs },
55 u32 gpc; 46 { 0x9097, &nouveau_object_ofuncs },
56 47 { 0x90c0, &nouveau_object_ofuncs },
57 nvc0_graph_ctxctl_debug_unit(dev, 0x409000); 48 { 0x9197, &nouveau_object_ofuncs },
58 for (gpc = 0; gpc < gpcnr; gpc++) 49 {}
59 nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000)); 50};
60} 51
52static struct nouveau_oclass
53nvc8_graph_sclass[] = {
54 { 0x902d, &nouveau_object_ofuncs },
55 { 0x9039, &nouveau_object_ofuncs },
56 { 0x9097, &nouveau_object_ofuncs },
57 { 0x90c0, &nouveau_object_ofuncs },
58 { 0x9197, &nouveau_object_ofuncs },
59 { 0x9297, &nouveau_object_ofuncs },
60 {}
61};
62
63/*******************************************************************************
64 * PGRAPH context
65 ******************************************************************************/
61 66
62int 67int
63nvc0_graph_context_new(struct nouveau_channel *chan, int engine) 68nvc0_graph_context_ctor(struct nouveau_object *parent,
69 struct nouveau_object *engine,
70 struct nouveau_oclass *oclass, void *args, u32 size,
71 struct nouveau_object **pobject)
64{ 72{
65 struct drm_device *dev = chan->dev; 73 struct nouveau_vm *vm = nouveau_client(parent)->vm;
66 struct nvc0_graph_priv *priv = nv_engine(dev, engine); 74 struct nvc0_graph_priv *priv = (void *)engine;
67 struct nvc0_graph_data *data = priv->mmio_data; 75 struct nvc0_graph_data *data = priv->mmio_data;
68 struct nvc0_graph_mmio *mmio = priv->mmio_list; 76 struct nvc0_graph_mmio *mmio = priv->mmio_list;
69 struct nvc0_graph_chan *grch; 77 struct nvc0_graph_chan *chan;
70 struct nouveau_gpuobj *grctx;
71 int ret, i; 78 int ret, i;
72 79
73 grch = kzalloc(sizeof(*grch), GFP_KERNEL); 80 /* allocate memory for context, and fill with default values */
74 if (!grch) 81 ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
75 return -ENOMEM; 82 priv->size, 0x100,
76 chan->engctx[NVOBJ_ENGINE_GR] = grch; 83 NVOBJ_FLAG_ZERO_ALLOC, &chan);
77 84 *pobject = nv_object(chan);
78 ret = nouveau_gpuobj_new(dev, NULL, priv->size, 256, 0, &grch->grctx);
79 if (ret)
80 goto error;
81
82 ret = nouveau_gpuobj_map_vm(grch->grctx, chan->vm, NV_MEM_ACCESS_RW |
83 NV_MEM_ACCESS_SYS, &grch->grctx_vma);
84 if (ret) 85 if (ret)
85 return ret; 86 return ret;
86 87
87 grctx = grch->grctx;
88
89 /* allocate memory for a "mmio list" buffer that's used by the HUB 88 /* allocate memory for a "mmio list" buffer that's used by the HUB
90 * fuc to modify some per-context register settings on first load 89 * fuc to modify some per-context register settings on first load
91 * of the context. 90 * of the context.
92 */ 91 */
93 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x100, 0, &grch->mmio); 92 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x100, 0, &chan->mmio);
94 if (ret) 93 if (ret)
95 return ret; 94 return ret;
96 95
97 ret = nouveau_gpuobj_map_vm(grch->mmio, chan->vm, 96 ret = nouveau_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm,
98 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS, 97 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
99 &grch->mmio_vma); 98 &chan->mmio_vma);
100 if (ret) 99 if (ret)
101 return ret; 100 return ret;
102 101
103 /* allocate buffers referenced by mmio list */ 102 /* allocate buffers referenced by mmio list */
104 for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) { 103 for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
105 ret = nouveau_gpuobj_new(dev, NULL, data->size, data->align, 104 ret = nouveau_gpuobj_new(parent, NULL, data->size, data->align,
106 0, &grch->data[i].mem); 105 0, &chan->data[i].mem);
107 if (ret) 106 if (ret)
108 return ret; 107 return ret;
109 108
110 ret = nouveau_gpuobj_map_vm(grch->data[i].mem, chan->vm, 109 ret = nouveau_gpuobj_map_vm(chan->data[i].mem, vm, data->access,
111 data->access, 110 &chan->data[i].vma);
112 &grch->data[i].vma);
113 if (ret) 111 if (ret)
114 return ret; 112 return ret;
115 113
@@ -122,117 +120,378 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
122 u32 data = mmio->data; 120 u32 data = mmio->data;
123 121
124 if (mmio->shift) { 122 if (mmio->shift) {
125 u64 info = grch->data[mmio->buffer].vma.offset; 123 u64 info = chan->data[mmio->buffer].vma.offset;
126 data |= info >> mmio->shift; 124 data |= info >> mmio->shift;
127 } 125 }
128 126
129 nv_wo32(grch->mmio, grch->mmio_nr++ * 4, addr); 127 nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
130 nv_wo32(grch->mmio, grch->mmio_nr++ * 4, data); 128 nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
131 mmio++; 129 mmio++;
132 } 130 }
133 131
134 for (i = 0; i < priv->size; i += 4) 132 for (i = 0; i < priv->size; i += 4)
135 nv_wo32(grch->grctx, i, priv->data[i / 4]); 133 nv_wo32(chan, i, priv->data[i / 4]);
136
137 nv_wo32(chan->ramin, 0x0210, lower_32_bits(grch->grctx_vma.offset) | 4);
138 nv_wo32(chan->ramin, 0x0214, upper_32_bits(grch->grctx_vma.offset));
139 nvimem_flush(dev);
140 134
141 if (!priv->firmware) { 135 if (!priv->firmware) {
142 nv_wo32(grctx, 0x00, grch->mmio_nr / 2); 136 nv_wo32(chan, 0x00, chan->mmio_nr / 2);
143 nv_wo32(grctx, 0x04, grch->mmio_vma.offset >> 8); 137 nv_wo32(chan, 0x04, chan->mmio_vma.offset >> 8);
144 } else { 138 } else {
145 nv_wo32(grctx, 0xf4, 0); 139 nv_wo32(chan, 0xf4, 0);
146 nv_wo32(grctx, 0xf8, 0); 140 nv_wo32(chan, 0xf8, 0);
147 nv_wo32(grctx, 0x10, grch->mmio_nr / 2); 141 nv_wo32(chan, 0x10, chan->mmio_nr / 2);
148 nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio_vma.offset)); 142 nv_wo32(chan, 0x14, lower_32_bits(chan->mmio_vma.offset));
149 nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio_vma.offset)); 143 nv_wo32(chan, 0x18, upper_32_bits(chan->mmio_vma.offset));
150 nv_wo32(grctx, 0x1c, 1); 144 nv_wo32(chan, 0x1c, 1);
151 nv_wo32(grctx, 0x20, 0); 145 nv_wo32(chan, 0x20, 0);
152 nv_wo32(grctx, 0x28, 0); 146 nv_wo32(chan, 0x28, 0);
153 nv_wo32(grctx, 0x2c, 0); 147 nv_wo32(chan, 0x2c, 0);
154 } 148 }
155 nvimem_flush(dev);
156 return 0;
157 149
158error: 150 return 0;
159 priv->base.context_del(chan, engine);
160 return ret;
161} 151}
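
The constructor above assembles the per-context "mmio list": pairs of (register, value) words that the HUB fuc replays when the context first loads, where an entry with a non-zero shift has a per-context buffer's virtual address folded into its data word. A rough standalone model of that encoding follows; the struct, register offsets and addresses are illustrative only, not driver API:

    #include <stdint.h>
    #include <stdio.h>

    struct mmio_entry {              /* illustrative stand-in, not driver API */
    	uint32_t addr;           /* PGRAPH register the fuc will write */
    	uint32_t data;           /* value, possibly missing an address */
    	int      shift;          /* 0 = literal, else fold vma >> shift in */
    	int      buffer;         /* which per-context buffer supplies vma */
    };

    static uint32_t list[32];        /* models the 0x1000-byte mmio gpuobj */
    static int      list_nr;         /* models chan->mmio_nr */

    static void
    push_mmio(const struct mmio_entry *e, const uint64_t *vma)
    {
    	uint32_t data = e->data;

    	if (e->shift)                      /* same patching as the diff */
    		data |= (uint32_t)(vma[e->buffer] >> e->shift);
    	list[list_nr++] = e->addr;         /* nv_wo32(mmio, nr++ * 4, addr) */
    	list[list_nr++] = data;            /* nv_wo32(mmio, nr++ * 4, data) */
    }

    int
    main(void)
    {
    	uint64_t vma[2] = { 0x20000000ull, 0x21000000ull };  /* fake maps */
    	struct mmio_entry ents[] = {
    		{ 0x408004, 0x00000000, 8, 0 },  /* wants vma[0] >> 8 */
    		{ 0x405830, 0x00cf8000, 0, 0 },  /* literal value */
    	};
    	int i;

    	for (i = 0; i < 2; i++)
    		push_mmio(&ents[i], vma);
    	for (i = 0; i < list_nr; i += 2)
    		printf("entry %d: reg 0x%06x <- 0x%08x\n",
    		       i / 2, list[i], list[i + 1]);
    	return 0;
    }
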
162 152
163void 153void
164nvc0_graph_context_del(struct nouveau_channel *chan, int engine) 154nvc0_graph_context_dtor(struct nouveau_object *object)
165{ 155{
166 struct nvc0_graph_chan *grch = chan->engctx[engine]; 156 struct nvc0_graph_chan *chan = (void *)object;
167 int i; 157 int i;
168 158
169 for (i = 0; i < ARRAY_SIZE(grch->data); i++) { 159 for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
170 nouveau_gpuobj_unmap(&grch->data[i].vma); 160 nouveau_gpuobj_unmap(&chan->data[i].vma);
171 nouveau_gpuobj_ref(NULL, &grch->data[i].mem); 161 nouveau_gpuobj_ref(NULL, &chan->data[i].mem);
172 } 162 }
173 163
174 nouveau_gpuobj_unmap(&grch->mmio_vma); 164 nouveau_gpuobj_unmap(&chan->mmio_vma);
175 nouveau_gpuobj_ref(NULL, &grch->mmio); 165 nouveau_gpuobj_ref(NULL, &chan->mmio);
176 166
177 nouveau_gpuobj_unmap(&grch->grctx_vma); 167 nouveau_graph_context_destroy(&chan->base);
178 nouveau_gpuobj_ref(NULL, &grch->grctx);
179 chan->engctx[engine] = NULL;
180} 168}
181 169
182static int 170static struct nouveau_oclass
183nvc0_graph_object_new(struct nouveau_channel *chan, int engine, 171nvc0_graph_cclass = {
184 u32 handle, u16 class) 172 .ofuncs = &(struct nouveau_ofuncs) {
173 .ctor = nvc0_graph_context_ctor,
174 .dtor = nvc0_graph_context_dtor,
175 .init = _nouveau_graph_context_init,
176 .fini = _nouveau_graph_context_fini,
177 .rd32 = _nouveau_graph_context_rd32,
178 .wr32 = _nouveau_graph_context_wr32,
179 },
180};
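
nvc0_graph_cclass above is an instance of the commit's central pattern: each engine object is described by a static nouveau_oclass whose ofuncs table carries its constructor, destructor and init/fini hooks, so the core can instantiate and drive objects generically. A minimal sketch of that shape, with every name invented for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    struct object;

    struct ofuncs {                      /* illustrative, not the driver's */
    	int  (*ctor)(struct object **);
    	void (*dtor)(struct object *);
    	int  (*init)(struct object *);
    	int  (*fini)(struct object *);
    };

    struct oclass {
    	unsigned handle;             /* class tag, cf. NV_ENGCTX(GR, 0xc0) */
    	const struct ofuncs *ofuncs;
    };

    struct object {
    	const struct oclass *oclass;
    };

    static int
    demo_ctor(struct object **pobj)
    {
    	*pobj = calloc(1, sizeof(**pobj));
    	return *pobj ? 0 : -1;
    }

    static void demo_dtor(struct object *o) { free(o); }
    static int  demo_init(struct object *o) { printf("init %p\n", (void *)o); return 0; }
    static int  demo_fini(struct object *o) { printf("fini %p\n", (void *)o); return 0; }

    static const struct ofuncs demo_ofuncs = {
    	.ctor = demo_ctor, .dtor = demo_dtor,
    	.init = demo_init, .fini = demo_fini,
    };

    static const struct oclass demo_oclass = { 0xc0, &demo_ofuncs };

    int
    main(void)
    {
    	struct object *obj;

    	if (demo_oclass.ofuncs->ctor(&obj))    /* core instantiates... */
    		return 1;
    	obj->oclass = &demo_oclass;
    	obj->oclass->ofuncs->init(obj);        /* ...then drives the hooks */
    	obj->oclass->ofuncs->fini(obj);
    	obj->oclass->ofuncs->dtor(obj);
    	return 0;
    }
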
181
182/*******************************************************************************
183 * PGRAPH engine/subdev functions
184 ******************************************************************************/
185
186static void
187nvc0_graph_ctxctl_debug_unit(struct nvc0_graph_priv *priv, u32 base)
185{ 188{
186 return 0; 189 nv_error(priv, "%06x - done 0x%08x\n", base,
190 nv_rd32(priv, base + 0x400));
191 nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
192 nv_rd32(priv, base + 0x800), nv_rd32(priv, base + 0x804),
193 nv_rd32(priv, base + 0x808), nv_rd32(priv, base + 0x80c));
194 nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
195 nv_rd32(priv, base + 0x810), nv_rd32(priv, base + 0x814),
196 nv_rd32(priv, base + 0x818), nv_rd32(priv, base + 0x81c));
197}
198
199void
200nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *priv)
201{
202 u32 gpcnr = nv_rd32(priv, 0x409604) & 0xffff;
203 u32 gpc;
204
205 nvc0_graph_ctxctl_debug_unit(priv, 0x409000);
206 for (gpc = 0; gpc < gpcnr; gpc++)
207 nvc0_graph_ctxctl_debug_unit(priv, 0x502000 + (gpc * 0x8000));
208}
209
210static void
211nvc0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
212{
213 u32 ustat = nv_rd32(priv, 0x409c18);
214
215 if (ustat & 0x00000001)
216 nv_error(priv, "CTXCTRL ucode error\n");
217 if (ustat & 0x00080000)
218 nv_error(priv, "CTXCTRL watchdog timeout\n");
219 if (ustat & ~0x00080001)
220 nv_error(priv, "CTXCTRL 0x%08x\n", ustat);
221
222 nvc0_graph_ctxctl_debug(priv);
223 nv_wr32(priv, 0x409c20, ustat);
224}
225
226static void
227nvc0_graph_intr(struct nouveau_subdev *subdev)
228{
229 struct nvc0_graph_priv *priv = (void *)subdev;
230 struct nouveau_engine *engine = nv_engine(subdev);
231 struct nouveau_handle *handle = NULL;
232 u64 inst = (u64)(nv_rd32(priv, 0x409b00) & 0x0fffffff) << 12;
233 u32 stat = nv_rd32(priv, 0x400100);
234 u32 addr = nv_rd32(priv, 0x400704);
235 u32 mthd = (addr & 0x00003ffc);
236 u32 subc = (addr & 0x00070000) >> 16;
237 u32 data = nv_rd32(priv, 0x400708);
238 u32 code = nv_rd32(priv, 0x400110);
239 u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
240
241 if (stat & 0x00000010) {
242 handle = nouveau_engctx_lookup_class(engine, inst, class);
243 if (!handle || nv_call(handle->object, mthd, data)) {
244 nv_error(priv, "ILLEGAL_MTHD ch 0x%010llx "
245 "subc %d class 0x%04x mthd 0x%04x "
246 "data 0x%08x\n",
247 inst, subc, class, mthd, data);
248 }
249 nouveau_engctx_handle_put(handle);
250 nv_wr32(priv, 0x400100, 0x00000010);
251 stat &= ~0x00000010;
252 }
253
254 if (stat & 0x00000020) {
255 nv_error(priv, "ILLEGAL_CLASS ch 0x%010llx subc %d "
256 "class 0x%04x mthd 0x%04x data 0x%08x\n",
257 inst, subc, class, mthd, data);
258 nv_wr32(priv, 0x400100, 0x00000020);
259 stat &= ~0x00000020;
260 }
261
262 if (stat & 0x00100000) {
263 nv_error(priv, "DATA_ERROR [");
264 nouveau_enum_print(nv50_data_error_names, code);
265 printk("] ch 0x%010llx subc %d class 0x%04x "
266 "mthd 0x%04x data 0x%08x\n",
267 inst, subc, class, mthd, data);
268 nv_wr32(priv, 0x400100, 0x00100000);
269 stat &= ~0x00100000;
270 }
271
272 if (stat & 0x00200000) {
273 u32 trap = nv_rd32(priv, 0x400108);
274 nv_error(priv, "TRAP ch 0x%010llx status 0x%08x\n", inst, trap);
275 nv_wr32(priv, 0x400108, trap);
276 nv_wr32(priv, 0x400100, 0x00200000);
277 stat &= ~0x00200000;
278 }
279
280 if (stat & 0x00080000) {
281 nvc0_graph_ctxctl_isr(priv);
282 nv_wr32(priv, 0x400100, 0x00080000);
283 stat &= ~0x00080000;
284 }
285
286 if (stat) {
287 nv_error(priv, "unknown stat 0x%08x\n", stat);
288 nv_wr32(priv, 0x400100, stat);
289 }
290
291 nv_wr32(priv, 0x400500, 0x00010001);
292}
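
The handler above follows one pattern per interrupt source: test a bit in the 0x400100 status word, report it, acknowledge it by writing the bit back (write-one-to-clear), then mask it out so anything still set at the end is flagged as unknown. The same pattern in isolation, with a plain array standing in for the register file and the bit assignments kept only as examples:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t regs[1];                    /* regs[0] models 0x400100 */

    static uint32_t rd32(int r)             { return regs[r]; }
    static void     wr32(int r, uint32_t v) { regs[r] &= ~v; }  /* W1C ack */

    static void
    graph_intr(void)
    {
    	uint32_t stat = rd32(0);

    	if (stat & 0x00000010) {            /* ILLEGAL_MTHD */
    		puts("illegal method");
    		wr32(0, 0x00000010);
    		stat &= ~0x00000010;
    	}
    	if (stat & 0x00200000) {            /* TRAP */
    		puts("trap");
    		wr32(0, 0x00200000);
    		stat &= ~0x00200000;
    	}
    	if (stat)                           /* anything left is unexpected */
    		printf("unknown stat 0x%08x\n", stat);
    }

    int
    main(void)
    {
    	regs[0] = 0x00200010 | 0x00000002;  /* mthd + trap + unknown bit */
    	graph_intr();
    	return 0;
    }
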
293
294int
295nvc0_graph_ctor_fw(struct nvc0_graph_priv *priv, const char *fwname,
296 struct nvc0_graph_fuc *fuc)
297{
298 struct nouveau_device *device = nv_device(priv);
299 const struct firmware *fw;
300 char f[32];
301 int ret;
302
303 snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
304 ret = request_firmware(&fw, f, &device->pdev->dev);
305 if (ret) {
306 snprintf(f, sizeof(f), "nouveau/%s", fwname);
307 ret = request_firmware(&fw, f, &device->pdev->dev);
308 if (ret) {
309 nv_error(priv, "failed to load %s\n", fwname);
310 return ret;
311 }
312 }
313
314 fuc->size = fw->size;
315 fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
316 release_firmware(fw);
317 return (fuc->data != NULL) ? 0 : -ENOMEM;
187} 318}
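
nvc0_graph_ctor_fw() tries a chipset-qualified firmware name first and falls back to a generic one. A sketch of just that naming policy, with request_firmware() replaced by a stub so the fallback branch is visible; the stub and its "existing" path are assumptions for the demo:

    #include <stdio.h>
    #include <string.h>

    static int
    stub_request_firmware(const char *path)  /* only the generic name "exists" */
    {
    	return strcmp(path, "nouveau/fuc409c") ? -2 : 0;
    }

    static int
    load_fuc(int chipset, const char *name)
    {
    	char f[32];

    	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", chipset, name);
    	if (stub_request_firmware(f)) {
    		snprintf(f, sizeof(f), "nouveau/%s", name);
    		if (stub_request_firmware(f)) {
    			fprintf(stderr, "failed to load %s\n", name);
    			return -1;
    		}
    	}
    	printf("loaded %s\n", f);
    	return 0;
    }

    int
    main(void)
    {
    	return load_fuc(0xc0, "fuc409c") ? 1 : 0;
    }
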
188 319
189static int 320static int
190nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend) 321nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
322 struct nouveau_oclass *oclass, void *data, u32 size,
323 struct nouveau_object **pobject)
191{ 324{
325 struct nouveau_device *device = nv_device(parent);
326 struct nvc0_graph_priv *priv;
327 bool enable = true;
328 int ret, i;
329
330 switch (device->chipset) {
331 case 0xd9: /* known broken without binary driver firmware */
332 enable = false;
333 break;
334 default:
335 break;
336 }
337
338 ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
339 *pobject = nv_object(priv);
340 if (ret)
341 return ret;
342
343 nv_subdev(priv)->unit = 0x18001000;
344 nv_subdev(priv)->intr = nvc0_graph_intr;
345 nv_engine(priv)->cclass = &nvc0_graph_cclass;
346
347 if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
348 nv_info(priv, "using external firmware\n");
349 if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
350 nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
351 nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
352 nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
353 return -EINVAL;
354 priv->firmware = true;
355 }
356
357 switch (nvc0_graph_class(priv)) {
358 case 0x9097:
359 nv_engine(priv)->sclass = nvc0_graph_sclass;
360 break;
361 case 0x9197:
362 nv_engine(priv)->sclass = nvc1_graph_sclass;
363 break;
364 case 0x9297:
365 nv_engine(priv)->sclass = nvc8_graph_sclass;
366 break;
367 }
368
369 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4);
370 if (ret)
371 return ret;
372
373 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8);
374 if (ret)
375 return ret;
376
377 for (i = 0; i < 0x1000; i += 4) {
378 nv_wo32(priv->unk4188b4, i, 0x00000010);
379 nv_wo32(priv->unk4188b8, i, 0x00000010);
380 }
381
382 priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
383 priv->gpc_nr = nv_rd32(priv, 0x409604) & 0x0000001f;
384 for (i = 0; i < priv->gpc_nr; i++) {
385 priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608));
386 priv->tpc_total += priv->tpc_nr[i];
387 }
388
389 /*XXX: these need figuring out... though it might not even matter */
390 switch (nv_device(priv)->chipset) {
391 case 0xc0:
392 if (priv->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
393 priv->magic_not_rop_nr = 0x07;
394 } else
395 if (priv->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
396 priv->magic_not_rop_nr = 0x05;
397 } else
398 if (priv->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
399 priv->magic_not_rop_nr = 0x06;
400 }
401 break;
402 case 0xc3: /* 450, 4/0/0/0, 2 */
403 priv->magic_not_rop_nr = 0x03;
404 break;
405 case 0xc4: /* 460, 3/4/0/0, 4 */
406 priv->magic_not_rop_nr = 0x01;
407 break;
408 case 0xc1: /* 2/0/0/0, 1 */
409 priv->magic_not_rop_nr = 0x01;
410 break;
411 case 0xc8: /* 4/4/3/4, 5 */
412 priv->magic_not_rop_nr = 0x06;
413 break;
414 case 0xce: /* 4/4/0/0, 4 */
415 priv->magic_not_rop_nr = 0x03;
416 break;
417 case 0xcf: /* 4/0/0/0, 3 */
418 priv->magic_not_rop_nr = 0x03;
419 break;
420 case 0xd9: /* 1/0/0/0, 1 */
421 priv->magic_not_rop_nr = 0x01;
422 break;
423 }
424
192 return 0; 425 return 0;
193} 426}
194 427
195static void 428static void
196nvc0_graph_init_obj418880(struct drm_device *dev) 429nvc0_graph_dtor_fw(struct nvc0_graph_fuc *fuc)
430{
431 if (fuc->data) {
432 kfree(fuc->data);
433 fuc->data = NULL;
434 }
435}
436
437void
438nvc0_graph_dtor(struct nouveau_object *object)
439{
440 struct nvc0_graph_priv *priv = (void *)object;
441
442 if (priv->data)
443 kfree(priv->data);
444
445 nvc0_graph_dtor_fw(&priv->fuc409c);
446 nvc0_graph_dtor_fw(&priv->fuc409d);
447 nvc0_graph_dtor_fw(&priv->fuc41ac);
448 nvc0_graph_dtor_fw(&priv->fuc41ad);
449
450 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
451 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
452
453 nouveau_graph_destroy(&priv->base);
454}
455
456static void
457nvc0_graph_init_obj418880(struct nvc0_graph_priv *priv)
197{ 458{
198 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
199 int i; 459 int i;
200 460
201 nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000); 461 nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
202 nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000); 462 nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
203 for (i = 0; i < 4; i++) 463 for (i = 0; i < 4; i++)
204 nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000); 464 nv_wr32(priv, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
205 nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8); 465 nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
206 nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8); 466 nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
207} 467}
208 468
209static void 469static void
210nvc0_graph_init_regs(struct drm_device *dev) 470nvc0_graph_init_regs(struct nvc0_graph_priv *priv)
211{ 471{
212 nv_wr32(dev, 0x400080, 0x003083c2); 472 nv_wr32(priv, 0x400080, 0x003083c2);
213 nv_wr32(dev, 0x400088, 0x00006fe7); 473 nv_wr32(priv, 0x400088, 0x00006fe7);
214 nv_wr32(dev, 0x40008c, 0x00000000); 474 nv_wr32(priv, 0x40008c, 0x00000000);
215 nv_wr32(dev, 0x400090, 0x00000030); 475 nv_wr32(priv, 0x400090, 0x00000030);
216 nv_wr32(dev, 0x40013c, 0x013901f7); 476 nv_wr32(priv, 0x40013c, 0x013901f7);
217 nv_wr32(dev, 0x400140, 0x00000100); 477 nv_wr32(priv, 0x400140, 0x00000100);
218 nv_wr32(dev, 0x400144, 0x00000000); 478 nv_wr32(priv, 0x400144, 0x00000000);
219 nv_wr32(dev, 0x400148, 0x00000110); 479 nv_wr32(priv, 0x400148, 0x00000110);
220 nv_wr32(dev, 0x400138, 0x00000000); 480 nv_wr32(priv, 0x400138, 0x00000000);
221 nv_wr32(dev, 0x400130, 0x00000000); 481 nv_wr32(priv, 0x400130, 0x00000000);
222 nv_wr32(dev, 0x400134, 0x00000000); 482 nv_wr32(priv, 0x400134, 0x00000000);
223 nv_wr32(dev, 0x400124, 0x00000002); 483 nv_wr32(priv, 0x400124, 0x00000002);
224} 484}
225 485
226static void 486static void
227nvc0_graph_init_gpc_0(struct drm_device *dev) 487nvc0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
228{ 488{
229 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
230 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total); 489 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
231 u32 data[TPC_MAX / 8]; 490 u32 data[TPC_MAX / 8];
232 u8 tpnr[GPC_MAX]; 491 u8 tpcnr[GPC_MAX];
233 int i, gpc, tpc; 492 int i, gpc, tpc;
234 493
235 nv_wr32(dev, TPC_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */ 494 nv_wr32(priv, TPC_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
236 495
237 /* 496 /*
238 * TP ROP UNKVAL(magic_not_rop_nr) 497 * TP ROP UNKVAL(magic_not_rop_nr)
@@ -244,205 +503,208 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
244 */ 503 */
245 504
246 memset(data, 0x00, sizeof(data)); 505 memset(data, 0x00, sizeof(data));
247 memcpy(tpnr, priv->tpc_nr, sizeof(priv->tpc_nr)); 506 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
248 for (i = 0, gpc = -1; i < priv->tpc_total; i++) { 507 for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
249 do { 508 do {
250 gpc = (gpc + 1) % priv->gpc_nr; 509 gpc = (gpc + 1) % priv->gpc_nr;
251 } while (!tpnr[gpc]); 510 } while (!tpcnr[gpc]);
252 tpc = priv->tpc_nr[gpc] - tpnr[gpc]--; 511 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
253 512
254 data[i / 8] |= tpc << ((i % 8) * 4); 513 data[i / 8] |= tpc << ((i % 8) * 4);
255 } 514 }
256 515
257 nv_wr32(dev, GPC_BCAST(0x0980), data[0]); 516 nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
258 nv_wr32(dev, GPC_BCAST(0x0984), data[1]); 517 nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
259 nv_wr32(dev, GPC_BCAST(0x0988), data[2]); 518 nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
260 nv_wr32(dev, GPC_BCAST(0x098c), data[3]); 519 nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
261 520
262 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 521 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
263 nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 | 522 nv_wr32(priv, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
264 priv->tpc_nr[gpc]); 523 priv->tpc_nr[gpc]);
265 nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total); 524 nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
266 nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918); 525 nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
267 } 526 }
268 527
269 nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918); 528 nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
270 nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800)); 529 nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
271} 530}
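
init_gpc_0() above deals TPCs out round-robin across the GPCs, packing one 4-bit index per TPC, eight to a 32-bit word, and derives magicgpc918 as ceil(0x00800000 / tpc_total). A standalone rerun of that arithmetic for a hypothetical 3/3/4/4 layout (the 14-TPC case the constructor's comments associate with NVC0/470):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define GPC_MAX 4
    #define TPC_MAX 32
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int
    main(void)
    {
    	uint8_t  tpc_nr[GPC_MAX] = { 3, 3, 4, 4 };  /* assumed layout */
    	uint8_t  tpcnr[GPC_MAX];
    	uint32_t data[TPC_MAX / 8] = { 0 };
    	int tpc_total = 14, i, gpc = -1, tpc;

    	memcpy(tpcnr, tpc_nr, sizeof(tpc_nr));
    	for (i = 0; i < tpc_total; i++) {
    		do {                     /* next GPC that still has TPCs */
    			gpc = (gpc + 1) % GPC_MAX;
    		} while (!tpcnr[gpc]);
    		tpc = tpc_nr[gpc] - tpcnr[gpc]--;  /* index within that GPC */
    		data[i / 8] |= (uint32_t)tpc << ((i % 8) * 4);
    	}

    	for (i = 0; i < TPC_MAX / 8; i++)
    		printf("data[%d] = 0x%08x\n", i, data[i]);
    	printf("magicgpc918 = 0x%08x\n",
    	       (uint32_t)DIV_ROUND_UP(0x00800000, tpc_total));
    	return 0;
    }
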
272 531
273static void 532static void
274nvc0_graph_init_units(struct drm_device *dev) 533nvc0_graph_init_units(struct nvc0_graph_priv *priv)
275{ 534{
276 nv_wr32(dev, 0x409c24, 0x000f0000); 535 nv_wr32(priv, 0x409c24, 0x000f0000);
277 nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */ 536 nv_wr32(priv, 0x404000, 0xc0000000); /* DISPATCH */
278 nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */ 537 nv_wr32(priv, 0x404600, 0xc0000000); /* M2MF */
279 nv_wr32(dev, 0x408030, 0xc0000000); 538 nv_wr32(priv, 0x408030, 0xc0000000);
280 nv_wr32(dev, 0x40601c, 0xc0000000); 539 nv_wr32(priv, 0x40601c, 0xc0000000);
281 nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */ 540 nv_wr32(priv, 0x404490, 0xc0000000); /* MACRO */
282 nv_wr32(dev, 0x406018, 0xc0000000); 541 nv_wr32(priv, 0x406018, 0xc0000000);
283 nv_wr32(dev, 0x405840, 0xc0000000); 542 nv_wr32(priv, 0x405840, 0xc0000000);
284 nv_wr32(dev, 0x405844, 0x00ffffff); 543 nv_wr32(priv, 0x405844, 0x00ffffff);
285 nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008); 544 nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
286 nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000); 545 nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
287} 546}
288 547
289static void 548static void
290nvc0_graph_init_gpc_1(struct drm_device *dev) 549nvc0_graph_init_gpc_1(struct nvc0_graph_priv *priv)
291{ 550{
292 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); 551 int gpc, tpc;
293 int gpc, tp;
294 552
295 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 553 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
296 nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000); 554 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
297 nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000); 555 nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
298 nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000); 556 nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
299 nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000); 557 nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
300 for (tp = 0; tp < priv->tpc_nr[gpc]; tp++) { 558 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
301 nv_wr32(dev, TPC_UNIT(gpc, tp, 0x508), 0xffffffff); 559 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
302 nv_wr32(dev, TPC_UNIT(gpc, tp, 0x50c), 0xffffffff); 560 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
303 nv_wr32(dev, TPC_UNIT(gpc, tp, 0x224), 0xc0000000); 561 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
304 nv_wr32(dev, TPC_UNIT(gpc, tp, 0x48c), 0xc0000000); 562 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
305 nv_wr32(dev, TPC_UNIT(gpc, tp, 0x084), 0xc0000000); 563 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
306 nv_wr32(dev, TPC_UNIT(gpc, tp, 0x644), 0x001ffffe); 564 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
307 nv_wr32(dev, TPC_UNIT(gpc, tp, 0x64c), 0x0000000f); 565 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
308 } 566 }
309 nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff); 567 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
310 nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff); 568 nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
311 } 569 }
312} 570}
313 571
314static void 572static void
315nvc0_graph_init_rop(struct drm_device *dev) 573nvc0_graph_init_rop(struct nvc0_graph_priv *priv)
316{ 574{
317 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
318 int rop; 575 int rop;
319 576
320 for (rop = 0; rop < priv->rop_nr; rop++) { 577 for (rop = 0; rop < priv->rop_nr; rop++) {
321 nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000); 578 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
322 nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000); 579 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
323 nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff); 580 nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
324 nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff); 581 nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
325 } 582 }
326} 583}
327 584
328static void 585void
329nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base, 586nvc0_graph_init_fw(struct nvc0_graph_priv *priv, u32 fuc_base,
330 struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data) 587 struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
331{ 588{
332 int i; 589 int i;
333 590
334 nv_wr32(dev, fuc_base + 0x01c0, 0x01000000); 591 nv_wr32(priv, fuc_base + 0x01c0, 0x01000000);
335 for (i = 0; i < data->size / 4; i++) 592 for (i = 0; i < data->size / 4; i++)
336 nv_wr32(dev, fuc_base + 0x01c4, data->data[i]); 593 nv_wr32(priv, fuc_base + 0x01c4, data->data[i]);
337 594
338 nv_wr32(dev, fuc_base + 0x0180, 0x01000000); 595 nv_wr32(priv, fuc_base + 0x0180, 0x01000000);
339 for (i = 0; i < code->size / 4; i++) { 596 for (i = 0; i < code->size / 4; i++) {
340 if ((i & 0x3f) == 0) 597 if ((i & 0x3f) == 0)
341 nv_wr32(dev, fuc_base + 0x0188, i >> 6); 598 nv_wr32(priv, fuc_base + 0x0188, i >> 6);
342 nv_wr32(dev, fuc_base + 0x0184, code->data[i]); 599 nv_wr32(priv, fuc_base + 0x0184, code->data[i]);
343 } 600 }
344} 601}
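
The upload loop above feeds the falcon through two ports: the data port auto-increments, while the code port is paged in 64-word (256-byte) blocks, which is what the (i & 0x3f) == 0 test and the write of i >> 6 select. A logging model of the code-side loop, with register writes printed rather than performed:

    #include <stdio.h>
    #include <stdint.h>

    static void
    wr32(uint32_t reg, uint32_t val)
    {
    	printf("wr 0x%06x <- 0x%08x\n", reg, val);
    }

    static void
    upload_code(uint32_t fuc_base, const uint32_t *code, int words)
    {
    	int i;

    	wr32(fuc_base + 0x0180, 0x01000000);     /* begin code upload */
    	for (i = 0; i < words; i++) {
    		if ((i & 0x3f) == 0)             /* new 256-byte page */
    			wr32(fuc_base + 0x0188, i >> 6);
    		wr32(fuc_base + 0x0184, code[i]);
    	}
    }

    int
    main(void)
    {
    	uint32_t code[130] = { 0 };              /* spans three pages */
    	upload_code(0x409000, code, 130);
    	return 0;
    }
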
345 602
346static int 603static int
347nvc0_graph_init_ctxctl(struct drm_device *dev) 604nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
348{ 605{
349 struct drm_nouveau_private *dev_priv = dev->dev_private;
350 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
351 u32 r000260; 606 u32 r000260;
352 int i; 607 int i;
353 608
354 if (priv->firmware) { 609 if (priv->firmware) {
355 /* load fuc microcode */ 610 /* load fuc microcode */
356 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000); 611 r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
357 nvc0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, 612 nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c,
358 &priv->fuc409d); 613 &priv->fuc409d);
359 nvc0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, 614 nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac,
360 &priv->fuc41ad); 615 &priv->fuc41ad);
361 nv_wr32(dev, 0x000260, r000260); 616 nv_wr32(priv, 0x000260, r000260);
362 617
363 /* start both of them running */ 618 /* start both of them running */
364 nv_wr32(dev, 0x409840, 0xffffffff); 619 nv_wr32(priv, 0x409840, 0xffffffff);
365 nv_wr32(dev, 0x41a10c, 0x00000000); 620 nv_wr32(priv, 0x41a10c, 0x00000000);
366 nv_wr32(dev, 0x40910c, 0x00000000); 621 nv_wr32(priv, 0x40910c, 0x00000000);
367 nv_wr32(dev, 0x41a100, 0x00000002); 622 nv_wr32(priv, 0x41a100, 0x00000002);
368 nv_wr32(dev, 0x409100, 0x00000002); 623 nv_wr32(priv, 0x409100, 0x00000002);
369 if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001)) 624 if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
370 NV_INFO(dev, "0x409800 wait failed\n"); 625 nv_info(priv, "0x409800 wait failed\n");
371 626
372 nv_wr32(dev, 0x409840, 0xffffffff); 627 nv_wr32(priv, 0x409840, 0xffffffff);
373 nv_wr32(dev, 0x409500, 0x7fffffff); 628 nv_wr32(priv, 0x409500, 0x7fffffff);
374 nv_wr32(dev, 0x409504, 0x00000021); 629 nv_wr32(priv, 0x409504, 0x00000021);
375 630
376 nv_wr32(dev, 0x409840, 0xffffffff); 631 nv_wr32(priv, 0x409840, 0xffffffff);
377 nv_wr32(dev, 0x409500, 0x00000000); 632 nv_wr32(priv, 0x409500, 0x00000000);
378 nv_wr32(dev, 0x409504, 0x00000010); 633 nv_wr32(priv, 0x409504, 0x00000010);
379 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) { 634 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
380 NV_ERROR(dev, "fuc09 req 0x10 timeout\n"); 635 nv_error(priv, "fuc09 req 0x10 timeout\n");
381 return -EBUSY; 636 return -EBUSY;
382 } 637 }
383 priv->size = nv_rd32(dev, 0x409800); 638 priv->size = nv_rd32(priv, 0x409800);
384 639
385 nv_wr32(dev, 0x409840, 0xffffffff); 640 nv_wr32(priv, 0x409840, 0xffffffff);
386 nv_wr32(dev, 0x409500, 0x00000000); 641 nv_wr32(priv, 0x409500, 0x00000000);
387 nv_wr32(dev, 0x409504, 0x00000016); 642 nv_wr32(priv, 0x409504, 0x00000016);
388 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) { 643 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
389 NV_ERROR(dev, "fuc09 req 0x16 timeout\n"); 644 nv_error(priv, "fuc09 req 0x16 timeout\n");
390 return -EBUSY; 645 return -EBUSY;
391 } 646 }
392 647
393 nv_wr32(dev, 0x409840, 0xffffffff); 648 nv_wr32(priv, 0x409840, 0xffffffff);
394 nv_wr32(dev, 0x409500, 0x00000000); 649 nv_wr32(priv, 0x409500, 0x00000000);
395 nv_wr32(dev, 0x409504, 0x00000025); 650 nv_wr32(priv, 0x409504, 0x00000025);
396 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) { 651 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
397 NV_ERROR(dev, "fuc09 req 0x25 timeout\n"); 652 nv_error(priv, "fuc09 req 0x25 timeout\n");
398 return -EBUSY; 653 return -EBUSY;
399 } 654 }
400 655
401 goto done; 656 if (priv->data == NULL) {
657 int ret = nvc0_grctx_generate(priv);
658 if (ret) {
659 nv_error(priv, "failed to construct context\n");
660 return ret;
661 }
662 }
663
664 return 0;
402 } 665 }
403 666
404 /* load HUB microcode */ 667 /* load HUB microcode */
405 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000); 668 r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
406 nv_wr32(dev, 0x4091c0, 0x01000000); 669 nv_wr32(priv, 0x4091c0, 0x01000000);
407 for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++) 670 for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
408 nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]); 671 nv_wr32(priv, 0x4091c4, nvc0_grhub_data[i]);
409 672
410 nv_wr32(dev, 0x409180, 0x01000000); 673 nv_wr32(priv, 0x409180, 0x01000000);
411 for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) { 674 for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
412 if ((i & 0x3f) == 0) 675 if ((i & 0x3f) == 0)
413 nv_wr32(dev, 0x409188, i >> 6); 676 nv_wr32(priv, 0x409188, i >> 6);
414 nv_wr32(dev, 0x409184, nvc0_grhub_code[i]); 677 nv_wr32(priv, 0x409184, nvc0_grhub_code[i]);
415 } 678 }
416 679
417 /* load GPC microcode */ 680 /* load GPC microcode */
418 nv_wr32(dev, 0x41a1c0, 0x01000000); 681 nv_wr32(priv, 0x41a1c0, 0x01000000);
419 for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++) 682 for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
420 nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]); 683 nv_wr32(priv, 0x41a1c4, nvc0_grgpc_data[i]);
421 684
422 nv_wr32(dev, 0x41a180, 0x01000000); 685 nv_wr32(priv, 0x41a180, 0x01000000);
423 for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) { 686 for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
424 if ((i & 0x3f) == 0) 687 if ((i & 0x3f) == 0)
425 nv_wr32(dev, 0x41a188, i >> 6); 688 nv_wr32(priv, 0x41a188, i >> 6);
426 nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]); 689 nv_wr32(priv, 0x41a184, nvc0_grgpc_code[i]);
427 } 690 }
428 nv_wr32(dev, 0x000260, r000260); 691 nv_wr32(priv, 0x000260, r000260);
429 692
430 /* start HUB ucode running, it'll init the GPCs */ 693 /* start HUB ucode running, it'll init the GPCs */
431 nv_wr32(dev, 0x409800, dev_priv->chipset); 694 nv_wr32(priv, 0x409800, nv_device(priv)->chipset);
432 nv_wr32(dev, 0x40910c, 0x00000000); 695 nv_wr32(priv, 0x40910c, 0x00000000);
433 nv_wr32(dev, 0x409100, 0x00000002); 696 nv_wr32(priv, 0x409100, 0x00000002);
434 if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) { 697 if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
435 NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n"); 698 nv_error(priv, "HUB_INIT timed out\n");
436 nvc0_graph_ctxctl_debug(dev); 699 nvc0_graph_ctxctl_debug(priv);
437 return -EBUSY; 700 return -EBUSY;
438 } 701 }
439 702
440 priv->size = nv_rd32(dev, 0x409804); 703 priv->size = nv_rd32(priv, 0x409804);
441done:
442 if (priv->data == NULL) { 704 if (priv->data == NULL) {
443 int ret = nvc0_grctx_generate(dev); 705 int ret = nvc0_grctx_generate(priv);
444 if (ret) { 706 if (ret) {
445 NV_ERROR(dev, "PGRAPH: failed to construct context\n"); 707 nv_error(priv, "failed to construct context\n");
446 return ret; 708 return ret;
447 } 709 }
448 710
@@ -453,37 +715,39 @@ done:
453} 715}
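
Each firmware request in init_ctxctl() has the same shape: arm the done register, write an argument and a request code into the fuc's mailbox, then poll until the status changes or bail out with -EBUSY. A host-side model with a fake ucode answering the 0x10 "query context size" request; the mailbox layout and the size value are invented for the demo:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t mbox_arg, mbox_cmd, mbox_stat;

    static void
    fake_ucode_step(void)                /* real fuc would read mbox_arg too */
    {
    	if (mbox_cmd == 0x10) {      /* "query grctx size" request */
    		mbox_stat = 0x9000;  /* made-up size in bytes */
    		mbox_cmd = 0;
    	}
    }

    static int
    fuc_request(uint32_t arg, uint32_t cmd, uint32_t *result)
    {
    	int tries;

    	mbox_stat = 0;               /* arm: wait for stat != 0 */
    	mbox_arg = arg;
    	mbox_cmd = cmd;
    	for (tries = 0; tries < 1000; tries++) {
    		fake_ucode_step();   /* stands in for the running fuc */
    		if (mbox_stat != 0) {
    			*result = mbox_stat;
    			return 0;
    		}
    	}
    	return -16;                  /* -EBUSY on timeout */
    }

    int
    main(void)
    {
    	uint32_t size;

    	if (fuc_request(0x00000000, 0x10, &size))
    		return 1;
    	printf("grctx size = 0x%x\n", size);
    	return 0;
    }
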
454 716
455static int 717static int
456nvc0_graph_init(struct drm_device *dev, int engine) 718nvc0_graph_init(struct nouveau_object *object)
457{ 719{
720 struct nvc0_graph_priv *priv = (void *)object;
458 int ret; 721 int ret;
459 722
460reset: 723reset:
461 nv_mask(dev, 0x000200, 0x18001000, 0x00000000); 724 ret = nouveau_graph_init(&priv->base);
462 nv_mask(dev, 0x000200, 0x18001000, 0x18001000); 725 if (ret)
463 726 return ret;
464 nvc0_graph_init_obj418880(dev); 727
465 nvc0_graph_init_regs(dev); 728 nvc0_graph_init_obj418880(priv);
466 /*nvc0_graph_init_unitplemented_magics(dev);*/ 729 nvc0_graph_init_regs(priv);
467 nvc0_graph_init_gpc_0(dev); 730 /*nvc0_graph_init_unitplemented_magics(priv);*/
468 /*nvc0_graph_init_unitplemented_c242(dev);*/ 731 nvc0_graph_init_gpc_0(priv);
469 732 /*nvc0_graph_init_unitplemented_c242(priv);*/
470 nv_wr32(dev, 0x400500, 0x00010001); 733
471 nv_wr32(dev, 0x400100, 0xffffffff); 734 nv_wr32(priv, 0x400500, 0x00010001);
472 nv_wr32(dev, 0x40013c, 0xffffffff); 735 nv_wr32(priv, 0x400100, 0xffffffff);
473 736 nv_wr32(priv, 0x40013c, 0xffffffff);
474 nvc0_graph_init_units(dev); 737
475 nvc0_graph_init_gpc_1(dev); 738 nvc0_graph_init_units(priv);
476 nvc0_graph_init_rop(dev); 739 nvc0_graph_init_gpc_1(priv);
477 740 nvc0_graph_init_rop(priv);
478 nv_wr32(dev, 0x400108, 0xffffffff); 741
479 nv_wr32(dev, 0x400138, 0xffffffff); 742 nv_wr32(priv, 0x400108, 0xffffffff);
480 nv_wr32(dev, 0x400118, 0xffffffff); 743 nv_wr32(priv, 0x400138, 0xffffffff);
481 nv_wr32(dev, 0x400130, 0xffffffff); 744 nv_wr32(priv, 0x400118, 0xffffffff);
482 nv_wr32(dev, 0x40011c, 0xffffffff); 745 nv_wr32(priv, 0x400130, 0xffffffff);
483 nv_wr32(dev, 0x400134, 0xffffffff); 746 nv_wr32(priv, 0x40011c, 0xffffffff);
484 nv_wr32(dev, 0x400054, 0x34ce3464); 747 nv_wr32(priv, 0x400134, 0xffffffff);
485 748 nv_wr32(priv, 0x400054, 0x34ce3464);
486 ret = nvc0_graph_init_ctxctl(dev); 749
750 ret = nvc0_graph_init_ctxctl(priv);
487 if (ret) { 751 if (ret) {
488 if (ret == 1) 752 if (ret == 1)
489 goto reset; 753 goto reset;
@@ -493,279 +757,13 @@ reset:
493 return 0; 757 return 0;
494} 758}
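
One subtlety above: init_ctxctl() may return 1, which is not an errno but a request to redo the whole engine bring-up, hence the goto reset back to the top of nvc0_graph_init(). The control flow in isolation, with a single-retry trigger contrived for the demo:

    #include <stdio.h>

    static int attempts;

    static int
    init_ctxctl(void)
    {
    	/* pretend the first bring-up asks for one full engine reset */
    	return attempts++ == 0 ? 1 : 0;
    }

    static int
    graph_init(void)
    {
    	int ret;
    reset:
    	ret = init_ctxctl();
    	if (ret) {
    		if (ret == 1)
    			goto reset;
    		return ret;
    	}
    	return 0;
    }

    int
    main(void)
    {
    	printf("graph_init -> %d after %d attempt(s)\n",
    	       graph_init(), attempts);
    	return 0;
    }
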
495 759
496int 760struct nouveau_oclass
497nvc0_graph_isr_chid(struct drm_device *dev, u64 inst) 761nvc0_graph_oclass = {
498{ 762 .handle = NV_ENGINE(GR, 0xc0),
499 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); 763 .ofuncs = &(struct nouveau_ofuncs) {
500 struct drm_nouveau_private *dev_priv = dev->dev_private; 764 .ctor = nvc0_graph_ctor,
501 struct nouveau_channel *chan; 765 .dtor = nvc0_graph_dtor,
502 unsigned long flags; 766 .init = nvc0_graph_init,
503 int i; 767 .fini = _nouveau_graph_fini,
504 768 },
505 spin_lock_irqsave(&dev_priv->channels.lock, flags); 769};
506 for (i = 0; i < pfifo->channels; i++) {
507 chan = dev_priv->channels.ptr[i];
508 if (!chan || !chan->ramin)
509 continue;
510
511 if (inst == chan->ramin->addr)
512 break;
513 }
514 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
515 return i;
516}
517
518static void
519nvc0_graph_ctxctl_isr(struct drm_device *dev)
520{
521 u32 ustat = nv_rd32(dev, 0x409c18);
522
523 if (ustat & 0x00000001)
524 NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
525 if (ustat & 0x00080000)
526 NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
527 if (ustat & ~0x00080001)
528 NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
529
530 nvc0_graph_ctxctl_debug(dev);
531 nv_wr32(dev, 0x409c20, ustat);
532}
533
534static void
535nvc0_graph_isr(struct drm_device *dev)
536{
537 u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
538 u32 chid = nvc0_graph_isr_chid(dev, inst);
539 u32 stat = nv_rd32(dev, 0x400100);
540 u32 addr = nv_rd32(dev, 0x400704);
541 u32 mthd = (addr & 0x00003ffc);
542 u32 subc = (addr & 0x00070000) >> 16;
543 u32 data = nv_rd32(dev, 0x400708);
544 u32 code = nv_rd32(dev, 0x400110);
545 u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
546
547 if (stat & 0x00000010) {
548 if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
549 NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
550 "subc %d class 0x%04x mthd 0x%04x "
551 "data 0x%08x\n",
552 chid, inst, subc, class, mthd, data);
553 }
554 nv_wr32(dev, 0x400100, 0x00000010);
555 stat &= ~0x00000010;
556 }
557
558 if (stat & 0x00000020) {
559 NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
560 "class 0x%04x mthd 0x%04x data 0x%08x\n",
561 chid, inst, subc, class, mthd, data);
562 nv_wr32(dev, 0x400100, 0x00000020);
563 stat &= ~0x00000020;
564 }
565
566 if (stat & 0x00100000) {
567 NV_INFO(dev, "PGRAPH: DATA_ERROR [");
568 nouveau_enum_print(nv50_data_error_names, code);
569 printk("] ch %d [0x%010llx] subc %d class 0x%04x "
570 "mthd 0x%04x data 0x%08x\n",
571 chid, inst, subc, class, mthd, data);
572 nv_wr32(dev, 0x400100, 0x00100000);
573 stat &= ~0x00100000;
574 }
575
576 if (stat & 0x00200000) {
577 u32 trap = nv_rd32(dev, 0x400108);
578 NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap);
579 nv_wr32(dev, 0x400108, trap);
580 nv_wr32(dev, 0x400100, 0x00200000);
581 stat &= ~0x00200000;
582 }
583
584 if (stat & 0x00080000) {
585 nvc0_graph_ctxctl_isr(dev);
586 nv_wr32(dev, 0x400100, 0x00080000);
587 stat &= ~0x00080000;
588 }
589
590 if (stat) {
591 NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
592 nv_wr32(dev, 0x400100, stat);
593 }
594
595 nv_wr32(dev, 0x400500, 0x00010001);
596}
597
598static int
599nvc0_graph_create_fw(struct drm_device *dev, const char *fwname,
600 struct nvc0_graph_fuc *fuc)
601{
602 struct drm_nouveau_private *dev_priv = dev->dev_private;
603 const struct firmware *fw;
604 char f[32];
605 int ret;
606
607 snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
608 ret = request_firmware(&fw, f, &dev->pdev->dev);
609 if (ret) {
610 snprintf(f, sizeof(f), "nouveau/%s", fwname);
611 ret = request_firmware(&fw, f, &dev->pdev->dev);
612 if (ret) {
613 NV_ERROR(dev, "failed to load %s\n", fwname);
614 return ret;
615 }
616 }
617
618 fuc->size = fw->size;
619 fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
620 release_firmware(fw);
621 return (fuc->data != NULL) ? 0 : -ENOMEM;
622}
623
624static void
625nvc0_graph_destroy_fw(struct nvc0_graph_fuc *fuc)
626{
627 if (fuc->data) {
628 kfree(fuc->data);
629 fuc->data = NULL;
630 }
631}
632
633static void
634nvc0_graph_destroy(struct drm_device *dev, int engine)
635{
636 struct nvc0_graph_priv *priv = nv_engine(dev, engine);
637
638 nvc0_graph_destroy_fw(&priv->fuc409c);
639 nvc0_graph_destroy_fw(&priv->fuc409d);
640 nvc0_graph_destroy_fw(&priv->fuc41ac);
641 nvc0_graph_destroy_fw(&priv->fuc41ad);
642
643 nouveau_irq_unregister(dev, 12);
644
645 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
646 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
647
648 if (priv->data)
649 kfree(priv->data);
650
651 NVOBJ_ENGINE_DEL(dev, GR);
652 kfree(priv);
653}
654
655int
656nvc0_graph_create(struct drm_device *dev)
657{
658 struct drm_nouveau_private *dev_priv = dev->dev_private;
659 struct nvc0_graph_priv *priv;
660 int ret, gpc, i;
661 u32 fermi;
662
663 fermi = nvc0_graph_class(dev);
664 if (!fermi) {
665 NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
666 return 0;
667 }
668
669 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
670 if (!priv)
671 return -ENOMEM;
672
673 priv->base.destroy = nvc0_graph_destroy;
674 priv->base.init = nvc0_graph_init;
675 priv->base.fini = nvc0_graph_fini;
676 priv->base.context_new = nvc0_graph_context_new;
677 priv->base.context_del = nvc0_graph_context_del;
678 priv->base.object_new = nvc0_graph_object_new;
679
680 NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
681 nouveau_irq_register(dev, 12, nvc0_graph_isr);
682
683 if (nouveau_ctxfw) {
684 NV_INFO(dev, "PGRAPH: using external firmware\n");
685 if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
686 nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
687 nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
688 nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
689 ret = 0;
690 goto error;
691 }
692 priv->firmware = true;
693 }
694
695 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
696 if (ret)
697 goto error;
698
699 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
700 if (ret)
701 goto error;
702
703 for (i = 0; i < 0x1000; i += 4) {
704 nv_wo32(priv->unk4188b4, i, 0x00000010);
705 nv_wo32(priv->unk4188b8, i, 0x00000010);
706 }
707
708 priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
709 priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
710 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
711 priv->tpc_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
712 priv->tpc_total += priv->tpc_nr[gpc];
713 }
714
715 /*XXX: these need figuring out... */
716 switch (dev_priv->chipset) {
717 case 0xc0:
718 if (priv->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
719 priv->magic_not_rop_nr = 0x07;
720 } else
721 if (priv->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
722 priv->magic_not_rop_nr = 0x05;
723 } else
724 if (priv->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
725 priv->magic_not_rop_nr = 0x06;
726 }
727 break;
728 case 0xc3: /* 450, 4/0/0/0, 2 */
729 priv->magic_not_rop_nr = 0x03;
730 break;
731 case 0xc4: /* 460, 3/4/0/0, 4 */
732 priv->magic_not_rop_nr = 0x01;
733 break;
734 case 0xc1: /* 2/0/0/0, 1 */
735 priv->magic_not_rop_nr = 0x01;
736 break;
737 case 0xc8: /* 4/4/3/4, 5 */
738 priv->magic_not_rop_nr = 0x06;
739 break;
740 case 0xce: /* 4/4/0/0, 4 */
741 priv->magic_not_rop_nr = 0x03;
742 break;
743 case 0xcf: /* 4/0/0/0, 3 */
744 priv->magic_not_rop_nr = 0x03;
745 break;
746 case 0xd9: /* 1/0/0/0, 1 */
747 priv->magic_not_rop_nr = 0x01;
748 break;
749 }
750
751 if (!priv->magic_not_rop_nr) {
752 NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
753 priv->tpc_nr[0], priv->tpc_nr[1], priv->tpc_nr[2],
754 priv->tpc_nr[3], priv->rop_nr);
755 priv->magic_not_rop_nr = 0x00;
756 }
757
758 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
759 NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
760 NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
761 if (fermi >= 0x9197)
762 NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
763 if (fermi >= 0x9297)
764 NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */
765 NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
766 return 0;
767
768error:
769 nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR);
770 return ret;
771}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index 30ea3ab135c6..26f8268cc8c2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -25,6 +25,18 @@
25#ifndef __NVC0_GRAPH_H__ 25#ifndef __NVC0_GRAPH_H__
26#define __NVC0_GRAPH_H__ 26#define __NVC0_GRAPH_H__
27 27
28#include <core/client.h>
29#include <core/handle.h>
30#include <core/gpuobj.h>
31#include <core/option.h>
32
33#include <subdev/fb.h>
34#include <subdev/vm.h>
35#include <subdev/bar.h>
36#include <subdev/timer.h>
37
38#include <engine/graph.h>
39
28#define GPC_MAX 4 40#define GPC_MAX 4
29#define TPC_MAX 32 41#define TPC_MAX 32
30 42
@@ -53,7 +65,7 @@ struct nvc0_graph_fuc {
53}; 65};
54 66
55struct nvc0_graph_priv { 67struct nvc0_graph_priv {
56 struct nouveau_exec_engine base; 68 struct nouveau_graph base;
57 69
58 struct nvc0_graph_fuc fuc409c; 70 struct nvc0_graph_fuc fuc409c;
59 struct nvc0_graph_fuc fuc409d; 71 struct nvc0_graph_fuc fuc409d;
@@ -78,11 +90,10 @@ struct nvc0_graph_priv {
78}; 90};
79 91
80struct nvc0_graph_chan { 92struct nvc0_graph_chan {
81 struct nouveau_gpuobj *grctx; 93 struct nouveau_graph_chan base;
82 struct nouveau_vma grctx_vma;
83 94
84 struct nouveau_gpuobj *mmio; 95 struct nouveau_gpuobj *mmio;
85 struct nouveau_vma mmio_vma; 96 struct nouveau_vma mmio_vma;
86 int mmio_nr; 97 int mmio_nr;
87 struct { 98 struct {
88 struct nouveau_gpuobj *mem; 99 struct nouveau_gpuobj *mem;
@@ -91,11 +102,11 @@ struct nvc0_graph_chan {
91}; 102};
92 103
93static inline u32 104static inline u32
94nvc0_graph_class(struct drm_device *priv) 105nvc0_graph_class(void *obj)
95{ 106{
96 struct drm_nouveau_private *dev_priv = priv->dev_private; 107 struct nouveau_device *device = nv_device(obj);
97 108
98 switch (dev_priv->chipset) { 109 switch (device->chipset) {
99 case 0xc0: 110 case 0xc0:
100 case 0xc3: 111 case 0xc3:
101 case 0xc4: 112 case 0xc4:
@@ -115,17 +126,16 @@ nvc0_graph_class(struct drm_device *priv)
115 } 126 }
116} 127}
117 128
118void nv_icmd(struct drm_device *priv, u32 icmd, u32 data); 129void nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data);
119 130
120static inline void 131static inline void
121nv_mthd(struct drm_device *priv, u32 class, u32 mthd, u32 data) 132nv_mthd(struct nvc0_graph_priv *priv, u32 class, u32 mthd, u32 data)
122{ 133{
123 nv_wr32(priv, 0x40448c, data); 134 nv_wr32(priv, 0x40448c, data);
124 nv_wr32(priv, 0x404488, 0x80000000 | (mthd << 14) | class); 135 nv_wr32(priv, 0x404488, 0x80000000 | (mthd << 14) | class);
125} 136}
126 137
127struct nvc0_grctx { 138struct nvc0_grctx {
128 struct drm_device *dev;
129 struct nvc0_graph_priv *priv; 139 struct nvc0_graph_priv *priv;
130 struct nvc0_graph_data *data; 140 struct nvc0_graph_data *data;
131 struct nvc0_graph_mmio *mmio; 141 struct nvc0_graph_mmio *mmio;
@@ -135,18 +145,18 @@ struct nvc0_grctx {
135 u64 addr; 145 u64 addr;
136}; 146};
137 147
138int nvc0_grctx_generate(struct drm_device *); 148int nvc0_grctx_generate(struct nvc0_graph_priv *);
139int nvc0_grctx_init(struct drm_device *, struct nvc0_graph_priv *, 149int nvc0_grctx_init(struct nvc0_graph_priv *, struct nvc0_grctx *);
140 struct nvc0_grctx *);
141void nvc0_grctx_data(struct nvc0_grctx *, u32, u32, u32); 150void nvc0_grctx_data(struct nvc0_grctx *, u32, u32, u32);
142void nvc0_grctx_mmio(struct nvc0_grctx *, u32, u32, u32, u32); 151void nvc0_grctx_mmio(struct nvc0_grctx *, u32, u32, u32, u32);
143int nvc0_grctx_fini(struct nvc0_grctx *); 152int nvc0_grctx_fini(struct nvc0_grctx *);
144 153
145int nve0_grctx_generate(struct drm_device *); 154int nve0_grctx_generate(struct nvc0_graph_priv *);
146 155
147#define mmio_data(s,a,p) nvc0_grctx_data(&info, (s), (a), (p)) 156#define mmio_data(s,a,p) nvc0_grctx_data(&info, (s), (a), (p))
148#define mmio_list(r,d,s,b) nvc0_grctx_mmio(&info, (r), (d), (s), (b)) 157#define mmio_list(r,d,s,b) nvc0_grctx_mmio(&info, (r), (d), (s), (b))
149 158
159void nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *);
150int nvc0_graph_ctor_fw(struct nvc0_graph_priv *, const char *, 160int nvc0_graph_ctor_fw(struct nvc0_graph_priv *, const char *,
151 struct nvc0_graph_fuc *); 161 struct nvc0_graph_fuc *);
152void nvc0_graph_dtor(struct nouveau_object *); 162void nvc0_graph_dtor(struct nouveau_object *);
@@ -157,9 +167,4 @@ int nvc0_graph_context_ctor(struct nouveau_object *, struct nouveau_object *,
157 struct nouveau_object **); 167 struct nouveau_object **);
158void nvc0_graph_context_dtor(struct nouveau_object *); 168void nvc0_graph_context_dtor(struct nouveau_object *);
159 169
160void nvc0_graph_ctxctl_debug(struct drm_device *);
161
162int nvc0_graph_context_new(struct nouveau_channel *, int);
163void nvc0_graph_context_del(struct nouveau_channel *, int);
164
165#endif 170#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index a3a4ee7c0b2e..c79748a6fa2b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2010 Red Hat Inc. 2 * Copyright 2012 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -22,116 +22,290 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <linux/firmware.h>
26#include <linux/module.h>
27
28#include "drmP.h"
29
30#include "nouveau_drv.h"
31#include <core/mm.h>
32#include <engine/fifo.h>
33
34#include "nvc0.h" 25#include "nvc0.h"
35 26
27/*******************************************************************************
28 * Graphics object classes
29 ******************************************************************************/
30
31static struct nouveau_oclass
32nve0_graph_sclass[] = {
33 { 0x902d, &nouveau_object_ofuncs },
34 { 0xa040, &nouveau_object_ofuncs },
35 { 0xa097, &nouveau_object_ofuncs },
36 { 0xa0c0, &nouveau_object_ofuncs },
37 { 0xa0b5, &nouveau_object_ofuncs },
38 {}
39};
40
41/*******************************************************************************
42 * PGRAPH context
43 ******************************************************************************/
44
45static struct nouveau_oclass
46nve0_graph_cclass = {
47 .handle = NV_ENGCTX(GR, 0xe0),
48 .ofuncs = &(struct nouveau_ofuncs) {
49 .ctor = nvc0_graph_context_ctor,
50 .dtor = nvc0_graph_context_dtor,
51 .init = _nouveau_graph_context_init,
52 .fini = _nouveau_graph_context_fini,
53 .rd32 = _nouveau_graph_context_rd32,
54 .wr32 = _nouveau_graph_context_wr32,
55 },
56};
57
58/*******************************************************************************
59 * PGRAPH engine/subdev functions
60 ******************************************************************************/
61
36static void 62static void
37nve0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base) 63nve0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
38{ 64{
39 NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base, 65 u32 ustat = nv_rd32(priv, 0x409c18);
40 nv_rd32(dev, base + 0x400)); 66
41 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base, 67 if (ustat & 0x00000001)
42 nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804), 68 nv_error(priv, "CTXCTRL ucode error\n");
43 nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c)); 69 if (ustat & 0x00080000)
44 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base, 70 nv_error(priv, "CTXCTRL watchdog timeout\n");
45 nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814), 71 if (ustat & ~0x00080001)
46 nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c)); 72 nv_error(priv, "CTXCTRL 0x%08x\n", ustat);
73
74 nvc0_graph_ctxctl_debug(priv);
75 nv_wr32(priv, 0x409c20, ustat);
47} 76}
48 77
49static void 78static void
50nve0_graph_ctxctl_debug(struct drm_device *dev) 79nve0_graph_trap_isr(struct nvc0_graph_priv *priv, u64 inst)
51{ 80{
52 u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff; 81 u32 trap = nv_rd32(priv, 0x400108);
53 u32 gpc; 82 int rop;
83
84 if (trap & 0x00000001) {
85 u32 stat = nv_rd32(priv, 0x404000);
86 nv_error(priv, "DISPATCH ch 0x%010llx 0x%08x\n", inst, stat);
87 nv_wr32(priv, 0x404000, 0xc0000000);
88 nv_wr32(priv, 0x400108, 0x00000001);
89 trap &= ~0x00000001;
90 }
54 91
55 nve0_graph_ctxctl_debug_unit(dev, 0x409000); 92 if (trap & 0x00000010) {
56 for (gpc = 0; gpc < gpcnr; gpc++) 93 u32 stat = nv_rd32(priv, 0x405840);
57 nve0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000)); 94 nv_error(priv, "SHADER ch 0x%010llx 0x%08x\n", inst, stat);
95 nv_wr32(priv, 0x405840, 0xc0000000);
96 nv_wr32(priv, 0x400108, 0x00000010);
97 trap &= ~0x00000010;
98 }
99
100 if (trap & 0x02000000) {
101 for (rop = 0; rop < priv->rop_nr; rop++) {
102 u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
103 u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
104 nv_error(priv, "ROP%d ch 0x%010llx 0x%08x 0x%08x\n",
105 rop, inst, statz, statc);
106 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
107 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
108 }
109 nv_wr32(priv, 0x400108, 0x02000000);
110 trap &= ~0x02000000;
111 }
112
113 if (trap) {
114 nv_error(priv, "TRAP ch 0x%010llx 0x%08x\n", inst, trap);
115 nv_wr32(priv, 0x400108, trap);
116 }
58} 117}
59 118
60static int 119static void
61nve0_graph_object_new(struct nouveau_channel *chan, int engine, 120nve0_graph_intr(struct nouveau_subdev *subdev)
62 u32 handle, u16 class)
63{ 121{
64 return 0; 122 struct nvc0_graph_priv *priv = (void *)subdev;
123 struct nouveau_engine *engine = nv_engine(subdev);
124 struct nouveau_handle *handle = NULL;
125 u64 inst = (u64)(nv_rd32(priv, 0x409b00) & 0x0fffffff) << 12;
126 u32 stat = nv_rd32(priv, 0x400100);
127 u32 addr = nv_rd32(priv, 0x400704);
128 u32 mthd = (addr & 0x00003ffc);
129 u32 subc = (addr & 0x00070000) >> 16;
130 u32 data = nv_rd32(priv, 0x400708);
131 u32 code = nv_rd32(priv, 0x400110);
132 u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
133
134 if (stat & 0x00000010) {
135 handle = nouveau_engctx_lookup_class(engine, inst, class);
136 if (!handle || nv_call(handle->object, mthd, data)) {
137 nv_error(priv, "ILLEGAL_MTHD ch 0x%010llx "
138 "subc %d class 0x%04x mthd 0x%04x "
139 "data 0x%08x\n",
140 inst, subc, class, mthd, data);
141 }
142 nouveau_engctx_handle_put(handle);
143 nv_wr32(priv, 0x400100, 0x00000010);
144 stat &= ~0x00000010;
145 }
146
147 if (stat & 0x00000020) {
148 nv_error(priv, "ILLEGAL_CLASS ch 0x%010llx subc %d "
149 "class 0x%04x mthd 0x%04x data 0x%08x\n",
150 inst, subc, class, mthd, data);
151 nv_wr32(priv, 0x400100, 0x00000020);
152 stat &= ~0x00000020;
153 }
154
155 if (stat & 0x00100000) {
156 nv_error(priv, "DATA_ERROR [");
157 nouveau_enum_print(nv50_data_error_names, code);
158 printk("] ch 0x%010llx subc %d class 0x%04x "
159 "mthd 0x%04x data 0x%08x\n",
160 inst, subc, class, mthd, data);
161 nv_wr32(priv, 0x400100, 0x00100000);
162 stat &= ~0x00100000;
163 }
164
165 if (stat & 0x00200000) {
166 nve0_graph_trap_isr(priv, inst);
167 nv_wr32(priv, 0x400100, 0x00200000);
168 stat &= ~0x00200000;
169 }
170
171 if (stat & 0x00080000) {
172 nve0_graph_ctxctl_isr(priv);
173 nv_wr32(priv, 0x400100, 0x00080000);
174 stat &= ~0x00080000;
175 }
176
177 if (stat) {
178 nv_error(priv, "unknown stat 0x%08x\n", stat);
179 nv_wr32(priv, 0x400100, stat);
180 }
181
182 nv_wr32(priv, 0x400500, 0x00010001);
65} 183}
66 184
67static int 185static int
68nve0_graph_fini(struct drm_device *dev, int engine, bool suspend) 186nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
187 struct nouveau_oclass *oclass, void *data, u32 size,
188 struct nouveau_object **pobject)
69{ 189{
190 struct nvc0_graph_priv *priv;
191 int ret, i;
192
193 ret = nouveau_graph_create(parent, engine, oclass, false, &priv);
194 *pobject = nv_object(priv);
195 if (ret)
196 return ret;
197
198 nv_subdev(priv)->unit = 0x18001000;
199 nv_subdev(priv)->intr = nve0_graph_intr;
200 nv_engine(priv)->cclass = &nve0_graph_cclass;
201 nv_engine(priv)->sclass = nve0_graph_sclass;
202
203 nv_info(priv, "using external firmware\n");
204 if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
205 nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
206 nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
207 nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
208 return -EINVAL;
209 priv->firmware = true;
210
211 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4);
212 if (ret)
213 return ret;
214
215 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8);
216 if (ret)
217 return ret;
218
219 for (i = 0; i < 0x1000; i += 4) {
220 nv_wo32(priv->unk4188b4, i, 0x00000010);
221 nv_wo32(priv->unk4188b8, i, 0x00000010);
222 }
223
224 priv->gpc_nr = nv_rd32(priv, 0x409604) & 0x0000001f;
225 priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
226 for (i = 0; i < priv->gpc_nr; i++) {
227 priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608));
228 priv->tpc_total += priv->tpc_nr[i];
229 }
230
231 switch (nv_device(priv)->chipset) {
232 case 0xe4:
233 if (priv->tpc_total == 8)
234 priv->magic_not_rop_nr = 3;
235 else
236 if (priv->tpc_total == 7)
237 priv->magic_not_rop_nr = 1;
238 break;
239 case 0xe7:
240 priv->magic_not_rop_nr = 1;
241 break;
242 default:
243 break;
244 }
245
70 return 0; 246 return 0;
71} 247}
72 248
73static void 249static void
74nve0_graph_init_obj418880(struct drm_device *dev) 250nve0_graph_init_obj418880(struct nvc0_graph_priv *priv)
75{ 251{
76 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
77 int i; 252 int i;
78 253
79 nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000); 254 nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
80 nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000); 255 nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
81 for (i = 0; i < 4; i++) 256 for (i = 0; i < 4; i++)
82 nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000); 257 nv_wr32(priv, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
83 nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8); 258 nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
84 nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8); 259 nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
85} 260}
86 261
87static void 262static void
88nve0_graph_init_regs(struct drm_device *dev) 263nve0_graph_init_regs(struct nvc0_graph_priv *priv)
89{ 264{
90 nv_wr32(dev, 0x400080, 0x003083c2); 265 nv_wr32(priv, 0x400080, 0x003083c2);
91 nv_wr32(dev, 0x400088, 0x0001ffe7); 266 nv_wr32(priv, 0x400088, 0x0001ffe7);
92 nv_wr32(dev, 0x40008c, 0x00000000); 267 nv_wr32(priv, 0x40008c, 0x00000000);
93 nv_wr32(dev, 0x400090, 0x00000030); 268 nv_wr32(priv, 0x400090, 0x00000030);
94 nv_wr32(dev, 0x40013c, 0x003901f7); 269 nv_wr32(priv, 0x40013c, 0x003901f7);
95 nv_wr32(dev, 0x400140, 0x00000100); 270 nv_wr32(priv, 0x400140, 0x00000100);
96 nv_wr32(dev, 0x400144, 0x00000000); 271 nv_wr32(priv, 0x400144, 0x00000000);
97 nv_wr32(dev, 0x400148, 0x00000110); 272 nv_wr32(priv, 0x400148, 0x00000110);
98 nv_wr32(dev, 0x400138, 0x00000000); 273 nv_wr32(priv, 0x400138, 0x00000000);
99 nv_wr32(dev, 0x400130, 0x00000000); 274 nv_wr32(priv, 0x400130, 0x00000000);
100 nv_wr32(dev, 0x400134, 0x00000000); 275 nv_wr32(priv, 0x400134, 0x00000000);
101 nv_wr32(dev, 0x400124, 0x00000002); 276 nv_wr32(priv, 0x400124, 0x00000002);
102} 277}
103 278
104static void 279static void
105nve0_graph_init_units(struct drm_device *dev) 280nve0_graph_init_units(struct nvc0_graph_priv *priv)
106{ 281{
107 nv_wr32(dev, 0x409ffc, 0x00000000); 282 nv_wr32(priv, 0x409ffc, 0x00000000);
108 nv_wr32(dev, 0x409c14, 0x00003e3e); 283 nv_wr32(priv, 0x409c14, 0x00003e3e);
109 nv_wr32(dev, 0x409c24, 0x000f0000); 284 nv_wr32(priv, 0x409c24, 0x000f0000);
110 285
111 nv_wr32(dev, 0x404000, 0xc0000000); 286 nv_wr32(priv, 0x404000, 0xc0000000);
112 nv_wr32(dev, 0x404600, 0xc0000000); 287 nv_wr32(priv, 0x404600, 0xc0000000);
113 nv_wr32(dev, 0x408030, 0xc0000000); 288 nv_wr32(priv, 0x408030, 0xc0000000);
114 nv_wr32(dev, 0x404490, 0xc0000000); 289 nv_wr32(priv, 0x404490, 0xc0000000);
115 nv_wr32(dev, 0x406018, 0xc0000000); 290 nv_wr32(priv, 0x406018, 0xc0000000);
116 nv_wr32(dev, 0x407020, 0xc0000000); 291 nv_wr32(priv, 0x407020, 0xc0000000);
117 nv_wr32(dev, 0x405840, 0xc0000000); 292 nv_wr32(priv, 0x405840, 0xc0000000);
118 nv_wr32(dev, 0x405844, 0x00ffffff); 293 nv_wr32(priv, 0x405844, 0x00ffffff);
119 294
120 nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008); 295 nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
121 nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000); 296 nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
122 297
123} 298}
124 299
125static void 300static void
126nve0_graph_init_gpc_0(struct drm_device *dev) 301nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
127{ 302{
128 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
129 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total); 303 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
130 u32 data[TPC_MAX / 8]; 304 u32 data[TPC_MAX / 8];
131 u8 tpcnr[GPC_MAX]; 305 u8 tpcnr[GPC_MAX];
132 int i, gpc, tpc; 306 int i, gpc, tpc;
133 307
134 nv_wr32(dev, GPC_UNIT(0, 0x3018), 0x00000001); 308 nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
135 309
136 memset(data, 0x00, sizeof(data)); 310 memset(data, 0x00, sizeof(data));
137 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); 311 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
@@ -144,164 +318,143 @@ nve0_graph_init_gpc_0(struct drm_device *dev)
144 data[i / 8] |= tpc << ((i % 8) * 4); 318 data[i / 8] |= tpc << ((i % 8) * 4);
145 } 319 }
146 320
147 nv_wr32(dev, GPC_BCAST(0x0980), data[0]); 321 nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
148 nv_wr32(dev, GPC_BCAST(0x0984), data[1]); 322 nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
149 nv_wr32(dev, GPC_BCAST(0x0988), data[2]); 323 nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
150 nv_wr32(dev, GPC_BCAST(0x098c), data[3]); 324 nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
151 325
152 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 326 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
153 nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 | 327 nv_wr32(priv, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
154 priv->tpc_nr[gpc]); 328 priv->tpc_nr[gpc]);
155 nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total); 329 nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
156 nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918); 330 nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
157 } 331 }
158 332
159 nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918); 333 nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
160 nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800)); 334 nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
161} 335}
162 336
163static void 337static void
164nve0_graph_init_gpc_1(struct drm_device *dev) 338nve0_graph_init_gpc_1(struct nvc0_graph_priv *priv)
165{ 339{
166 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
167 int gpc, tpc; 340 int gpc, tpc;
168 341
169 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 342 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
170 nv_wr32(dev, GPC_UNIT(gpc, 0x3038), 0xc0000000); 343 nv_wr32(priv, GPC_UNIT(gpc, 0x3038), 0xc0000000);
171 nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000); 344 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
172 nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000); 345 nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
173 nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000); 346 nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
174 nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000); 347 nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
175 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { 348 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
176 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff); 349 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
177 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff); 350 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
178 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000); 351 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
179 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000); 352 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
180 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000); 353 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
181 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe); 354 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
182 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f); 355 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
183 } 356 }
184 nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff); 357 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
185 nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff); 358 nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
186 } 359 }
187} 360}
188 361
189static void 362static void
190nve0_graph_init_rop(struct drm_device *dev) 363nve0_graph_init_rop(struct nvc0_graph_priv *priv)
191{ 364{
192 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
193 int rop; 365 int rop;
194 366
195 for (rop = 0; rop < priv->rop_nr; rop++) { 367 for (rop = 0; rop < priv->rop_nr; rop++) {
196 nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000); 368 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
197 nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000); 369 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
198 nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff); 370 nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
199 nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff); 371 nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
200 }
201}
202
203static void
204nve0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
205 struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
206{
207 int i;
208
209 nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
210 for (i = 0; i < data->size / 4; i++)
211 nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
212
213 nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
214 for (i = 0; i < code->size / 4; i++) {
215 if ((i & 0x3f) == 0)
216 nv_wr32(dev, fuc_base + 0x0188, i >> 6);
217 nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
218 } 372 }
219} 373}
220 374
221static int 375static int
222nve0_graph_init_ctxctl(struct drm_device *dev) 376nve0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
223{ 377{
224 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
225 u32 r000260; 378 u32 r000260;
226 379
227 /* load fuc microcode */ 380 /* load fuc microcode */
228 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000); 381 r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
229 nve0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d); 382 nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c, &priv->fuc409d);
230 nve0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad); 383 nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
231 nv_wr32(dev, 0x000260, r000260); 384 nv_wr32(priv, 0x000260, r000260);
232 385
233 /* start both of them running */ 386 /* start both of them running */
234 nv_wr32(dev, 0x409840, 0xffffffff); 387 nv_wr32(priv, 0x409840, 0xffffffff);
235 nv_wr32(dev, 0x41a10c, 0x00000000); 388 nv_wr32(priv, 0x41a10c, 0x00000000);
236 nv_wr32(dev, 0x40910c, 0x00000000); 389 nv_wr32(priv, 0x40910c, 0x00000000);
237 nv_wr32(dev, 0x41a100, 0x00000002); 390 nv_wr32(priv, 0x41a100, 0x00000002);
238 nv_wr32(dev, 0x409100, 0x00000002); 391 nv_wr32(priv, 0x409100, 0x00000002);
239 if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001)) 392 if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
240 NV_INFO(dev, "0x409800 wait failed\n"); 393 nv_error(priv, "0x409800 wait failed\n");
241 394
242 nv_wr32(dev, 0x409840, 0xffffffff); 395 nv_wr32(priv, 0x409840, 0xffffffff);
243 nv_wr32(dev, 0x409500, 0x7fffffff); 396 nv_wr32(priv, 0x409500, 0x7fffffff);
244 nv_wr32(dev, 0x409504, 0x00000021); 397 nv_wr32(priv, 0x409504, 0x00000021);
245 398
246 nv_wr32(dev, 0x409840, 0xffffffff); 399 nv_wr32(priv, 0x409840, 0xffffffff);
247 nv_wr32(dev, 0x409500, 0x00000000); 400 nv_wr32(priv, 0x409500, 0x00000000);
248 nv_wr32(dev, 0x409504, 0x00000010); 401 nv_wr32(priv, 0x409504, 0x00000010);
249 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) { 402 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
250 NV_ERROR(dev, "fuc09 req 0x10 timeout\n"); 403 nv_error(priv, "fuc09 req 0x10 timeout\n");
251 return -EBUSY; 404 return -EBUSY;
252 } 405 }
253 priv->size = nv_rd32(dev, 0x409800); 406 priv->size = nv_rd32(priv, 0x409800);
254 407
255 nv_wr32(dev, 0x409840, 0xffffffff); 408 nv_wr32(priv, 0x409840, 0xffffffff);
256 nv_wr32(dev, 0x409500, 0x00000000); 409 nv_wr32(priv, 0x409500, 0x00000000);
257 nv_wr32(dev, 0x409504, 0x00000016); 410 nv_wr32(priv, 0x409504, 0x00000016);
258 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) { 411 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
259 NV_ERROR(dev, "fuc09 req 0x16 timeout\n"); 412 nv_error(priv, "fuc09 req 0x16 timeout\n");
260 return -EBUSY; 413 return -EBUSY;
261 } 414 }
262 415
263 nv_wr32(dev, 0x409840, 0xffffffff); 416 nv_wr32(priv, 0x409840, 0xffffffff);
264 nv_wr32(dev, 0x409500, 0x00000000); 417 nv_wr32(priv, 0x409500, 0x00000000);
265 nv_wr32(dev, 0x409504, 0x00000025); 418 nv_wr32(priv, 0x409504, 0x00000025);
266 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) { 419 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
267 NV_ERROR(dev, "fuc09 req 0x25 timeout\n"); 420 nv_error(priv, "fuc09 req 0x25 timeout\n");
268 return -EBUSY; 421 return -EBUSY;
269 } 422 }
270 423
271 nv_wr32(dev, 0x409800, 0x00000000); 424 nv_wr32(priv, 0x409800, 0x00000000);
272 nv_wr32(dev, 0x409500, 0x00000001); 425 nv_wr32(priv, 0x409500, 0x00000001);
273 nv_wr32(dev, 0x409504, 0x00000030); 426 nv_wr32(priv, 0x409504, 0x00000030);
274 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) { 427 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
275 NV_ERROR(dev, "fuc09 req 0x30 timeout\n"); 428 nv_error(priv, "fuc09 req 0x30 timeout\n");
276 return -EBUSY; 429 return -EBUSY;
277 } 430 }
278 431
279 nv_wr32(dev, 0x409810, 0xb00095c8); 432 nv_wr32(priv, 0x409810, 0xb00095c8);
280 nv_wr32(dev, 0x409800, 0x00000000); 433 nv_wr32(priv, 0x409800, 0x00000000);
281 nv_wr32(dev, 0x409500, 0x00000001); 434 nv_wr32(priv, 0x409500, 0x00000001);
282 nv_wr32(dev, 0x409504, 0x00000031); 435 nv_wr32(priv, 0x409504, 0x00000031);
283 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) { 436 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
284 NV_ERROR(dev, "fuc09 req 0x31 timeout\n"); 437 nv_error(priv, "fuc09 req 0x31 timeout\n");
285 return -EBUSY; 438 return -EBUSY;
286 } 439 }
287 440
288 nv_wr32(dev, 0x409810, 0x00080420); 441 nv_wr32(priv, 0x409810, 0x00080420);
289 nv_wr32(dev, 0x409800, 0x00000000); 442 nv_wr32(priv, 0x409800, 0x00000000);
290 nv_wr32(dev, 0x409500, 0x00000001); 443 nv_wr32(priv, 0x409500, 0x00000001);
291 nv_wr32(dev, 0x409504, 0x00000032); 444 nv_wr32(priv, 0x409504, 0x00000032);
292 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) { 445 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
293 NV_ERROR(dev, "fuc09 req 0x32 timeout\n"); 446 nv_error(priv, "fuc09 req 0x32 timeout\n");
294 return -EBUSY; 447 return -EBUSY;
295 } 448 }
296 449
297 nv_wr32(dev, 0x409614, 0x00000070); 450 nv_wr32(priv, 0x409614, 0x00000070);
298 nv_wr32(dev, 0x409614, 0x00000770); 451 nv_wr32(priv, 0x409614, 0x00000770);
299 nv_wr32(dev, 0x40802c, 0x00000001); 452 nv_wr32(priv, 0x40802c, 0x00000001);
300 453
301 if (priv->data == NULL) { 454 if (priv->data == NULL) {
302 int ret = nve0_grctx_generate(dev); 455 int ret = nve0_grctx_generate(priv);
303 if (ret) { 456 if (ret) {
304 NV_ERROR(dev, "PGRAPH: failed to construct context\n"); 457 nv_error(priv, "failed to construct context\n");
305 return ret; 458 return ret;
306 } 459 }
307 460
@@ -312,325 +465,53 @@ nve0_graph_init_ctxctl(struct drm_device *dev)
312} 465}
313 466
314static int 467static int
315nve0_graph_init(struct drm_device *dev, int engine) 468nve0_graph_init(struct nouveau_object *object)
316{ 469{
470 struct nvc0_graph_priv *priv = (void *)object;
317 int ret; 471 int ret;
318 472
319reset: 473reset:
320 nv_mask(dev, 0x000200, 0x18001000, 0x00000000); 474 ret = nouveau_graph_init(&priv->base);
321 nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
322
323 nve0_graph_init_obj418880(dev);
324 nve0_graph_init_regs(dev);
325 nve0_graph_init_gpc_0(dev);
326
327 nv_wr32(dev, 0x400500, 0x00010001);
328 nv_wr32(dev, 0x400100, 0xffffffff);
329 nv_wr32(dev, 0x40013c, 0xffffffff);
330
331 nve0_graph_init_units(dev);
332 nve0_graph_init_gpc_1(dev);
333 nve0_graph_init_rop(dev);
334
335 nv_wr32(dev, 0x400108, 0xffffffff);
336 nv_wr32(dev, 0x400138, 0xffffffff);
337 nv_wr32(dev, 0x400118, 0xffffffff);
338 nv_wr32(dev, 0x400130, 0xffffffff);
339 nv_wr32(dev, 0x40011c, 0xffffffff);
340 nv_wr32(dev, 0x400134, 0xffffffff);
341 nv_wr32(dev, 0x400054, 0x34ce3464);
342
343 ret = nve0_graph_init_ctxctl(dev);
344 if (ret) {
345 if (ret == 1)
346 goto reset;
347 return ret;
348 }
349
350 return 0;
351}
352
353int
354nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
355{
356 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
357 struct drm_nouveau_private *dev_priv = dev->dev_private;
358 struct nouveau_channel *chan;
359 unsigned long flags;
360 int i;
361
362 spin_lock_irqsave(&dev_priv->channels.lock, flags);
363 for (i = 0; i < pfifo->channels; i++) {
364 chan = dev_priv->channels.ptr[i];
365 if (!chan || !chan->ramin)
366 continue;
367
368 if (inst == chan->ramin->addr)
369 break;
370 }
371 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
372 return i;
373}
374
375static void
376nve0_graph_ctxctl_isr(struct drm_device *dev)
377{
378 u32 ustat = nv_rd32(dev, 0x409c18);
379
380 if (ustat & 0x00000001)
381 NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
382 if (ustat & 0x00080000)
383 NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
384 if (ustat & ~0x00080001)
385 NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
386
387 nve0_graph_ctxctl_debug(dev);
388 nv_wr32(dev, 0x409c20, ustat);
389}
390
391static void
392nve0_graph_trap_isr(struct drm_device *dev, int chid)
393{
394 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
395 u32 trap = nv_rd32(dev, 0x400108);
396 int rop;
397
398 if (trap & 0x00000001) {
399 u32 stat = nv_rd32(dev, 0x404000);
400 NV_INFO(dev, "PGRAPH: DISPATCH ch %d 0x%08x\n", chid, stat);
401 nv_wr32(dev, 0x404000, 0xc0000000);
402 nv_wr32(dev, 0x400108, 0x00000001);
403 trap &= ~0x00000001;
404 }
405
406 if (trap & 0x00000010) {
407 u32 stat = nv_rd32(dev, 0x405840);
408 NV_INFO(dev, "PGRAPH: SHADER ch %d 0x%08x\n", chid, stat);
409 nv_wr32(dev, 0x405840, 0xc0000000);
410 nv_wr32(dev, 0x400108, 0x00000010);
411 trap &= ~0x00000010;
412 }
413
414 if (trap & 0x02000000) {
415 for (rop = 0; rop < priv->rop_nr; rop++) {
416 u32 statz = nv_rd32(dev, ROP_UNIT(rop, 0x070));
417 u32 statc = nv_rd32(dev, ROP_UNIT(rop, 0x144));
418 NV_INFO(dev, "PGRAPH: ROP%d ch %d 0x%08x 0x%08x\n",
419 rop, chid, statz, statc);
420 nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
421 nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
422 }
423 nv_wr32(dev, 0x400108, 0x02000000);
424 trap &= ~0x02000000;
425 }
426
427 if (trap) {
428 NV_INFO(dev, "PGRAPH: TRAP ch %d 0x%08x\n", chid, trap);
429 nv_wr32(dev, 0x400108, trap);
430 }
431}
432
433static void
434nve0_graph_isr(struct drm_device *dev)
435{
436 u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
437 u32 chid = nve0_graph_isr_chid(dev, inst);
438 u32 stat = nv_rd32(dev, 0x400100);
439 u32 addr = nv_rd32(dev, 0x400704);
440 u32 mthd = (addr & 0x00003ffc);
441 u32 subc = (addr & 0x00070000) >> 16;
442 u32 data = nv_rd32(dev, 0x400708);
443 u32 code = nv_rd32(dev, 0x400110);
444 u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
445
446 if (stat & 0x00000010) {
447 if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
448 NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
449 "subc %d class 0x%04x mthd 0x%04x "
450 "data 0x%08x\n",
451 chid, inst, subc, class, mthd, data);
452 }
453 nv_wr32(dev, 0x400100, 0x00000010);
454 stat &= ~0x00000010;
455 }
456
457 if (stat & 0x00000020) {
458 NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
459 "class 0x%04x mthd 0x%04x data 0x%08x\n",
460 chid, inst, subc, class, mthd, data);
461 nv_wr32(dev, 0x400100, 0x00000020);
462 stat &= ~0x00000020;
463 }
464
465 if (stat & 0x00100000) {
466 NV_INFO(dev, "PGRAPH: DATA_ERROR [");
467 nouveau_enum_print(nv50_data_error_names, code);
468 printk("] ch %d [0x%010llx] subc %d class 0x%04x "
469 "mthd 0x%04x data 0x%08x\n",
470 chid, inst, subc, class, mthd, data);
471 nv_wr32(dev, 0x400100, 0x00100000);
472 stat &= ~0x00100000;
473 }
474
475 if (stat & 0x00200000) {
476 nve0_graph_trap_isr(dev, chid);
477 nv_wr32(dev, 0x400100, 0x00200000);
478 stat &= ~0x00200000;
479 }
480
481 if (stat & 0x00080000) {
482 nve0_graph_ctxctl_isr(dev);
483 nv_wr32(dev, 0x400100, 0x00080000);
484 stat &= ~0x00080000;
485 }
486
487 if (stat) {
488 NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
489 nv_wr32(dev, 0x400100, stat);
490 }
491
492 nv_wr32(dev, 0x400500, 0x00010001);
493}
494
495static int
496nve0_graph_create_fw(struct drm_device *dev, const char *fwname,
497 struct nvc0_graph_fuc *fuc)
498{
499 struct drm_nouveau_private *dev_priv = dev->dev_private;
500 const struct firmware *fw;
501 char f[32];
502 int ret;
503
504 snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
505 ret = request_firmware(&fw, f, &dev->pdev->dev);
506 if (ret) 475 if (ret)
507 return ret; 476 return ret;
508 477
509 fuc->size = fw->size; 478 nve0_graph_init_obj418880(priv);
510 fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL); 479 nve0_graph_init_regs(priv);
511 release_firmware(fw); 480 nve0_graph_init_gpc_0(priv);
512 return (fuc->data != NULL) ? 0 : -ENOMEM;
513}
514
515static void
516nve0_graph_destroy_fw(struct nvc0_graph_fuc *fuc)
517{
518 if (fuc->data) {
519 kfree(fuc->data);
520 fuc->data = NULL;
521 }
522}
523
524static void
525nve0_graph_destroy(struct drm_device *dev, int engine)
526{
527 struct nvc0_graph_priv *priv = nv_engine(dev, engine);
528
529 nve0_graph_destroy_fw(&priv->fuc409c);
530 nve0_graph_destroy_fw(&priv->fuc409d);
531 nve0_graph_destroy_fw(&priv->fuc41ac);
532 nve0_graph_destroy_fw(&priv->fuc41ad);
533
534 nouveau_irq_unregister(dev, 12);
535
536 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
537 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
538
539 if (priv->data)
540 kfree(priv->data);
541
542 NVOBJ_ENGINE_DEL(dev, GR);
543 kfree(priv);
544}
545
546int
547nve0_graph_create(struct drm_device *dev)
548{
549 struct drm_nouveau_private *dev_priv = dev->dev_private;
550 struct nvc0_graph_priv *priv;
551 int ret, gpc, i;
552 u32 kepler;
553
554 kepler = nvc0_graph_class(dev);
555 if (!kepler) {
556 NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
557 return 0;
558 }
559
560 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
561 if (!priv)
562 return -ENOMEM;
563
564 priv->base.destroy = nve0_graph_destroy;
565 priv->base.init = nve0_graph_init;
566 priv->base.fini = nve0_graph_fini;
567 priv->base.context_new = nvc0_graph_context_new;
568 priv->base.context_del = nvc0_graph_context_del;
569 priv->base.object_new = nve0_graph_object_new;
570
571 NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
572 nouveau_irq_register(dev, 12, nve0_graph_isr);
573
574 NV_INFO(dev, "PGRAPH: using external firmware\n");
575 if (nve0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
576 nve0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
577 nve0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
578 nve0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
579 ret = 0;
580 goto error;
581 }
582 priv->firmware = true;
583
584 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
585 if (ret)
586 goto error;
587 481
588 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8); 482 nv_wr32(priv, 0x400500, 0x00010001);
589 if (ret) 483 nv_wr32(priv, 0x400100, 0xffffffff);
590 goto error; 484 nv_wr32(priv, 0x40013c, 0xffffffff);
591 485
592 for (i = 0; i < 0x1000; i += 4) { 486 nve0_graph_init_units(priv);
593 nv_wo32(priv->unk4188b4, i, 0x00000010); 487 nve0_graph_init_gpc_1(priv);
594 nv_wo32(priv->unk4188b8, i, 0x00000010); 488 nve0_graph_init_rop(priv);
595 }
596 489
597 priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f; 490 nv_wr32(priv, 0x400108, 0xffffffff);
598 priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16; 491 nv_wr32(priv, 0x400138, 0xffffffff);
599 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 492 nv_wr32(priv, 0x400118, 0xffffffff);
600 priv->tpc_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608)); 493 nv_wr32(priv, 0x400130, 0xffffffff);
601 priv->tpc_total += priv->tpc_nr[gpc]; 494 nv_wr32(priv, 0x40011c, 0xffffffff);
602 } 495 nv_wr32(priv, 0x400134, 0xffffffff);
603 496 nv_wr32(priv, 0x400054, 0x34ce3464);
604 switch (dev_priv->chipset) {
605 case 0xe4:
606 if (priv->tpc_total == 8)
607 priv->magic_not_rop_nr = 3;
608 else
609 if (priv->tpc_total == 7)
610 priv->magic_not_rop_nr = 1;
611 break;
612 case 0xe7:
613 priv->magic_not_rop_nr = 1;
614 break;
615 default:
616 break;
617 }
618 497
619 if (!priv->magic_not_rop_nr) { 498 ret = nve0_graph_init_ctxctl(priv);
620 NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n", 499 if (ret) {
621 priv->tpc_nr[0], priv->tpc_nr[1], priv->tpc_nr[2], 500 if (ret == 1)
622 priv->tpc_nr[3], priv->rop_nr); 501 goto reset;
623 priv->magic_not_rop_nr = 0x00; 502 return ret;
624 } 503 }
625 504
626 NVOBJ_CLASS(dev, 0xa097, GR); /* subc 0: 3D */
627 NVOBJ_CLASS(dev, 0xa0c0, GR); /* subc 1: COMPUTE */
628 NVOBJ_CLASS(dev, 0xa040, GR); /* subc 2: P2MF */
629 NVOBJ_CLASS(dev, 0x902d, GR); /* subc 3: 2D */
630 NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */
631 return 0; 505 return 0;
632
633error:
634 nve0_graph_destroy(dev, NVOBJ_ENGINE_GR);
635 return ret;
636} 506}
507
508struct nouveau_oclass
509nve0_graph_oclass = {
510 .handle = NV_ENGINE(GR, 0xe0),
511 .ofuncs = &(struct nouveau_ofuncs) {
512 .ctor = nve0_graph_ctor,
513 .dtor = nvc0_graph_dtor,
514 .init = nve0_graph_init,
515 .fini = _nouveau_graph_fini,
516 },
517};
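
[editor's note] The nve0.c diff above ends with nve0_graph_oclass: the chipset-specific graph engine is now described by a static class table (a handle plus ctor/dtor/init/fini entry points) rather than callbacks filled in on a channel at create time, and every register helper (nv_wr32, nv_mask, nv_wait) now takes the priv object instead of a drm_device. The standalone C sketch below models only the dispatch shape of such a table; every identifier in it (my_oclass, my_ofuncs, gr_init, and so on) is invented for illustration and is not the driver's API.

    /* Minimal userspace model of the oclass/ofuncs dispatch pattern.
     * All names are illustrative inventions; build with: cc -o demo demo.c */
    #include <stdio.h>

    struct my_object;

    struct my_ofuncs {
            int  (*init)(struct my_object *);
            void (*fini)(struct my_object *);
    };

    struct my_oclass {
            unsigned handle;                /* engine type + chipset id */
            const struct my_ofuncs *ofuncs; /* per-implementation entry points */
    };

    struct my_object {
            const struct my_oclass *oclass;
    };

    static int
    gr_init(struct my_object *object)
    {
            printf("init engine 0x%08x\n", object->oclass->handle);
            return 0;
    }

    static void
    gr_fini(struct my_object *object)
    {
            printf("fini engine 0x%08x\n", object->oclass->handle);
    }

    static const struct my_ofuncs gr_ofuncs = { .init = gr_init, .fini = gr_fini };
    static const struct my_oclass gr_oclass = { .handle = 0x0000e0a1, .ofuncs = &gr_ofuncs };

    int
    main(void)
    {
            struct my_object gr = { .oclass = &gr_oclass };

            /* core code only ever dispatches through the class table */
            if (gr.oclass->ofuncs->init(&gr) == 0)
                    gr.oclass->ofuncs->fini(&gr);
            return 0;
    }

The payoff of the indirection is visible further down in this same diff: nv40_mpeg_oclass can point its init at nv31_mpeg_init unchanged, because core code never cares which implementation sits behind the two pointers.
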
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
new file mode 100644
index 000000000000..9c715a25cecb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
@@ -0,0 +1,269 @@
1#ifndef __NOUVEAU_GRAPH_REGS_H__
2#define __NOUVEAU_GRAPH_REGS_H__
3
4#define NV04_PGRAPH_DEBUG_0 0x00400080
5#define NV04_PGRAPH_DEBUG_1 0x00400084
6#define NV04_PGRAPH_DEBUG_2 0x00400088
7#define NV04_PGRAPH_DEBUG_3 0x0040008c
8#define NV10_PGRAPH_DEBUG_4 0x00400090
9#define NV03_PGRAPH_INTR 0x00400100
10#define NV03_PGRAPH_NSTATUS 0x00400104
11# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11)
12# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12)
13# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13)
14# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14)
15# define NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23)
16# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24)
17# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25)
18# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26)
19#define NV03_PGRAPH_NSOURCE 0x00400108
20# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<<0)
21# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<<1)
22# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<<2)
23# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<<3)
24# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<<4)
25# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<<5)
26# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<<6)
27# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<<7)
28# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<<8)
29# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<<9)
30# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10)
31# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11)
32# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12)
33# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13)
34# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14)
35# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15)
36# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16)
37# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17)
38# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18)
39#define NV03_PGRAPH_INTR_EN 0x00400140
40#define NV40_PGRAPH_INTR_EN 0x0040013C
41# define NV_PGRAPH_INTR_NOTIFY (1<<0)
42# define NV_PGRAPH_INTR_MISSING_HW (1<<4)
43# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12)
44# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16)
45# define NV_PGRAPH_INTR_ERROR (1<<20)
46#define NV10_PGRAPH_CTX_CONTROL 0x00400144
47#define NV10_PGRAPH_CTX_USER 0x00400148
48#define NV10_PGRAPH_CTX_SWITCH(i) (0x0040014C + 0x4*(i))
49#define NV04_PGRAPH_CTX_SWITCH1 0x00400160
50#define NV10_PGRAPH_CTX_CACHE(i, j) (0x00400160 \
51 + 0x4*(i) + 0x20*(j))
52#define NV04_PGRAPH_CTX_SWITCH2 0x00400164
53#define NV04_PGRAPH_CTX_SWITCH3 0x00400168
54#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C
55#define NV04_PGRAPH_CTX_CONTROL 0x00400170
56#define NV04_PGRAPH_CTX_USER 0x00400174
57#define NV04_PGRAPH_CTX_CACHE1 0x00400180
58#define NV03_PGRAPH_CTX_CONTROL 0x00400190
59#define NV03_PGRAPH_CTX_USER 0x00400194
60#define NV04_PGRAPH_CTX_CACHE2 0x004001A0
61#define NV04_PGRAPH_CTX_CACHE3 0x004001C0
62#define NV04_PGRAPH_CTX_CACHE4 0x004001E0
63#define NV40_PGRAPH_CTXCTL_0304 0x00400304
64#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
65#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308
66#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000
67#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24
68#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff
69#define NV40_PGRAPH_CTXCTL_0310 0x00400310
70#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020
71#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040
72#define NV40_PGRAPH_CTXCTL_030C 0x0040030c
73#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324
74#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328
75#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c
76#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000
77#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE 0x000FFFFF
78#define NV40_PGRAPH_CTXCTL_NEXT 0x00400330
79#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE 0x000fffff
80#define NV50_PGRAPH_CTXCTL_CUR 0x0040032c
81#define NV50_PGRAPH_CTXCTL_CUR_LOADED 0x80000000
82#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE 0x00ffffff
83#define NV50_PGRAPH_CTXCTL_NEXT 0x00400330
84#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE 0x00ffffff
85#define NV03_PGRAPH_ABS_X_RAM 0x00400400
86#define NV03_PGRAPH_ABS_Y_RAM 0x00400480
87#define NV03_PGRAPH_X_MISC 0x00400500
88#define NV03_PGRAPH_Y_MISC 0x00400504
89#define NV04_PGRAPH_VALID1 0x00400508
90#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C
91#define NV04_PGRAPH_MISC24_0 0x00400510
92#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514
93#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518
94#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C
95#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520
96#define NV03_PGRAPH_CLIPX_0 0x00400524
97#define NV03_PGRAPH_CLIPX_1 0x00400528
98#define NV03_PGRAPH_CLIPY_0 0x0040052C
99#define NV03_PGRAPH_CLIPY_1 0x00400530
100#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534
101#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538
102#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C
103#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540
104#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544
105#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548
106#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560
107#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564
108#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568
109#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C
110#define NV04_PGRAPH_MISC24_1 0x00400570
111#define NV04_PGRAPH_MISC24_2 0x00400574
112#define NV04_PGRAPH_VALID2 0x00400578
113#define NV04_PGRAPH_PASSTHRU_0 0x0040057C
114#define NV04_PGRAPH_PASSTHRU_1 0x00400580
115#define NV04_PGRAPH_PASSTHRU_2 0x00400584
116#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588
117#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C
118#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590
119#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594
120#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598
121#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C
122#define NV04_PGRAPH_FORMAT_0 0x004005A8
123#define NV04_PGRAPH_FORMAT_1 0x004005AC
124#define NV04_PGRAPH_FILTER_0 0x004005B0
125#define NV04_PGRAPH_FILTER_1 0x004005B4
126#define NV03_PGRAPH_MONO_COLOR0 0x00400600
127#define NV04_PGRAPH_ROP3 0x00400604
128#define NV04_PGRAPH_BETA_AND 0x00400608
129#define NV04_PGRAPH_BETA_PREMULT 0x0040060C
130#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610
131#define NV04_PGRAPH_FORMATS 0x00400618
132#define NV10_PGRAPH_DEBUG_2 0x00400620
133#define NV04_PGRAPH_BOFFSET0 0x00400640
134#define NV04_PGRAPH_BOFFSET1 0x00400644
135#define NV04_PGRAPH_BOFFSET2 0x00400648
136#define NV04_PGRAPH_BOFFSET3 0x0040064C
137#define NV04_PGRAPH_BOFFSET4 0x00400650
138#define NV04_PGRAPH_BOFFSET5 0x00400654
139#define NV04_PGRAPH_BBASE0 0x00400658
140#define NV04_PGRAPH_BBASE1 0x0040065C
141#define NV04_PGRAPH_BBASE2 0x00400660
142#define NV04_PGRAPH_BBASE3 0x00400664
143#define NV04_PGRAPH_BBASE4 0x00400668
144#define NV04_PGRAPH_BBASE5 0x0040066C
145#define NV04_PGRAPH_BPITCH0 0x00400670
146#define NV04_PGRAPH_BPITCH1 0x00400674
147#define NV04_PGRAPH_BPITCH2 0x00400678
148#define NV04_PGRAPH_BPITCH3 0x0040067C
149#define NV04_PGRAPH_BPITCH4 0x00400680
150#define NV04_PGRAPH_BLIMIT0 0x00400684
151#define NV04_PGRAPH_BLIMIT1 0x00400688
152#define NV04_PGRAPH_BLIMIT2 0x0040068C
153#define NV04_PGRAPH_BLIMIT3 0x00400690
154#define NV04_PGRAPH_BLIMIT4 0x00400694
155#define NV04_PGRAPH_BLIMIT5 0x00400698
156#define NV04_PGRAPH_BSWIZZLE2 0x0040069C
157#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
158#define NV03_PGRAPH_STATUS 0x004006B0
159#define NV04_PGRAPH_STATUS 0x00400700
160# define NV40_PGRAPH_STATUS_SYNC_STALL 0x00004000
161#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
162#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
163#define NV04_PGRAPH_SURFACE 0x0040070C
164#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C
165#define NV04_PGRAPH_STATE 0x00400710
166#define NV10_PGRAPH_SURFACE 0x00400710
167#define NV04_PGRAPH_NOTIFY 0x00400714
168#define NV10_PGRAPH_STATE 0x00400714
169#define NV10_PGRAPH_NOTIFY 0x00400718
170
171#define NV04_PGRAPH_FIFO 0x00400720
172
173#define NV04_PGRAPH_BPIXEL 0x00400724
174#define NV10_PGRAPH_RDI_INDEX 0x00400750
175#define NV04_PGRAPH_FFINTFC_ST2 0x00400754
176#define NV10_PGRAPH_RDI_DATA 0x00400754
177#define NV04_PGRAPH_DMA_PITCH 0x00400760
178#define NV10_PGRAPH_FFINTFC_FIFO_PTR 0x00400760
179#define NV04_PGRAPH_DVD_COLORFMT 0x00400764
180#define NV10_PGRAPH_FFINTFC_ST2 0x00400764
181#define NV04_PGRAPH_SCALED_FORMAT 0x00400768
182#define NV10_PGRAPH_FFINTFC_ST2_DL 0x00400768
183#define NV10_PGRAPH_FFINTFC_ST2_DH 0x0040076c
184#define NV10_PGRAPH_DMA_PITCH 0x00400770
185#define NV10_PGRAPH_DVD_COLORFMT 0x00400774
186#define NV10_PGRAPH_SCALED_FORMAT 0x00400778
187#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780
188#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784
189#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788
190#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001
191#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002
192#define NV04_PGRAPH_PATT_COLOR0 0x00400800
193#define NV04_PGRAPH_PATT_COLOR1 0x00400804
194#define NV04_PGRAPH_PATTERN 0x00400808
195#define NV04_PGRAPH_PATTERN_SHAPE 0x00400810
196#define NV04_PGRAPH_CHROMA 0x00400814
197#define NV04_PGRAPH_CONTROL0 0x00400818
198#define NV04_PGRAPH_CONTROL1 0x0040081C
199#define NV04_PGRAPH_CONTROL2 0x00400820
200#define NV04_PGRAPH_BLEND 0x00400824
201#define NV04_PGRAPH_STORED_FMT 0x00400830
202#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
203#define NV20_PGRAPH_TILE(i) (0x00400900 + (i*16))
204#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
205#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
206#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
207#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i))
208#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
209#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
210#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
211#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
212#define NV04_PGRAPH_U_RAM 0x00400D00
213#define NV47_PGRAPH_TILE(i) (0x00400D00 + (i*16))
214#define NV47_PGRAPH_TLIMIT(i) (0x00400D04 + (i*16))
215#define NV47_PGRAPH_TSIZE(i) (0x00400D08 + (i*16))
216#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
217#define NV04_PGRAPH_V_RAM 0x00400D40
218#define NV04_PGRAPH_W_RAM 0x00400D80
219#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
220#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
221#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
222#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C
223#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50
224#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54
225#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58
226#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C
227#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60
228#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64
229#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68
230#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C
231#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00
232#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20
233#define NV10_PGRAPH_XFMODE0 0x00400F40
234#define NV10_PGRAPH_XFMODE1 0x00400F44
235#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48
236#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C
237#define NV10_PGRAPH_PIPE_ADDRESS 0x00400F50
238#define NV10_PGRAPH_PIPE_DATA 0x00400F54
239#define NV04_PGRAPH_DMA_START_0 0x00401000
240#define NV04_PGRAPH_DMA_START_1 0x00401004
241#define NV04_PGRAPH_DMA_LENGTH 0x00401008
242#define NV04_PGRAPH_DMA_MISC 0x0040100C
243#define NV04_PGRAPH_DMA_DATA_0 0x00401020
244#define NV04_PGRAPH_DMA_DATA_1 0x00401024
245#define NV04_PGRAPH_DMA_RM 0x00401030
246#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040
247#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044
248#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048
249#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C
250#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050
251#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054
252#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058
253#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C
254#define NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060
255#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080
256#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084
257#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088
258#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C
259#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090
260#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094
261#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
262#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
263#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
264#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
265#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
266#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
267#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
268
269#endif
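
[editor's note] Most of regs.h is flat MMIO offsets; the parameterised macros encode register arrays by stride, e.g. 16 bytes per tile region and, for NV10_PGRAPH_CTX_CACHE, 4 bytes per entry within a 0x20-byte bank. A quick standalone check of that arithmetic, with the expected constants worked out by hand (this is just the macro expansion, not driver code):

    #include <assert.h>
    #include <stdio.h>

    /* copied from regs.h above */
    #define NV20_PGRAPH_TILE(i)         (0x00400900 + (i*16))
    #define NV10_PGRAPH_CTX_CACHE(i, j) (0x00400160 + 0x4*(i) + 0x20*(j))

    int
    main(void)
    {
            /* tile region 3 sits 3 * 16 bytes past the array base */
            assert(NV20_PGRAPH_TILE(3) == 0x00400930);
            /* cache entry (i=2, j=1): base + 2*4 + 1*0x20 */
            assert(NV10_PGRAPH_CTX_CACHE(2, 1) == 0x00400188);
            printf("register macro arithmetic checks out\n");
            return 0;
    }
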
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index a0258c766850..7a1bc7641b58 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2011 Red Hat Inc. 2 * Copyright 2012 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -22,159 +22,62 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include <core/os.h>
26#include "nouveau_drv.h" 26#include <core/class.h>
27#include <engine/fifo.h> 27#include <core/engctx.h>
28#include <core/ramht.h> 28#include <core/handle.h>
29#include <core/engine/graph/nv40.h>
29 30
30struct nv31_mpeg_engine { 31#include <subdev/fb.h>
31 struct nouveau_exec_engine base; 32#include <subdev/timer.h>
32 atomic_t refcount; 33#include <subdev/instmem.h>
33};
34
35
36static int
37nv31_mpeg_context_new(struct nouveau_channel *chan, int engine)
38{
39 struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
40
41 if (!atomic_add_unless(&pmpeg->refcount, 1, 1))
42 return -EBUSY;
43
44 chan->engctx[engine] = (void *)0xdeadcafe;
45 return 0;
46}
47
48static void
49nv31_mpeg_context_del(struct nouveau_channel *chan, int engine)
50{
51 struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
52 atomic_dec(&pmpeg->refcount);
53 chan->engctx[engine] = NULL;
54}
55 34
56static int 35#include <engine/mpeg.h>
57nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
58{
59 struct drm_device *dev = chan->dev;
60 struct drm_nouveau_private *dev_priv = dev->dev_private;
61 struct nouveau_gpuobj *ctx = NULL;
62 unsigned long flags;
63 int ret;
64
65 NV_DEBUG(dev, "ch%d\n", chan->id);
66 36
67 ret = nouveau_gpuobj_new(dev, NULL, 264 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC | 37struct nv31_mpeg_priv {
68 NVOBJ_FLAG_ZERO_FREE, &ctx); 38 struct nouveau_mpeg base;
69 if (ret) 39 atomic_t refcount;
70 return ret; 40};
71
72 nv_wo32(ctx, 0x78, 0x02001ec1);
73
74 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
75 nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
76 if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id)
77 nv_wr32(dev, 0x00330c, ctx->addr >> 4);
78 nv_wo32(chan->ramfc, 0x54, ctx->addr >> 4);
79 nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
80 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
81 41
82 chan->engctx[engine] = ctx; 42struct nv31_mpeg_chan {
83 return 0; 43 struct nouveau_object base;
84} 44};
85 45
86static void 46/*******************************************************************************
87nv40_mpeg_context_del(struct nouveau_channel *chan, int engine) 47 * MPEG object classes
88{ 48 ******************************************************************************/
89 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
90 struct nouveau_gpuobj *ctx = chan->engctx[engine];
91 struct drm_device *dev = chan->dev;
92 unsigned long flags;
93 u32 inst = 0x80000000 | (ctx->addr >> 4);
94
95 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
96 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
97 if (nv_rd32(dev, 0x00b318) == inst)
98 nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
99 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
100 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
101
102 nouveau_gpuobj_ref(NULL, &ctx);
103 chan->engctx[engine] = NULL;
104}
105 49
106static int 50static int
107nv31_mpeg_object_new(struct nouveau_channel *chan, int engine, 51nv31_mpeg_object_ctor(struct nouveau_object *parent,
108 u32 handle, u16 class) 52 struct nouveau_object *engine,
53 struct nouveau_oclass *oclass, void *data, u32 size,
54 struct nouveau_object **pobject)
109{ 55{
110 struct drm_device *dev = chan->dev; 56 struct nouveau_gpuobj *obj;
111 struct nouveau_gpuobj *obj = NULL;
112 int ret; 57 int ret;
113 58
114 ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_ALLOC | 59 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
115 NVOBJ_FLAG_ZERO_FREE, &obj); 60 20, 16, 0, &obj);
61 *pobject = nv_object(obj);
116 if (ret) 62 if (ret)
117 return ret; 63 return ret;
118 obj->engine = 2;
119 obj->class = class;
120
121 nv_wo32(obj, 0x00, class);
122
123 ret = nouveau_ramht_insert(chan, handle, obj);
124 nouveau_gpuobj_ref(NULL, &obj);
125 return ret;
126}
127
128static int
129nv31_mpeg_init(struct drm_device *dev, int engine)
130{
131 struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
132 int i;
133
134 /* VPE init */
135 nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
136 nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
137 nv_wr32(dev, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
138 nv_wr32(dev, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
139 64
140 for (i = 0; i < nvfb_tile_nr(dev); i++) 65 nv_wo32(obj, 0x00, nv_mclass(obj));
141 pmpeg->base.set_tile_region(dev, i); 66 nv_wo32(obj, 0x04, 0x00000000);
142 67 nv_wo32(obj, 0x08, 0x00000000);
143 /* PMPEG init */ 68 nv_wo32(obj, 0x0c, 0x00000000);
144 nv_wr32(dev, 0x00b32c, 0x00000000);
145 nv_wr32(dev, 0x00b314, 0x00000100);
146 nv_wr32(dev, 0x00b220, nv44_graph_class(dev) ? 0x00000044 : 0x00000031);
147 nv_wr32(dev, 0x00b300, 0x02001ec1);
148 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
149
150 nv_wr32(dev, 0x00b100, 0xffffffff);
151 nv_wr32(dev, 0x00b140, 0xffffffff);
152
153 if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
154 NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
155 return -EBUSY;
156 }
157
158 return 0;
159}
160
161static int
162nv31_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
163{
164 /*XXX: context save? */
165 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
166 nv_wr32(dev, 0x00b140, 0x00000000);
167 return 0; 69 return 0;
168} 70}
169 71
170static int 72static int
171nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data) 73nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
172{ 74{
173 struct drm_device *dev = chan->dev; 75 struct nouveau_instmem *imem = nouveau_instmem(object);
174 u32 inst = data << 4; 76 struct nv31_mpeg_priv *priv = (void *)object->engine;
175 u32 dma0 = nv_ri32(dev, inst + 0); 77 u32 inst = *(u32 *)arg << 4;
176 u32 dma1 = nv_ri32(dev, inst + 4); 78 u32 dma0 = nv_ro32(imem, inst + 0);
177 u32 dma2 = nv_ri32(dev, inst + 8); 79 u32 dma1 = nv_ro32(imem, inst + 4);
80 u32 dma2 = nv_ro32(imem, inst + 8);
178 u32 base = (dma2 & 0xfffff000) | (dma0 >> 20); 81 u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
179 u32 size = dma1 + 1; 82 u32 size = dma1 + 1;
180 83
@@ -184,160 +87,215 @@ nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
184 87
185 if (mthd == 0x0190) { 88 if (mthd == 0x0190) {
186 /* DMA_CMD */ 89 /* DMA_CMD */
187 nv_mask(dev, 0x00b300, 0x00030000, (dma0 & 0x00030000)); 90 nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
188 nv_wr32(dev, 0x00b334, base); 91 nv_wr32(priv, 0x00b334, base);
189 nv_wr32(dev, 0x00b324, size); 92 nv_wr32(priv, 0x00b324, size);
190 } else 93 } else
191 if (mthd == 0x01a0) { 94 if (mthd == 0x01a0) {
192 /* DMA_DATA */ 95 /* DMA_DATA */
193 nv_mask(dev, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2); 96 nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
194 nv_wr32(dev, 0x00b360, base); 97 nv_wr32(priv, 0x00b360, base);
195 nv_wr32(dev, 0x00b364, size); 98 nv_wr32(priv, 0x00b364, size);
196 } else { 99 } else {
197 /* DMA_IMAGE, VRAM only */ 100 /* DMA_IMAGE, VRAM only */
198 if (dma0 & 0x000c0000) 101 if (dma0 & 0x000c0000)
199 return -EINVAL; 102 return -EINVAL;
200 103
201 nv_wr32(dev, 0x00b370, base); 104 nv_wr32(priv, 0x00b370, base);
202 nv_wr32(dev, 0x00b374, size); 105 nv_wr32(priv, 0x00b374, size);
203 } 106 }
204 107
205 return 0; 108 return 0;
206} 109}
207 110
111struct nouveau_ofuncs
112nv31_mpeg_ofuncs = {
113 .ctor = nv31_mpeg_object_ctor,
114 .dtor = _nouveau_gpuobj_dtor,
115 .init = _nouveau_gpuobj_init,
116 .fini = _nouveau_gpuobj_fini,
117 .rd32 = _nouveau_gpuobj_rd32,
118 .wr32 = _nouveau_gpuobj_wr32,
119};
120
121struct nouveau_omthds
122nv31_mpeg_omthds[] = {
123 { 0x0190, nv31_mpeg_mthd_dma },
124 { 0x01a0, nv31_mpeg_mthd_dma },
125 { 0x01b0, nv31_mpeg_mthd_dma },
126 {}
127};
128
129struct nouveau_oclass
130nv31_mpeg_sclass[] = {
131 { 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds },
132 {}
133};
134
135/*******************************************************************************
136 * PMPEG context
137 ******************************************************************************/
138
208static int 139static int
209nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst) 140nv31_mpeg_context_ctor(struct nouveau_object *parent,
141 struct nouveau_object *engine,
142 struct nouveau_oclass *oclass, void *data, u32 size,
143 struct nouveau_object **pobject)
210{ 144{
211 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); 145 struct nv31_mpeg_priv *priv = (void *)engine;
212 struct drm_nouveau_private *dev_priv = dev->dev_private; 146 struct nv31_mpeg_chan *chan;
213 struct nouveau_gpuobj *ctx; 147 int ret;
214 unsigned long flags; 148
215 int i; 149 if (!atomic_add_unless(&priv->refcount, 1, 1))
216 150 return -EBUSY;
217 /* hardcode drm channel id on nv3x, so swmthd lookup works */ 151
218 if (dev_priv->card_type < NV_40) 152 ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
219 return 0; 153 *pobject = nv_object(chan);
220 154 if (ret)
221 spin_lock_irqsave(&dev_priv->channels.lock, flags); 155 return ret;
222 for (i = 0; i < pfifo->channels; i++) { 156
223 if (!dev_priv->channels.ptr[i]) 157 return 0;
224 continue;
225
226 ctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_MPEG];
227 if (ctx && ctx->addr == inst)
228 break;
229 }
230 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
231 return i;
232} 158}
233 159
234static void 160static void
235nv31_vpe_set_tile_region(struct drm_device *dev, int i) 161nv31_mpeg_context_dtor(struct nouveau_object *object)
236{ 162{
237 struct nouveau_fb_tile *tile = nvfb_tile(dev, i); 163 struct nv31_mpeg_priv *priv = (void *)object->engine;
238 nv_wr32(dev, 0x00b008 + (i * 0x10), tile->pitch); 164 struct nv31_mpeg_chan *chan = (void *)object;
239 nv_wr32(dev, 0x00b004 + (i * 0x10), tile->limit); 165 atomic_dec(&priv->refcount);
240 nv_wr32(dev, 0x00b000 + (i * 0x10), tile->addr); 166 nouveau_object_destroy(&chan->base);
241} 167}
242 168
243static void 169static struct nouveau_oclass
244nv31_mpeg_isr(struct drm_device *dev) 170nv31_mpeg_cclass = {
171 .handle = NV_ENGCTX(MPEG, 0x31),
172 .ofuncs = &(struct nouveau_ofuncs) {
173 .ctor = nv31_mpeg_context_ctor,
174 .dtor = nv31_mpeg_context_dtor,
175 .init = nouveau_object_init,
176 .fini = nouveau_object_fini,
177 },
178};
179
180/*******************************************************************************
181 * PMPEG engine/subdev functions
182 ******************************************************************************/
183
184void
185nv31_mpeg_tile_prog(struct nouveau_engine *engine, int i)
245{ 186{
246 u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4; 187 struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
247 u32 chid = nv31_mpeg_isr_chid(dev, inst); 188 struct nv31_mpeg_priv *priv = (void *)engine;
248 u32 stat = nv_rd32(dev, 0x00b100); 189
249 u32 type = nv_rd32(dev, 0x00b230); 190 nv_wr32(priv, 0x00b008 + (i * 0x10), tile->pitch);
250 u32 mthd = nv_rd32(dev, 0x00b234); 191 nv_wr32(priv, 0x00b004 + (i * 0x10), tile->limit);
251 u32 data = nv_rd32(dev, 0x00b238); 192 nv_wr32(priv, 0x00b000 + (i * 0x10), tile->addr);
193}
194
195void
196nv31_mpeg_intr(struct nouveau_subdev *subdev)
197{
198 struct nv31_mpeg_priv *priv = (void *)subdev;
199 struct nouveau_engine *engine = nv_engine(subdev);
200 struct nouveau_handle *handle = NULL;
201 u32 inst = (nv_rd32(priv, 0x00b318) & 0x000fffff) << 4;
202 u32 stat = nv_rd32(priv, 0x00b100);
203 u32 type = nv_rd32(priv, 0x00b230);
204 u32 mthd = nv_rd32(priv, 0x00b234);
205 u32 data = nv_rd32(priv, 0x00b238);
252 u32 show = stat; 206 u32 show = stat;
253 207
254 if (stat & 0x01000000) { 208 if (stat & 0x01000000) {
255 /* happens on initial binding of the object */ 209 /* happens on initial binding of the object */
256 if (type == 0x00000020 && mthd == 0x0000) { 210 if (handle && type == 0x00000020 && mthd == 0x0000) {
257 nv_mask(dev, 0x00b308, 0x00000000, 0x00000000); 211 nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
258 show &= ~0x01000000; 212 show &= ~0x01000000;
259 } 213 }
260 214
261 if (type == 0x00000010) { 215 if (handle && type == 0x00000010) {
262 if (!nouveau_gpuobj_mthd_call2(dev, chid, 0x3174, mthd, data)) 216 handle = nouveau_engctx_lookup_class(engine, inst, 0x3174);
217
218 if (handle && !nv_call(handle->object, mthd, data)) {
219 nouveau_engctx_handle_put(handle);
263 show &= ~0x01000000; 220 show &= ~0x01000000;
221 }
264 } 222 }
265 } 223 }
266 224
267 nv_wr32(dev, 0x00b100, stat); 225 nv_wr32(priv, 0x00b100, stat);
268 nv_wr32(dev, 0x00b230, 0x00000001); 226 nv_wr32(priv, 0x00b230, 0x00000001);
269 227
270 if (show && nouveau_ratelimit()) { 228 if (show) {
271 NV_INFO(dev, "PMPEG: Ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n", 229 nv_error(priv, "ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
272 chid, inst, stat, type, mthd, data); 230 inst, stat, type, mthd, data);
273 } 231 }
274} 232}
275 233
276static void 234static int
277nv31_vpe_isr(struct drm_device *dev) 235nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
236 struct nouveau_oclass *oclass, void *data, u32 size,
237 struct nouveau_object **pobject)
278{ 238{
279 if (nv_rd32(dev, 0x00b100)) 239 struct nv31_mpeg_priv *priv;
280 nv31_mpeg_isr(dev); 240 int ret;
281 241
282 if (nv_rd32(dev, 0x00b800)) { 242 ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
283 u32 stat = nv_rd32(dev, 0x00b800); 243 *pobject = nv_object(priv);
284 NV_INFO(dev, "PMSRCH: 0x%08x\n", stat); 244 if (ret)
285 nv_wr32(dev, 0xb800, stat); 245 return ret;
286 } 246
247 nv_subdev(priv)->unit = 0x00000002;
248 nv_subdev(priv)->intr = nv31_mpeg_intr;
249 nv_engine(priv)->cclass = &nv31_mpeg_cclass;
250 nv_engine(priv)->sclass = nv31_mpeg_sclass;
251 nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
252 return 0;
287} 253}
288 254
289static void 255int
290nv31_mpeg_destroy(struct drm_device *dev, int engine) 256nv31_mpeg_init(struct nouveau_object *object)
291{ 257{
292 struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine); 258 struct nouveau_engine *engine = nv_engine(object->engine);
259 struct nv31_mpeg_priv *priv = (void *)engine;
260 struct nouveau_fb *pfb = nouveau_fb(object);
261 int ret, i;
293 262
294 nouveau_irq_unregister(dev, 0); 263 ret = nouveau_mpeg_init(&priv->base);
264 if (ret)
265 return ret;
295 266
296 NVOBJ_ENGINE_DEL(dev, MPEG); 267 /* VPE init */
297 kfree(pmpeg); 268 nv_wr32(priv, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
298} 269 nv_wr32(priv, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
299 270
300int 271 for (i = 0; i < pfb->tile.regions; i++)
301nv31_mpeg_create(struct drm_device *dev) 272 engine->tile_prog(engine, i);
302{ 273
303 struct drm_nouveau_private *dev_priv = dev->dev_private; 274 /* PMPEG init */
304 struct nv31_mpeg_engine *pmpeg; 275 nv_wr32(priv, 0x00b32c, 0x00000000);
305 276 nv_wr32(priv, 0x00b314, 0x00000100);
306 pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL); 277 nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031);
307 if (!pmpeg) 278 nv_wr32(priv, 0x00b300, 0x02001ec1);
308 return -ENOMEM; 279 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
309 atomic_set(&pmpeg->refcount, 0); 280
310 281 nv_wr32(priv, 0x00b100, 0xffffffff);
311 pmpeg->base.destroy = nv31_mpeg_destroy; 282 nv_wr32(priv, 0x00b140, 0xffffffff);
312 pmpeg->base.init = nv31_mpeg_init; 283
313 pmpeg->base.fini = nv31_mpeg_fini; 284 if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
314 if (dev_priv->card_type < NV_40) { 285 nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
315 pmpeg->base.context_new = nv31_mpeg_context_new; 286 return -EBUSY;
316 pmpeg->base.context_del = nv31_mpeg_context_del;
317 } else {
318 pmpeg->base.context_new = nv40_mpeg_context_new;
319 pmpeg->base.context_del = nv40_mpeg_context_del;
320 } 287 }
321 pmpeg->base.object_new = nv31_mpeg_object_new;
322
323 /* ISR vector, PMC_ENABLE bit, and TILE regs are shared between
324 * all VPE engines, for this driver's purposes the PMPEG engine
325 * will be treated as the "master" and handle the global VPE
326 * bits too
327 */
328 pmpeg->base.set_tile_region = nv31_vpe_set_tile_region;
329 nouveau_irq_register(dev, 0, nv31_vpe_isr);
330
331 NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
332 NVOBJ_CLASS(dev, 0x3174, MPEG);
333 NVOBJ_MTHD (dev, 0x3174, 0x0190, nv31_mpeg_mthd_dma);
334 NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv31_mpeg_mthd_dma);
335 NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv31_mpeg_mthd_dma);
336
337#if 0
338 NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
339 NVOBJ_CLASS(dev, 0x4075, ME);
340#endif
341 return 0;
342 288
289 return 0;
343} 290}
291
292struct nouveau_oclass
293nv31_mpeg_oclass = {
294 .handle = NV_ENGINE(MPEG, 0x31),
295 .ofuncs = &(struct nouveau_ofuncs) {
296 .ctor = nv31_mpeg_ctor,
297 .dtor = _nouveau_mpeg_dtor,
298 .init = nv31_mpeg_init,
299 .fini = _nouveau_mpeg_fini,
300 },
301};
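
[editor's note] nv31_mpeg_context_ctor above admits only one live context at a time: atomic_add_unless(&priv->refcount, 1, 1) increments only while the count is below one, so a second channel gets -EBUSY until nv31_mpeg_context_dtor drops the count. The same saturate-at-one semantics can be expressed with a compare-exchange in portable C11, as in the minimal sketch below (ctx_acquire/ctx_release are invented names standing in for the ctor/dtor paths):

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int refcount; /* zero-initialized: no context bound yet */

    /* succeed only on the 0 -> 1 transition, like atomic_add_unless(.., 1, 1) */
    static int
    ctx_acquire(void)
    {
            int expected = 0;
            if (!atomic_compare_exchange_strong(&refcount, &expected, 1))
                    return -EBUSY;
            return 0;
    }

    static void
    ctx_release(void)
    {
            atomic_fetch_sub(&refcount, 1);
    }

    int
    main(void)
    {
            printf("first:  %d\n", ctx_acquire()); /* 0 */
            printf("second: %d\n", ctx_acquire()); /* -EBUSY: already bound */
            ctx_release();
            printf("third:  %d\n", ctx_acquire()); /* 0 again */
            return 0;
    }
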
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
new file mode 100644
index 000000000000..12418574efea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -0,0 +1,144 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <subdev/fb.h>
30#include <subdev/timer.h>
31#include <subdev/instmem.h>
32
33#include <engine/mpeg.h>
34#include <engine/graph/nv40.h>
35
36struct nv40_mpeg_priv {
37 struct nouveau_mpeg base;
38};
39
40struct nv40_mpeg_chan {
41 struct nouveau_mpeg base;
42};
43
44/*******************************************************************************
45 * PMPEG context
46 ******************************************************************************/
47
48static int
49nv40_mpeg_context_ctor(struct nouveau_object *parent,
50 struct nouveau_object *engine,
51 struct nouveau_oclass *oclass, void *data, u32 size,
52 struct nouveau_object **pobject)
53{
54 struct nv40_mpeg_chan *chan;
55 int ret;
56
57 ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL,
58 264 * 4, 16,
59 NVOBJ_FLAG_ZERO_ALLOC, &chan);
60 *pobject = nv_object(chan);
61 if (ret)
62 return ret;
63
64 return 0;
65}
66
67static int
68nv40_mpeg_context_fini(struct nouveau_object *object, bool suspend)
69{
70
71 struct nv40_mpeg_priv *priv = (void *)object->engine;
72 struct nv40_mpeg_chan *chan = (void *)object;
73 u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
74
75 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
76 if (nv_rd32(priv, 0x00b318) == inst)
77 nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
78 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
79 return 0;
80}
81
82static struct nouveau_oclass
83nv40_mpeg_cclass = {
84 .handle = NV_ENGCTX(MPEG, 0x40),
85 .ofuncs = &(struct nouveau_ofuncs) {
86 .ctor = nv40_mpeg_context_ctor,
87 .dtor = _nouveau_mpeg_context_dtor,
88 .init = _nouveau_mpeg_context_init,
89 .fini = nv40_mpeg_context_fini,
90 .rd32 = _nouveau_mpeg_context_rd32,
91 .wr32 = _nouveau_mpeg_context_wr32,
92 },
93};
94
95/*******************************************************************************
96 * PMPEG engine/subdev functions
97 ******************************************************************************/
98
99static void
100nv40_mpeg_intr(struct nouveau_subdev *subdev)
101{
102 struct nv40_mpeg_priv *priv = (void *)subdev;
103 u32 stat;
104
105 if ((stat = nv_rd32(priv, 0x00b100)))
106 nv31_mpeg_intr(subdev);
107
108 if ((stat = nv_rd32(priv, 0x00b800))) {
109 nv_error(priv, "PMSRCH 0x%08x\n", stat);
110 nv_wr32(priv, 0x00b800, stat);
111 }
112}
113
114static int
115nv40_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
116 struct nouveau_oclass *oclass, void *data, u32 size,
117 struct nouveau_object **pobject)
118{
119 struct nv40_mpeg_priv *priv;
120 int ret;
121
122 ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
123 *pobject = nv_object(priv);
124 if (ret)
125 return ret;
126
127 nv_subdev(priv)->unit = 0x00000002;
128 nv_subdev(priv)->intr = nv40_mpeg_intr;
129 nv_engine(priv)->cclass = &nv40_mpeg_cclass;
130 nv_engine(priv)->sclass = nv31_mpeg_sclass;
131 nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
132 return 0;
133}
134
135struct nouveau_oclass
136nv40_mpeg_oclass = {
137 .handle = NV_ENGINE(MPEG, 0x40),
138 .ofuncs = &(struct nouveau_ofuncs) {
139 .ctor = nv40_mpeg_ctor,
140 .dtor = _nouveau_mpeg_dtor,
141 .init = nv31_mpeg_init,
142 .fini = _nouveau_mpeg_fini,
143 },
144};
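
[editor's note] nv40 adds no class or context logic of its own: nv40_mpeg_oclass points init at nv31_mpeg_init and reuses nv31_mpeg_sclass and nv31_mpeg_tile_prog, and nv40_mpeg_intr merely wraps nv31_mpeg_intr to also drain the PMSRCH status word at 0x00b800 that earlier chips lack. A minimal sketch of that delegation shape, with plain variables in place of the MMIO status reads (both handler names are stand-ins, not the driver's functions):

    #include <stdio.h>

    static unsigned pmpeg_stat  = 0x01000000; /* pretend PMPEG raised an intr */
    static unsigned pmsrch_stat = 0x00000001; /* pretend PMSRCH did too */

    static void
    base_intr(void) /* plays the role of nv31_mpeg_intr */
    {
            printf("PMPEG  0x%08x handled by shared code\n", pmpeg_stat);
            pmpeg_stat = 0;
    }

    static void
    derived_intr(void) /* plays the role of nv40_mpeg_intr */
    {
            unsigned stat;

            if ((stat = pmpeg_stat))
                    base_intr();              /* delegate the common PMPEG work */

            if ((stat = pmsrch_stat)) {       /* the unit only nv40 has */
                    printf("PMSRCH 0x%08x acked locally\n", stat);
                    pmsrch_stat = 0;          /* ack by clearing the status */
            }
    }

    int
    main(void)
    {
            derived_intr();
            return 0;
    }
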
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
index 4e3292ed80c1..8678a9996d57 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2011 Red Hat Inc. 2 * Copyright 2012 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -22,218 +22,219 @@
  * Authors: Ben Skeggs
  */
 
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include <core/ramht.h>
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
 
-struct nv50_mpeg_engine {
-	struct nouveau_exec_engine base;
-};
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/timer.h>
 
-static inline u32
-CTX_PTR(struct drm_device *dev, u32 offset)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+#include <engine/mpeg.h>
 
-	if (dev_priv->chipset == 0x50)
-		offset += 0x0260;
-	else
-		offset += 0x0060;
+struct nv50_mpeg_priv {
+	struct nouveau_mpeg base;
+};
 
-	return offset;
-}
+struct nv50_mpeg_chan {
+	struct nouveau_mpeg_chan base;
+};
+
+/*******************************************************************************
+ * MPEG object classes
+ ******************************************************************************/
 
 static int
-nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
+nv50_mpeg_object_ctor(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass, void *data, u32 size,
+		      struct nouveau_object **pobject)
 {
-	struct drm_device *dev = chan->dev;
-	struct nouveau_gpuobj *ramin = chan->ramin;
-	struct nouveau_gpuobj *ctx = NULL;
+	struct nouveau_gpuobj *obj;
 	int ret;
 
-	NV_DEBUG(dev, "ch%d\n", chan->id);
-
-	ret = nouveau_gpuobj_new(dev, chan, 128 * 4, 0, NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, &ctx);
+	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+				    16, 16, 0, &obj);
+	*pobject = nv_object(obj);
 	if (ret)
 		return ret;
 
-	nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002);
-	nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->addr + ctx->size - 1);
-	nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->addr);
-	nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0);
-	nv_wo32(ramin, CTX_PTR(dev, 0x10), 0);
-	nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000);
-
-	nv_wo32(ctx, 0x70, 0x00801ec1);
-	nv_wo32(ctx, 0x7c, 0x0000037c);
-	nvimem_flush(dev);
-
-	chan->engctx[engine] = ctx;
+	nv_wo32(obj, 0x00, nv_mclass(obj));
+	nv_wo32(obj, 0x04, 0x00000000);
+	nv_wo32(obj, 0x08, 0x00000000);
+	nv_wo32(obj, 0x0c, 0x00000000);
 	return 0;
 }
 
-static void
-nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nouveau_gpuobj *ctx = chan->engctx[engine];
-	struct drm_device *dev = chan->dev;
-	int i;
+struct nouveau_ofuncs
+nv50_mpeg_ofuncs = {
+	.ctor = nv50_mpeg_object_ctor,
+	.dtor = _nouveau_gpuobj_dtor,
+	.init = _nouveau_gpuobj_init,
+	.fini = _nouveau_gpuobj_fini,
+	.rd32 = _nouveau_gpuobj_rd32,
+	.wr32 = _nouveau_gpuobj_wr32,
+};
 
-	for (i = 0x00; i <= 0x14; i += 4)
-		nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
+static struct nouveau_oclass
+nv50_mpeg_sclass[] = {
+	{ 0x3174, &nv50_mpeg_ofuncs },
+	{}
+};
 
-	nouveau_gpuobj_ref(NULL, &ctx);
-	chan->engctx[engine] = NULL;
-}
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
 
-static int
-nv50_mpeg_object_new(struct nouveau_channel *chan, int engine,
-		     u32 handle, u16 class)
+int
+nv50_mpeg_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
 {
-	struct drm_device *dev = chan->dev;
-	struct nouveau_gpuobj *obj = NULL;
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_mpeg_chan *chan;
 	int ret;
 
-	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+	ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL, 128 * 4,
+					  0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
-	obj->engine = 2;
-	obj->class = class;
-
-	nv_wo32(obj, 0x00, class);
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	nvimem_flush(dev);
-
-	ret = nouveau_ramht_insert(chan, handle, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
-}
 
-static void
-nv50_mpeg_tlb_flush(struct drm_device *dev, int engine)
-{
-	nv50_vm_flush_engine(dev, 0x08);
-}
+	nv_wo32(chan, 0x0070, 0x00801ec1);
+	nv_wo32(chan, 0x007c, 0x0000037c);
+	bar->flush(bar);
+	return 0;
+}
 
-static int
-nv50_mpeg_init(struct drm_device *dev, int engine)
-{
-	nv_wr32(dev, 0x00b32c, 0x00000000);
-	nv_wr32(dev, 0x00b314, 0x00000100);
-	nv_wr32(dev, 0x00b0e0, 0x0000001a);
-
-	nv_wr32(dev, 0x00b220, 0x00000044);
-	nv_wr32(dev, 0x00b300, 0x00801ec1);
-	nv_wr32(dev, 0x00b390, 0x00000000);
-	nv_wr32(dev, 0x00b394, 0x00000000);
-	nv_wr32(dev, 0x00b398, 0x00000000);
-	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
-
-	nv_wr32(dev, 0x00b100, 0xffffffff);
-	nv_wr32(dev, 0x00b140, 0xffffffff);
-
-	if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
-		NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
-		return -EBUSY;
-	}
+static struct nouveau_oclass
+nv50_mpeg_cclass = {
+	.handle = NV_ENGCTX(MPEG, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_mpeg_context_ctor,
+		.dtor = _nouveau_mpeg_context_dtor,
+		.init = _nouveau_mpeg_context_init,
+		.fini = _nouveau_mpeg_context_fini,
+		.rd32 = _nouveau_mpeg_context_rd32,
+		.wr32 = _nouveau_mpeg_context_wr32,
+	},
+};
 
-	return 0;
-}
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
 
-static int
-nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
+int
+nv50_mpeg_tlb_flush(struct nouveau_engine *engine)
 {
-	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
-	nv_wr32(dev, 0x00b140, 0x00000000);
+	nv50_vm_flush_engine(&engine->base, 0x08);
 	return 0;
 }
 
-static void
-nv50_mpeg_isr(struct drm_device *dev)
+void
+nv50_mpeg_intr(struct nouveau_subdev *subdev)
 {
-	u32 stat = nv_rd32(dev, 0x00b100);
-	u32 type = nv_rd32(dev, 0x00b230);
-	u32 mthd = nv_rd32(dev, 0x00b234);
-	u32 data = nv_rd32(dev, 0x00b238);
+	struct nv50_mpeg_priv *priv = (void *)subdev;
+	u32 stat = nv_rd32(priv, 0x00b100);
+	u32 type = nv_rd32(priv, 0x00b230);
+	u32 mthd = nv_rd32(priv, 0x00b234);
+	u32 data = nv_rd32(priv, 0x00b238);
 	u32 show = stat;
 
 	if (stat & 0x01000000) {
 		/* happens on initial binding of the object */
 		if (type == 0x00000020 && mthd == 0x0000) {
-			nv_wr32(dev, 0x00b308, 0x00000100);
+			nv_wr32(priv, 0x00b308, 0x00000100);
 			show &= ~0x01000000;
 		}
 	}
 
-	if (show && nouveau_ratelimit()) {
-		NV_INFO(dev, "PMPEG - 0x%08x 0x%08x 0x%08x 0x%08x\n",
+	if (show) {
+		nv_info(priv, "0x%08x 0x%08x 0x%08x 0x%08x\n",
 			stat, type, mthd, data);
 	}
 
-	nv_wr32(dev, 0x00b100, stat);
-	nv_wr32(dev, 0x00b230, 0x00000001);
-	nv50_fb_vm_trap(dev, 1);
+	nv_wr32(priv, 0x00b100, stat);
+	nv_wr32(priv, 0x00b230, 0x00000001);
+	nv50_fb_trap(nouveau_fb(priv), 1);
 }
 
 static void
-nv50_vpe_isr(struct drm_device *dev)
+nv50_vpe_intr(struct nouveau_subdev *subdev)
 {
-	if (nv_rd32(dev, 0x00b100))
-		nv50_mpeg_isr(dev);
+	struct nv50_mpeg_priv *priv = (void *)subdev;
 
-	if (nv_rd32(dev, 0x00b800)) {
-		u32 stat = nv_rd32(dev, 0x00b800);
-		NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
-		nv_wr32(dev, 0xb800, stat);
+	if (nv_rd32(priv, 0x00b100))
+		nv50_mpeg_intr(subdev);
+
+	if (nv_rd32(priv, 0x00b800)) {
+		u32 stat = nv_rd32(priv, 0x00b800);
+		nv_info(priv, "PMSRCH: 0x%08x\n", stat);
+		nv_wr32(priv, 0xb800, stat);
 	}
 }
 
-static void
-nv50_mpeg_destroy(struct drm_device *dev, int engine)
+static int
+nv50_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
 {
-	struct nv50_mpeg_engine *pmpeg = nv_engine(dev, engine);
+	struct nv50_mpeg_priv *priv;
+	int ret;
 
-	nouveau_irq_unregister(dev, 0);
+	ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
 
-	NVOBJ_ENGINE_DEL(dev, MPEG);
-	kfree(pmpeg);
+	nv_subdev(priv)->unit = 0x00400002;
+	nv_subdev(priv)->intr = nv50_vpe_intr;
+	nv_engine(priv)->cclass = &nv50_mpeg_cclass;
+	nv_engine(priv)->sclass = nv50_mpeg_sclass;
+	nv_engine(priv)->tlb_flush = nv50_mpeg_tlb_flush;
+	return 0;
 }
 
 int
-nv50_mpeg_create(struct drm_device *dev)
+nv50_mpeg_init(struct nouveau_object *object)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_mpeg_engine *pmpeg;
-
-	pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
-	if (!pmpeg)
-		return -ENOMEM;
-
-	pmpeg->base.destroy = nv50_mpeg_destroy;
-	pmpeg->base.init = nv50_mpeg_init;
-	pmpeg->base.fini = nv50_mpeg_fini;
-	pmpeg->base.context_new = nv50_mpeg_context_new;
-	pmpeg->base.context_del = nv50_mpeg_context_del;
-	pmpeg->base.object_new = nv50_mpeg_object_new;
-	pmpeg->base.tlb_flush = nv50_mpeg_tlb_flush;
-
-	if (dev_priv->chipset == 0x50) {
-		nouveau_irq_register(dev, 0, nv50_vpe_isr);
-		NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
-		NVOBJ_CLASS(dev, 0x3174, MPEG);
-#if 0
-		NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
-		NVOBJ_CLASS(dev, 0x4075, ME);
-#endif
-	} else {
-		nouveau_irq_register(dev, 0, nv50_mpeg_isr);
-		NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
-		NVOBJ_CLASS(dev, 0x8274, MPEG);
-	}
+	struct nv50_mpeg_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_mpeg_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x00b32c, 0x00000000);
+	nv_wr32(priv, 0x00b314, 0x00000100);
+	nv_wr32(priv, 0x00b0e0, 0x0000001a);
+
+	nv_wr32(priv, 0x00b220, 0x00000044);
+	nv_wr32(priv, 0x00b300, 0x00801ec1);
+	nv_wr32(priv, 0x00b390, 0x00000000);
+	nv_wr32(priv, 0x00b394, 0x00000000);
+	nv_wr32(priv, 0x00b398, 0x00000000);
+	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+
+	nv_wr32(priv, 0x00b100, 0xffffffff);
+	nv_wr32(priv, 0x00b140, 0xffffffff);
+
+	if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
+		nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
+		return -EBUSY;
+	}
 
 	return 0;
-
 }
+
+struct nouveau_oclass
+nv50_mpeg_oclass = {
+	.handle = NV_ENGINE(MPEG, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_mpeg_ctor,
+		.dtor = _nouveau_mpeg_dtor,
+		.init = nv50_mpeg_init,
+		.fini = _nouveau_mpeg_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
new file mode 100644
index 000000000000..8f805b44d59e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
@@ -0,0 +1,104 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <subdev/vm.h>
30#include <subdev/bar.h>
31#include <subdev/timer.h>
32
33#include <engine/mpeg.h>
34
35struct nv84_mpeg_priv {
36 struct nouveau_mpeg base;
37};
38
39struct nv84_mpeg_chan {
40 struct nouveau_mpeg_chan base;
41};
42
43/*******************************************************************************
44 * MPEG object classes
45 ******************************************************************************/
46
47static struct nouveau_oclass
48nv84_mpeg_sclass[] = {
49 { 0x8274, &nv50_mpeg_ofuncs },
50 {}
51};
52
53/*******************************************************************************
54 * PMPEG context
55 ******************************************************************************/
56
57static struct nouveau_oclass
58nv84_mpeg_cclass = {
59 .handle = NV_ENGCTX(MPEG, 0x84),
60 .ofuncs = &(struct nouveau_ofuncs) {
61 .ctor = nv50_mpeg_context_ctor,
62 .dtor = _nouveau_mpeg_context_dtor,
63 .init = _nouveau_mpeg_context_init,
64 .fini = _nouveau_mpeg_context_fini,
65 .rd32 = _nouveau_mpeg_context_rd32,
66 .wr32 = _nouveau_mpeg_context_wr32,
67 },
68};
69
70/*******************************************************************************
71 * PMPEG engine/subdev functions
72 ******************************************************************************/
73
74static int
75nv84_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
76 struct nouveau_oclass *oclass, void *data, u32 size,
77 struct nouveau_object **pobject)
78{
79 struct nv84_mpeg_priv *priv;
80 int ret;
81
82 ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
83 *pobject = nv_object(priv);
84 if (ret)
85 return ret;
86
87 nv_subdev(priv)->unit = 0x00000002;
88 nv_subdev(priv)->intr = nv50_mpeg_intr;
89 nv_engine(priv)->cclass = &nv84_mpeg_cclass;
90 nv_engine(priv)->sclass = nv84_mpeg_sclass;
91 nv_engine(priv)->tlb_flush = nv50_mpeg_tlb_flush;
92 return 0;
93}
94
95struct nouveau_oclass
96nv84_mpeg_oclass = {
97 .handle = NV_ENGINE(MPEG, 0x84),
98 .ofuncs = &(struct nouveau_ofuncs) {
99 .ctor = nv84_mpeg_ctor,
100 .dtor = _nouveau_mpeg_dtor,
101 .init = nv50_mpeg_init,
102 .fini = _nouveau_mpeg_fini,
103 },
104};
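nv84 reuses nv50's ofuncs and context ctor wholesale; the only per-chipset data left is the class id in the sclass table (0x8274 here versus 0x3174 on nv50) and the unit/handle values. A small self-contained sketch of how such a null-terminated class table can be searched when an object of a given class is requested; model types, the real lookup lives in the core object code:

struct oclass_model {			/* stands in for nouveau_oclass */
	unsigned handle;		/* 16-bit class id, e.g. 0x8274 */
	const void *ofuncs;
};

/* sclass tables end with an empty {} sentinel, as above */
const struct oclass_model *
sclass_find(const struct oclass_model *sclass, unsigned handle)
{
	for (; sclass->ofuncs; sclass++) {
		if (sclass->handle == handle)
			return sclass;
	}
	return 0;	/* class not implemented on this chipset */
}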
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
index 384de6deeeea..50e7e0da1981 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,56 +22,154 @@
  * Authors: Ben Skeggs
  */
 
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include <core/ramht.h>
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
 
-struct nv98_ppp_engine {
-	struct nouveau_exec_engine base;
+#include <engine/ppp.h>
+
+struct nv98_ppp_priv {
+	struct nouveau_ppp base;
+};
+
+struct nv98_ppp_chan {
+	struct nouveau_ppp_chan base;
 };
 
+/*******************************************************************************
+ * PPP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv98_ppp_sclass[] = {
+	{},
+};
+
+/*******************************************************************************
+ * PPPP context
+ ******************************************************************************/
+
 static int
-nv98_ppp_fini(struct drm_device *dev, int engine, bool suspend)
+nv98_ppp_context_ctor(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass, void *data, u32 size,
+		      struct nouveau_object **pobject)
 {
-	if (!(nv_rd32(dev, 0x000200) & 0x00000002))
-		return 0;
+	struct nv98_ppp_chan *priv;
+	int ret;
+
+	ret = nouveau_ppp_context_create(parent, engine, oclass, NULL,
+					 0, 0, 0, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
 
-	nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
 	return 0;
 }
 
+static void
+nv98_ppp_context_dtor(struct nouveau_object *object)
+{
+	struct nv98_ppp_chan *priv = (void *)object;
+	nouveau_ppp_context_destroy(&priv->base);
+}
+
 static int
-nv98_ppp_init(struct drm_device *dev, int engine)
+nv98_ppp_context_init(struct nouveau_object *object)
 {
-	nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
+	struct nv98_ppp_chan *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_ppp_context_init(&priv->base);
+	if (ret)
+		return ret;
+
 	return 0;
 }
 
+static int
+nv98_ppp_context_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv98_ppp_chan *priv = (void *)object;
+	return nouveau_ppp_context_fini(&priv->base, suspend);
+}
+
+static struct nouveau_oclass
+nv98_ppp_cclass = {
+	.handle = NV_ENGCTX(PPP, 0x98),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv98_ppp_context_ctor,
+		.dtor = nv98_ppp_context_dtor,
+		.init = nv98_ppp_context_init,
+		.fini = nv98_ppp_context_fini,
+		.rd32 = _nouveau_ppp_context_rd32,
+		.wr32 = _nouveau_ppp_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PPPP engine/subdev functions
+ ******************************************************************************/
+
 static void
-nv98_ppp_destroy(struct drm_device *dev, int engine)
+nv98_ppp_intr(struct nouveau_subdev *subdev)
 {
-	struct nv98_ppp_engine *pppp = nv_engine(dev, engine);
+}
 
-	NVOBJ_ENGINE_DEL(dev, PPP);
+static int
+nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv98_ppp_priv *priv;
+	int ret;
+
+	ret = nouveau_ppp_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
 
-	kfree(pppp);
+	nv_subdev(priv)->unit = 0x00400002;
+	nv_subdev(priv)->intr = nv98_ppp_intr;
+	nv_engine(priv)->cclass = &nv98_ppp_cclass;
+	nv_engine(priv)->sclass = nv98_ppp_sclass;
+	return 0;
 }
 
-int
-nv98_ppp_create(struct drm_device *dev)
+static void
+nv98_ppp_dtor(struct nouveau_object *object)
 {
-	struct nv98_ppp_engine *pppp;
+	struct nv98_ppp_priv *priv = (void *)object;
+	nouveau_ppp_destroy(&priv->base);
+}
 
-	pppp = kzalloc(sizeof(*pppp), GFP_KERNEL);
-	if (!pppp)
-		return -ENOMEM;
+static int
+nv98_ppp_init(struct nouveau_object *object)
+{
+	struct nv98_ppp_priv *priv = (void *)object;
+	int ret;
 
-	pppp->base.destroy = nv98_ppp_destroy;
-	pppp->base.init = nv98_ppp_init;
-	pppp->base.fini = nv98_ppp_fini;
+	ret = nouveau_ppp_init(&priv->base);
+	if (ret)
+		return ret;
 
-	NVOBJ_ENGINE_ADD(dev, PPP, &pppp->base);
 	return 0;
 }
+
+static int
+nv98_ppp_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv98_ppp_priv *priv = (void *)object;
+	return nouveau_ppp_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv98_ppp_oclass = {
+	.handle = NV_ENGINE(PPP, 0x98),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv98_ppp_ctor,
+		.dtor = nv98_ppp_dtor,
+		.init = nv98_ppp_init,
+		.fini = nv98_ppp_fini,
+	},
+};
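The deleted nv98_ppp_init/fini on the left did the PMC enable/disable dance against register 0x000200 by hand; with 'unit' recorded on the subdev, the shared engine base code can perform the same reset generically. Roughly, under that assumption, and with a self-contained model of the nv_mask read-modify-write helper:

static unsigned regs[0x400];			/* fake register file */

static unsigned rd32(unsigned addr) { return regs[addr >> 2]; }
static void wr32(unsigned addr, unsigned v) { regs[addr >> 2] = v; }

static void
mask32(unsigned addr, unsigned mask, unsigned val)	/* models nv_mask() */
{
	wr32(addr, (rd32(addr) & ~mask) | val);
}

/* the reset the old hand-rolled code performed, expressed via 'unit':
 * clear the engine's PMC enable bit(s), then set them again */
void
engine_reset(unsigned unit)
{
	mask32(0x000200, unit, 0x00000000);
	mask32(0x000200, unit, unit);
}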
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
new file mode 100644
index 000000000000..f48da7577cc6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -0,0 +1,140 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <engine/software.h>
30#include <engine/fifo.h>
31
32struct nv04_software_priv {
33 struct nouveau_software base;
34};
35
36struct nv04_software_chan {
37 struct nouveau_software_chan base;
38};
39
40/*******************************************************************************
41 * software object classes
42 ******************************************************************************/
43
44static int
45nv04_software_set_ref(struct nouveau_object *object, u32 mthd,
46 void *data, u32 size)
47{
48 struct nouveau_object *channel = (void *)nv_engctx(object->parent);
49 struct nouveau_fifo_chan *fifo = (void *)channel->parent;
50 atomic_set(&fifo->refcnt, *(u32*)data);
51 return 0;
52}
53
54static int
55nv04_software_flip(struct nouveau_object *object, u32 mthd,
56 void *args, u32 size)
57{
58 struct nv04_software_chan *chan = (void *)nv_engctx(object->parent);
59 if (chan->base.flip)
60 return chan->base.flip(chan->base.flip_data);
61 return -EINVAL;
62}
63
64static struct nouveau_omthds
65nv04_software_omthds[] = {
66 { 0x0150, nv04_software_set_ref },
67 { 0x0500, nv04_software_flip },
68 {}
69};
70
71static struct nouveau_oclass
72nv04_software_sclass[] = {
73 { 0x006e, &nouveau_object_ofuncs, nv04_software_omthds },
74 {}
75};
76
77/*******************************************************************************
78 * software context
79 ******************************************************************************/
80
81static int
82nv04_software_context_ctor(struct nouveau_object *parent,
83 struct nouveau_object *engine,
84 struct nouveau_oclass *oclass, void *data, u32 size,
85 struct nouveau_object **pobject)
86{
87 struct nv04_software_chan *chan;
88 int ret;
89
90 ret = nouveau_software_context_create(parent, engine, oclass, &chan);
91 *pobject = nv_object(chan);
92 if (ret)
93 return ret;
94
95 return 0;
96}
97
98static struct nouveau_oclass
99nv04_software_cclass = {
100 .handle = NV_ENGCTX(SW, 0x04),
101 .ofuncs = &(struct nouveau_ofuncs) {
102 .ctor = nv04_software_context_ctor,
103 .dtor = _nouveau_software_context_dtor,
104 .init = _nouveau_software_context_init,
105 .fini = _nouveau_software_context_fini,
106 },
107};
108
109/*******************************************************************************
110 * software engine/subdev functions
111 ******************************************************************************/
112
113static int
114nv04_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
115 struct nouveau_oclass *oclass, void *data, u32 size,
116 struct nouveau_object **pobject)
117{
118 struct nv04_software_priv *priv;
119 int ret;
120
121 ret = nouveau_software_create(parent, engine, oclass, &priv);
122 *pobject = nv_object(priv);
123 if (ret)
124 return ret;
125
126 nv_engine(priv)->cclass = &nv04_software_cclass;
127 nv_engine(priv)->sclass = nv04_software_sclass;
128 return 0;
129}
130
131struct nouveau_oclass
132nv04_software_oclass = {
133 .handle = NV_ENGINE(SW, 0x04),
134 .ofuncs = &(struct nouveau_ofuncs) {
135 .ctor = nv04_software_ctor,
136 .dtor = _nouveau_software_dtor,
137 .init = _nouveau_software_init,
138 .fini = _nouveau_software_fini,
139 },
140};
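The omthds table above is the new home for software methods: 0x0150 implements NV04_NVSW_SET_REF against the channel's refcnt, and 0x0500 is the page-flip hook. A compilable sketch of how such a table is plausibly consumed once the FIFO has decoded (mthd, data) from the pushbuffer; model types, not the core's actual dispatcher:

struct omthd_model {			/* stands in for nouveau_omthds */
	unsigned mthd;			/* method offset, e.g. 0x0150 */
	int (*call)(void *object, unsigned mthd, void *data, unsigned size);
};

int
mthd_exec(const struct omthd_model *omthds, void *object,
	  unsigned mthd, void *data, unsigned size)
{
	const struct omthd_model *m;

	for (m = omthds; m->call; m++) {
		if (m->mthd == mthd)
			return m->call(object, mthd, data, size);
	}
	return -1;	/* unhandled; the real core reports an error */
}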
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
new file mode 100644
index 000000000000..46dada53d272
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -0,0 +1,128 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <engine/software.h>
30
31struct nv10_software_priv {
32 struct nouveau_software base;
33};
34
35struct nv10_software_chan {
36 struct nouveau_software_chan base;
37};
38
39/*******************************************************************************
40 * software object classes
41 ******************************************************************************/
42
43static int
44nv10_software_flip(struct nouveau_object *object, u32 mthd,
45 void *args, u32 size)
46{
47 struct nv10_software_chan *chan = (void *)nv_engctx(object->parent);
48 if (chan->base.flip)
49 return chan->base.flip(chan->base.flip_data);
50 return -EINVAL;
51}
52
53static struct nouveau_omthds
54nv10_software_omthds[] = {
55 { 0x0500, nv10_software_flip },
56 {}
57};
58
59static struct nouveau_oclass
60nv10_software_sclass[] = {
61 { 0x016e, &nouveau_object_ofuncs, nv10_software_omthds },
62 {}
63};
64
65/*******************************************************************************
66 * software context
67 ******************************************************************************/
68
69static int
70nv10_software_context_ctor(struct nouveau_object *parent,
71 struct nouveau_object *engine,
72 struct nouveau_oclass *oclass, void *data, u32 size,
73 struct nouveau_object **pobject)
74{
75 struct nv10_software_chan *chan;
76 int ret;
77
78 ret = nouveau_software_context_create(parent, engine, oclass, &chan);
79 *pobject = nv_object(chan);
80 if (ret)
81 return ret;
82
83 return 0;
84}
85
86static struct nouveau_oclass
87nv10_software_cclass = {
88 .handle = NV_ENGCTX(SW, 0x04),
89 .ofuncs = &(struct nouveau_ofuncs) {
90 .ctor = nv10_software_context_ctor,
91 .dtor = _nouveau_software_context_dtor,
92 .init = _nouveau_software_context_init,
93 .fini = _nouveau_software_context_fini,
94 },
95};
96
97/*******************************************************************************
98 * software engine/subdev functions
99 ******************************************************************************/
100
101static int
102nv10_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
103 struct nouveau_oclass *oclass, void *data, u32 size,
104 struct nouveau_object **pobject)
105{
106 struct nv10_software_priv *priv;
107 int ret;
108
109 ret = nouveau_software_create(parent, engine, oclass, &priv);
110 *pobject = nv_object(priv);
111 if (ret)
112 return ret;
113
114 nv_engine(priv)->cclass = &nv10_software_cclass;
115 nv_engine(priv)->sclass = nv10_software_sclass;
116 return 0;
117}
118
119struct nouveau_oclass
120nv10_software_oclass = {
121 .handle = NV_ENGINE(SW, 0x10),
122 .ofuncs = &(struct nouveau_ofuncs) {
123 .ctor = nv10_software_ctor,
124 .dtor = _nouveau_software_dtor,
125 .init = _nouveau_software_init,
126 .fini = _nouveau_software_fini,
127 },
128};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
new file mode 100644
index 000000000000..6b889713480d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -0,0 +1,198 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28#include <core/namedb.h>
29#include <core/handle.h>
30#include <core/gpuobj.h>
31
32#include <engine/software.h>
33#include <engine/disp.h>
34
35struct nv50_software_priv {
36 struct nouveau_software base;
37};
38
39struct nv50_software_chan {
40 struct nouveau_software_chan base;
41};
42
43/*******************************************************************************
44 * software object classes
45 ******************************************************************************/
46
47static int
48nv50_software_mthd_dma_vblsem(struct nouveau_object *object, u32 mthd,
49 void *args, u32 size)
50{
51 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
52 struct nouveau_fifo_chan *fifo = (void *)nv_object(chan)->parent;
53 struct nouveau_handle *handle;
54 int ret = -EINVAL;
55
56 handle = nouveau_namedb_get(nv_namedb(fifo), *(u32 *)args);
57 if (!handle)
58 return -ENOENT;
59
60 if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
61 struct nouveau_gpuobj *gpuobj = nv_gpuobj(handle->object);
62 chan->base.vblank.ctxdma = gpuobj->node->offset >> 4;
63 ret = 0;
64 }
65 nouveau_namedb_put(handle);
66 return ret;
67}
68
69static int
70nv50_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
71 void *args, u32 size)
72{
73 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
74 chan->base.vblank.offset = *(u32 *)args;
75 return 0;
76}
77
78static int
79nv50_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
80 void *args, u32 size)
81{
82 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
83 chan->base.vblank.value = *(u32 *)args;
84 return 0;
85}
86
87static int
88nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
89 void *args, u32 size)
90{
91 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
92 struct nouveau_disp *disp = nouveau_disp(object);
93 unsigned long flags;
94 u32 crtc = *(u32 *)args;
95
96 if (crtc > 1)
97 return -EINVAL;
98
99 disp->vblank.get(disp->vblank.data, crtc);
100
101 spin_lock_irqsave(&disp->vblank.lock, flags);
102 list_add(&chan->base.vblank.head, &disp->vblank.list);
103 chan->base.vblank.crtc = crtc;
104 spin_unlock_irqrestore(&disp->vblank.lock, flags);
105 return 0;
106}
107
108static int
109nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
110 void *args, u32 size)
111{
112 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
113 if (chan->base.flip)
114 return chan->base.flip(chan->base.flip_data);
115 return -EINVAL;
116}
117
118static struct nouveau_omthds
119nv50_software_omthds[] = {
120 { 0x018c, nv50_software_mthd_dma_vblsem },
121 { 0x0400, nv50_software_mthd_vblsem_offset },
122 { 0x0404, nv50_software_mthd_vblsem_value },
123 { 0x0408, nv50_software_mthd_vblsem_release },
124 { 0x0500, nv50_software_mthd_flip },
125 {}
126};
127
128static struct nouveau_oclass
129nv50_software_sclass[] = {
130 { 0x506e, &nouveau_object_ofuncs, nv50_software_omthds },
131 {}
132};
133
134/*******************************************************************************
135 * software context
136 ******************************************************************************/
137
138static int
139nv50_software_context_ctor(struct nouveau_object *parent,
140 struct nouveau_object *engine,
141 struct nouveau_oclass *oclass, void *data, u32 size,
142 struct nouveau_object **pobject)
143{
144 struct nv50_software_chan *chan;
145 int ret;
146
147 ret = nouveau_software_context_create(parent, engine, oclass, &chan);
148 *pobject = nv_object(chan);
149 if (ret)
150 return ret;
151
152 chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
153 return 0;
154}
155
156static struct nouveau_oclass
157nv50_software_cclass = {
158 .handle = NV_ENGCTX(SW, 0x50),
159 .ofuncs = &(struct nouveau_ofuncs) {
160 .ctor = nv50_software_context_ctor,
161 .dtor = _nouveau_software_context_dtor,
162 .init = _nouveau_software_context_init,
163 .fini = _nouveau_software_context_fini,
164 },
165};
166
167/*******************************************************************************
168 * software engine/subdev functions
169 ******************************************************************************/
170
171static int
172nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
173 struct nouveau_oclass *oclass, void *data, u32 size,
174 struct nouveau_object **pobject)
175{
176 struct nv50_software_priv *priv;
177 int ret;
178
179 ret = nouveau_software_create(parent, engine, oclass, &priv);
180 *pobject = nv_object(priv);
181 if (ret)
182 return ret;
183
184 nv_engine(priv)->cclass = &nv50_software_cclass;
185 nv_engine(priv)->sclass = nv50_software_sclass;
186 return 0;
187}
188
189struct nouveau_oclass
190nv50_software_oclass = {
191 .handle = NV_ENGINE(SW, 0x50),
192 .ofuncs = &(struct nouveau_ofuncs) {
193 .ctor = nv50_software_ctor,
194 .dtor = _nouveau_software_dtor,
195 .init = _nouveau_software_init,
196 .fini = _nouveau_software_fini,
197 },
198};
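The four vblsem methods above only stash state (ctxdma, offset, value) and queue the channel on disp->vblank.list; the release itself happens later, from the vblank interrupt. A self-contained sketch of that other half, with a hypothetical write32 callback standing in for the ctxdma write the driver performs:

struct vblank_waiter {		/* models nouveau_software_chan.vblank */
	struct vblank_waiter *next;
	int crtc;
	unsigned ctxdma;
	unsigned long long offset;
	unsigned value;
};

void
vblank_release(struct vblank_waiter **list, int crtc,
	       void (*write32)(unsigned ctxdma, unsigned long long offset,
			       unsigned value))
{
	struct vblank_waiter **pw = list;

	while (*pw) {
		struct vblank_waiter *w = *pw;
		if (w->crtc == crtc) {
			write32(w->ctxdma, w->offset, w->value);
			*pw = w->next;	/* one-shot: unlink the waiter */
		} else {
			pw = &w->next;
		}
	}
}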
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
new file mode 100644
index 000000000000..e3be78f3a5d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -0,0 +1,180 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <engine/software.h>
30#include <engine/disp.h>
31
32struct nvc0_software_priv {
33 struct nouveau_software base;
34};
35
36struct nvc0_software_chan {
37 struct nouveau_software_chan base;
38};
39
40/*******************************************************************************
41 * software object classes
42 ******************************************************************************/
43
44static int
45nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
46 void *args, u32 size)
47{
48 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
49 u64 data = *(u32 *)args;
50 if (mthd == 0x0400) {
51 chan->base.vblank.offset &= 0x00ffffffffULL;
52 chan->base.vblank.offset |= data << 32;
53 } else {
54 chan->base.vblank.offset &= 0xff00000000ULL;
55 chan->base.vblank.offset |= data;
56 }
57 return 0;
58}
59
60static int
61nvc0_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
62 void *args, u32 size)
63{
64 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
65 chan->base.vblank.value = *(u32 *)args;
66 return 0;
67}
68
69static int
70nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
71 void *args, u32 size)
72{
73 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
74 struct nouveau_disp *disp = nouveau_disp(object);
75 unsigned long flags;
76 u32 crtc = *(u32 *)args;
77
78 if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3)
79 return -EINVAL;
80
81 disp->vblank.get(disp->vblank.data, crtc);
82
83 spin_lock_irqsave(&disp->vblank.lock, flags);
84 list_add(&chan->base.vblank.head, &disp->vblank.list);
85 chan->base.vblank.crtc = crtc;
86 spin_unlock_irqrestore(&disp->vblank.lock, flags);
87 return 0;
88}
89
90static int
91nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
92 void *args, u32 size)
93{
94 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
95 if (chan->base.flip)
96 return chan->base.flip(chan->base.flip_data);
97 return -EINVAL;
98}
99
100static struct nouveau_omthds
101nvc0_software_omthds[] = {
102 { 0x0400, nvc0_software_mthd_vblsem_offset },
103 { 0x0404, nvc0_software_mthd_vblsem_offset },
104 { 0x0408, nvc0_software_mthd_vblsem_value },
105 { 0x040c, nvc0_software_mthd_vblsem_release },
106 { 0x0500, nvc0_software_mthd_flip },
107 {}
108};
109
110static struct nouveau_oclass
111nvc0_software_sclass[] = {
112 { 0x906e, &nouveau_object_ofuncs, nvc0_software_omthds },
113 {}
114};
115
116/*******************************************************************************
117 * software context
118 ******************************************************************************/
119
120static int
121nvc0_software_context_ctor(struct nouveau_object *parent,
122 struct nouveau_object *engine,
123 struct nouveau_oclass *oclass, void *data, u32 size,
124 struct nouveau_object **pobject)
125{
126 struct nvc0_software_chan *chan;
127 int ret;
128
129 ret = nouveau_software_context_create(parent, engine, oclass, &chan);
130 *pobject = nv_object(chan);
131 if (ret)
132 return ret;
133
134 chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
135 return 0;
136}
137
138static struct nouveau_oclass
139nvc0_software_cclass = {
140 .handle = NV_ENGCTX(SW, 0xc0),
141 .ofuncs = &(struct nouveau_ofuncs) {
142 .ctor = nvc0_software_context_ctor,
143 .dtor = _nouveau_software_context_dtor,
144 .init = _nouveau_software_context_init,
145 .fini = _nouveau_software_context_fini,
146 },
147};
148
149/*******************************************************************************
150 * software engine/subdev functions
151 ******************************************************************************/
152
153static int
154nvc0_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
155 struct nouveau_oclass *oclass, void *data, u32 size,
156 struct nouveau_object **pobject)
157{
158 struct nvc0_software_priv *priv;
159 int ret;
160
161 ret = nouveau_software_create(parent, engine, oclass, &priv);
162 *pobject = nv_object(priv);
163 if (ret)
164 return ret;
165
166 nv_engine(priv)->cclass = &nvc0_software_cclass;
167 nv_engine(priv)->sclass = nvc0_software_sclass;
168 return 0;
169}
170
171struct nouveau_oclass
172nvc0_software_oclass = {
173 .handle = NV_ENGINE(SW, 0xc0),
174 .ofuncs = &(struct nouveau_ofuncs) {
175 .ctor = nvc0_software_ctor,
176 .dtor = _nouveau_software_dtor,
177 .init = _nouveau_software_init,
178 .fini = _nouveau_software_fini,
179 },
180};
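On nvc0 the semaphore address grows past 32 bits, so method 0x0400 carries bits 39:32 and 0x0404 carries bits 31:0, as nvc0_software_mthd_vblsem_offset shows. The mask arithmetic is easy to get wrong, so here is the same split as a tiny self-checking program:

#include <assert.h>

unsigned long long
vblsem_offset(unsigned long long cur, unsigned mthd, unsigned data)
{
	if (mthd == 0x0400) {			/* high 8 bits */
		cur &= 0x00ffffffffULL;
		cur |= (unsigned long long)data << 32;
	} else {				/* 0x0404: low 32 bits */
		cur &= 0xff00000000ULL;
		cur |= data;
	}
	return cur;
}

int
main(void)
{
	unsigned long long off = 0;
	off = vblsem_offset(off, 0x0400, 0x12);
	off = vblsem_offset(off, 0x0404, 0x34567890);
	assert(off == 0x1234567890ULL);
	return 0;
}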
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
index 5e164a684aec..dd23c80e5405 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,61 +22,154 @@
  * Authors: Ben Skeggs
  */
 
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include <core/ramht.h>
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
 
-/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
- * more than just an enable/disable stub this needs to be split out to
- * nv98_vp.c...
- */
+#include <engine/vp.h>
+
+struct nv84_vp_priv {
+	struct nouveau_vp base;
+};
 
-struct nv84_vp_engine {
-	struct nouveau_exec_engine base;
+struct nv84_vp_chan {
+	struct nouveau_vp_chan base;
 };
 
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_vp_sclass[] = {
+	{},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
 static int
-nv84_vp_fini(struct drm_device *dev, int engine, bool suspend)
+nv84_vp_context_ctor(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, void *data, u32 size,
+		     struct nouveau_object **pobject)
 {
-	if (!(nv_rd32(dev, 0x000200) & 0x00020000))
-		return 0;
+	struct nv84_vp_chan *priv;
+	int ret;
+
+	ret = nouveau_vp_context_create(parent, engine, oclass, NULL,
+					0, 0, 0, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
 
-	nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
 	return 0;
 }
 
+static void
+nv84_vp_context_dtor(struct nouveau_object *object)
+{
+	struct nv84_vp_chan *priv = (void *)object;
+	nouveau_vp_context_destroy(&priv->base);
+}
+
 static int
-nv84_vp_init(struct drm_device *dev, int engine)
+nv84_vp_context_init(struct nouveau_object *object)
 {
-	nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00020000, 0x00020000);
+	struct nv84_vp_chan *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_vp_context_init(&priv->base);
+	if (ret)
+		return ret;
+
 	return 0;
 }
 
+static int
+nv84_vp_context_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv84_vp_chan *priv = (void *)object;
+	return nouveau_vp_context_fini(&priv->base, suspend);
+}
+
+static struct nouveau_oclass
+nv84_vp_cclass = {
+	.handle = NV_ENGCTX(VP, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_vp_context_ctor,
+		.dtor = nv84_vp_context_dtor,
+		.init = nv84_vp_context_init,
+		.fini = nv84_vp_context_fini,
+		.rd32 = _nouveau_vp_context_rd32,
+		.wr32 = _nouveau_vp_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
 static void
-nv84_vp_destroy(struct drm_device *dev, int engine)
+nv84_vp_intr(struct nouveau_subdev *subdev)
 {
-	struct nv84_vp_engine *pvp = nv_engine(dev, engine);
+}
 
-	NVOBJ_ENGINE_DEL(dev, VP);
+static int
+nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv84_vp_priv *priv;
+	int ret;
+
+	ret = nouveau_vp_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
 
-	kfree(pvp);
+	nv_subdev(priv)->unit = 0x01020000;
+	nv_subdev(priv)->intr = nv84_vp_intr;
+	nv_engine(priv)->cclass = &nv84_vp_cclass;
+	nv_engine(priv)->sclass = nv84_vp_sclass;
+	return 0;
 }
 
-int
-nv84_vp_create(struct drm_device *dev)
+static void
+nv84_vp_dtor(struct nouveau_object *object)
 {
-	struct nv84_vp_engine *pvp;
+	struct nv84_vp_priv *priv = (void *)object;
+	nouveau_vp_destroy(&priv->base);
+}
 
-	pvp = kzalloc(sizeof(*pvp), GFP_KERNEL);
-	if (!pvp)
-		return -ENOMEM;
+static int
+nv84_vp_init(struct nouveau_object *object)
+{
+	struct nv84_vp_priv *priv = (void *)object;
+	int ret;
 
-	pvp->base.destroy = nv84_vp_destroy;
-	pvp->base.init = nv84_vp_init;
-	pvp->base.fini = nv84_vp_fini;
+	ret = nouveau_vp_init(&priv->base);
+	if (ret)
+		return ret;
 
-	NVOBJ_ENGINE_ADD(dev, VP, &pvp->base);
 	return 0;
 }
+
+static int
+nv84_vp_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv84_vp_priv *priv = (void *)object;
+	return nouveau_vp_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv84_vp_oclass = {
+	.handle = NV_ENGINE(VP, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_vp_ctor,
+		.dtor = nv84_vp_dtor,
+		.init = nv84_vp_init,
+		.fini = nv84_vp_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/include/core/ramht.h b/drivers/gpu/drm/nouveau/core/include/core/ramht.h
index c82de98fee0e..47e4cacbca37 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/ramht.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/ramht.h
@@ -1,55 +1,23 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
 #ifndef __NOUVEAU_RAMHT_H__
 #define __NOUVEAU_RAMHT_H__
 
-struct nouveau_ramht_entry {
-	struct list_head head;
-	struct nouveau_channel *channel;
-	struct nouveau_gpuobj *gpuobj;
-	u32 handle;
-};
+#include <core/gpuobj.h>
 
 struct nouveau_ramht {
-	struct drm_device *dev;
-	struct kref refcount;
-	spinlock_t lock;
-	struct nouveau_gpuobj *gpuobj;
-	struct list_head entries;
+	struct nouveau_gpuobj base;
 	int bits;
 };
 
-extern int nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *,
-			     struct nouveau_ramht **);
-extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
-			      struct nouveau_channel *unref_channel);
+int nouveau_ramht_insert(struct nouveau_ramht *, int chid,
+			 u32 handle, u32 context);
+void nouveau_ramht_remove(struct nouveau_ramht *, int cookie);
+int nouveau_ramht_new(struct nouveau_object *, struct nouveau_object *,
+		      u32 size, u32 align, struct nouveau_ramht **);
 
-extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
-				struct nouveau_gpuobj *);
-extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
-extern struct nouveau_gpuobj *
-nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
+static inline void
+nouveau_ramht_ref(struct nouveau_ramht *obj, struct nouveau_ramht **ref)
+{
+	nouveau_gpuobj_ref(&obj->base, (struct nouveau_gpuobj **)ref);
+}
 
 #endif
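The interface change above is the interesting part: insertion now takes (chid, handle, context) directly, matching how the hardware hash table is keyed, and 'bits' is the only state the wrapper keeps beyond the backing gpuobj. For orientation, the folding hash nouveau has historically used for RAMHT looks roughly like this (illustrative sketch only; the authoritative version is the new ramht.c, which also converts the result into a byte offset):

typedef unsigned u32;

u32
ramht_hash(int bits, int chid, u32 handle)
{
	u32 hash = 0;

	/* fold the handle down in 'bits'-wide chunks... */
	while (handle) {
		hash ^= handle & ((1 << bits) - 1);
		handle >>= bits;
	}

	/* ...then mix in the channel id */
	return hash ^ (chid << (bits - 4));
}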
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
new file mode 100644
index 000000000000..75d1ed5f85fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
@@ -0,0 +1,45 @@
1#ifndef __NOUVEAU_BSP_H__
2#define __NOUVEAU_BSP_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_bsp_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_bsp_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_bsp_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_bsp_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor
21#define _nouveau_bsp_context_init _nouveau_engctx_init
22#define _nouveau_bsp_context_fini _nouveau_engctx_fini
23#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_bsp {
27 struct nouveau_engine base;
28};
29
30#define nouveau_bsp_create(p,e,c,d) \
31 nouveau_engine_create((p), (e), (c), true, "PBSP", "bsp", (d))
32#define nouveau_bsp_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_bsp_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_bsp_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_bsp_dtor _nouveau_engine_dtor
40#define _nouveau_bsp_init _nouveau_engine_init
41#define _nouveau_bsp_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nv84_bsp_oclass;
44
45#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
new file mode 100644
index 000000000000..23bb9dfeaf67
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
@@ -0,0 +1,47 @@
1#ifndef __NOUVEAU_COPY_H__
2#define __NOUVEAU_COPY_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_copy_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_copy_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_copy_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_copy_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_copy_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_copy_context_dtor _nouveau_engctx_dtor
21#define _nouveau_copy_context_init _nouveau_engctx_init
22#define _nouveau_copy_context_fini _nouveau_engctx_fini
23#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_copy {
27 struct nouveau_engine base;
28};
29
30#define nouveau_copy_create(p,e,c,y,i,d) \
31 nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d))
32#define nouveau_copy_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_copy_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_copy_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_copy_dtor _nouveau_engine_dtor
40#define _nouveau_copy_init _nouveau_engine_init
41#define _nouveau_copy_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nva3_copy_oclass;
44extern struct nouveau_oclass nvc0_copy0_oclass;
45extern struct nouveau_oclass nvc0_copy1_oclass;
46
47#endif
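Unlike the single-instance engines, the copy-engine create macro pastes its instance index into the subdev names with the preprocessor's stringize operator, giving PCE0/copy0 and PCE1/copy1 from one macro. The trick in isolation, as a runnable snippet:

#include <stdio.h>

#define ENGINE_NAME(i) "PCE" #i		/* same "PCE"#i pasting as above */

int
main(void)
{
	puts(ENGINE_NAME(0));		/* prints PCE0 */
	puts(ENGINE_NAME(1));		/* prints PCE1 */
	return 0;
}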
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
new file mode 100644
index 000000000000..e3674743baaa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
@@ -0,0 +1,46 @@
1#ifndef __NOUVEAU_CRYPT_H__
2#define __NOUVEAU_CRYPT_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_crypt_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_crypt_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_crypt_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_crypt_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor
21#define _nouveau_crypt_context_init _nouveau_engctx_init
22#define _nouveau_crypt_context_fini _nouveau_engctx_fini
23#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_crypt {
27 struct nouveau_engine base;
28};
29
30#define nouveau_crypt_create(p,e,c,d) \
31 nouveau_engine_create((p), (e), (c), true, "PCRYPT", "crypt", (d))
32#define nouveau_crypt_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_crypt_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_crypt_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_crypt_dtor _nouveau_engine_dtor
40#define _nouveau_crypt_init _nouveau_engine_init
41#define _nouveau_crypt_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nv84_crypt_oclass;
44extern struct nouveau_oclass nv98_crypt_oclass;
45
46#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
new file mode 100644
index 000000000000..38ec1252cbaa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -0,0 +1,44 @@
1#ifndef __NOUVEAU_DISP_H__
2#define __NOUVEAU_DISP_H__
3
4#include <core/object.h>
5#include <core/engine.h>
6#include <core/device.h>
7
8struct nouveau_disp {
9 struct nouveau_engine base;
10
11 struct {
12 struct list_head list;
13 spinlock_t lock;
14 void (*notify)(void *, int);
15 void (*get)(void *, int);
16 void (*put)(void *, int);
17 void *data;
18 } vblank;
19};
20
21static inline struct nouveau_disp *
22nouveau_disp(void *obj)
23{
24 return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_DISP];
25}
26
27#define nouveau_disp_create(p,e,c,i,x,d) \
28 nouveau_engine_create((p), (e), (c), true, (i), (x), (d))
29#define nouveau_disp_destroy(d) \
30 nouveau_engine_destroy(&(d)->base)
31#define nouveau_disp_init(d) \
32 nouveau_engine_init(&(d)->base)
33#define nouveau_disp_fini(d,s) \
34 nouveau_engine_fini(&(d)->base, (s))
35
36#define _nouveau_disp_dtor _nouveau_engine_dtor
37#define _nouveau_disp_init _nouveau_engine_init
38#define _nouveau_disp_fini _nouveau_engine_fini
39
40extern struct nouveau_oclass nv04_disp_oclass;
41extern struct nouveau_oclass nv50_disp_oclass;
42extern struct nouveau_oclass nvd0_disp_oclass;
43
44#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
new file mode 100644
index 000000000000..700ccbb1941f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
@@ -0,0 +1,57 @@
+#ifndef __NOUVEAU_DMAOBJ_H__
+#define __NOUVEAU_DMAOBJ_H__
+
+#include <core/object.h>
+#include <core/engine.h>
+
+struct nouveau_gpuobj;
+
+struct nouveau_dmaobj {
+	struct nouveau_object base;
+	u32 target;
+	u32 access;
+	u64 start;
+	u64 limit;
+};
+
+#define nouveau_dmaobj_create(p,e,c,a,s,d) \
+	nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d)
+#define nouveau_dmaobj_destroy(p) \
+	nouveau_object_destroy(&(p)->base)
+#define nouveau_dmaobj_init(p) \
+	nouveau_object_init(&(p)->base)
+#define nouveau_dmaobj_fini(p,s) \
+	nouveau_object_fini(&(p)->base, (s))
+
+int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
+			   struct nouveau_oclass *, void *data, u32 size,
+			   int length, void **);
+
+#define _nouveau_dmaobj_dtor nouveau_object_destroy
+#define _nouveau_dmaobj_init nouveau_object_init
+#define _nouveau_dmaobj_fini nouveau_object_fini
+
+struct nouveau_dmaeng {
+	struct nouveau_engine base;
+	int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent,
+		    struct nouveau_dmaobj *, struct nouveau_gpuobj **);
+};
+
+#define nouveau_dmaeng_create(p,e,c,d) \
+	nouveau_engine_create((p), (e), (c), true, "DMAOBJ", "dmaobj", (d))
+#define nouveau_dmaeng_destroy(p) \
+	nouveau_engine_destroy(&(p)->base)
+#define nouveau_dmaeng_init(p) \
+	nouveau_engine_init(&(p)->base)
+#define nouveau_dmaeng_fini(p,s) \
+	nouveau_engine_fini(&(p)->base, (s))
+
+#define _nouveau_dmaeng_dtor _nouveau_engine_dtor
+#define _nouveau_dmaeng_init _nouveau_engine_init
+#define _nouveau_dmaeng_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_dmaeng_oclass;
+extern struct nouveau_oclass nv50_dmaeng_oclass;
+extern struct nouveau_oclass nvc0_dmaeng_oclass;
+
+#endif
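
The dmaeng structure above shows the pattern this series leans on: the engine base carries a per-generation function pointer (bind), and each chipset class fills it in, so callers never reference a generation-specific symbol. A minimal standalone sketch of that shape, using invented toy_* names rather than anything from the patch:

/*
 * Illustrative sketch only -- not part of the patch. It mimics the
 * nouveau_dmaeng pattern above: a base engine carries a per-generation
 * bind() hook that chipset code fills in. All names here are invented.
 */
#include <stdio.h>

struct toy_dmaobj { unsigned long start, limit; };

struct toy_dmaeng {
	/* per-generation hook, set by the chipset implementation */
	int (*bind)(struct toy_dmaeng *, struct toy_dmaobj *);
};

/* one possible implementation, standing in for e.g. an nv04 dmaeng */
static int toy_bind_nv04(struct toy_dmaeng *eng, struct toy_dmaobj *obj)
{
	printf("bound 0x%lx..0x%lx\n", obj->start, obj->limit);
	return 0;
}

int main(void)
{
	struct toy_dmaeng eng = { .bind = toy_bind_nv04 };
	struct toy_dmaobj obj = { .start = 0x1000, .limit = 0x1fff };

	/* callers always go through the hook, never a generation symbol */
	return eng.bind(&eng, &obj);
}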
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index ce99cab2f257..65ee929a75f0 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -1,32 +1,109 @@
 #ifndef __NOUVEAU_FIFO_H__
 #define __NOUVEAU_FIFO_H__
 
-struct nouveau_fifo_priv {
-	struct nouveau_exec_engine base;
-	u32 channels;
-};
+#include <core/namedb.h>
+#include <core/gpuobj.h>
+#include <core/engine.h>
 
 struct nouveau_fifo_chan {
+	struct nouveau_namedb base;
+	struct nouveau_dmaobj *pushdma;
+	struct nouveau_gpuobj *pushgpu;
+	void __iomem *user;
+	u32 size;
+	u16 chid;
+	atomic_t refcnt; /* NV04_NVSW_SET_REF */
+};
+
+static inline struct nouveau_fifo_chan *
+nouveau_fifo_chan(void *obj)
+{
+	return (void *)nv_namedb(obj);
+}
+
+#define nouveau_fifo_channel_create(p,e,c,b,a,s,n,m,d) \
+	nouveau_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n), \
+				     (m), sizeof(**d), (void **)d)
+#define nouveau_fifo_channel_init(p) \
+	nouveau_namedb_init(&(p)->base)
+#define nouveau_fifo_channel_fini(p,s) \
+	nouveau_namedb_fini(&(p)->base, (s))
+
+int nouveau_fifo_channel_create_(struct nouveau_object *,
+				 struct nouveau_object *,
+				 struct nouveau_oclass *,
+				 int bar, u32 addr, u32 size, u32 push,
+				 u32 engmask, int len, void **);
+void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
+
+#define _nouveau_fifo_channel_init _nouveau_namedb_init
+#define _nouveau_fifo_channel_fini _nouveau_namedb_fini
+
+void _nouveau_fifo_channel_dtor(struct nouveau_object *);
+u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u32);
+void _nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32);
+
+struct nouveau_fifo_base {
+	struct nouveau_gpuobj base;
 };
 
-bool nv04_fifo_cache_pull(struct drm_device *, bool);
-void nv04_fifo_context_del(struct nouveau_channel *, int);
-int nv04_fifo_fini(struct drm_device *, int, bool);
-int nv04_fifo_init(struct drm_device *, int);
-void nv04_fifo_isr(struct drm_device *);
-void nv04_fifo_destroy(struct drm_device *, int);
-
-void nv50_fifo_playlist_update(struct drm_device *);
-void nv50_fifo_destroy(struct drm_device *, int);
-void nv50_fifo_tlb_flush(struct drm_device *, int);
-
-int nv04_fifo_create(struct drm_device *);
-int nv10_fifo_create(struct drm_device *);
-int nv17_fifo_create(struct drm_device *);
-int nv40_fifo_create(struct drm_device *);
-int nv50_fifo_create(struct drm_device *);
-int nv84_fifo_create(struct drm_device *);
-int nvc0_fifo_create(struct drm_device *);
-int nve0_fifo_create(struct drm_device *);
+#define nouveau_fifo_context_create(p,e,c,g,s,a,f,d) \
+	nouveau_gpuobj_create((p), (e), (c), 0, (g), (s), (a), (f), (d))
+#define nouveau_fifo_context_destroy(p) \
+	nouveau_gpuobj_destroy(&(p)->base)
+#define nouveau_fifo_context_init(p) \
+	nouveau_gpuobj_init(&(p)->base)
+#define nouveau_fifo_context_fini(p,s) \
+	nouveau_gpuobj_fini(&(p)->base, (s))
+
+#define _nouveau_fifo_context_dtor _nouveau_gpuobj_dtor
+#define _nouveau_fifo_context_init _nouveau_gpuobj_init
+#define _nouveau_fifo_context_fini _nouveau_gpuobj_fini
+#define _nouveau_fifo_context_rd32 _nouveau_gpuobj_rd32
+#define _nouveau_fifo_context_wr32 _nouveau_gpuobj_wr32
+
+struct nouveau_fifo {
+	struct nouveau_engine base;
+
+	struct nouveau_object **channel;
+	spinlock_t lock;
+	u16 min;
+	u16 max;
+
+	void (*pause)(struct nouveau_fifo *, unsigned long *);
+	void (*start)(struct nouveau_fifo *, unsigned long *);
+};
+
+static inline struct nouveau_fifo *
+nouveau_fifo(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_FIFO];
+}
+
+#define nouveau_fifo_create(o,e,c,fc,lc,d) \
+	nouveau_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d)
+#define nouveau_fifo_init(p) \
+	nouveau_engine_init(&(p)->base)
+#define nouveau_fifo_fini(p,s) \
+	nouveau_engine_fini(&(p)->base, (s))
+
+int nouveau_fifo_create_(struct nouveau_object *, struct nouveau_object *,
+			 struct nouveau_oclass *, int min, int max,
+			 int size, void **);
+void nouveau_fifo_destroy(struct nouveau_fifo *);
+
+#define _nouveau_fifo_init _nouveau_engine_init
+#define _nouveau_fifo_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_fifo_oclass;
+extern struct nouveau_oclass nv10_fifo_oclass;
+extern struct nouveau_oclass nv17_fifo_oclass;
+extern struct nouveau_oclass nv40_fifo_oclass;
+extern struct nouveau_oclass nv50_fifo_oclass;
+extern struct nouveau_oclass nv84_fifo_oclass;
+extern struct nouveau_oclass nvc0_fifo_oclass;
+extern struct nouveau_oclass nve0_fifo_oclass;
+
+void nv04_fifo_intr(struct nouveau_subdev *);
 
 #endif
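
The new nouveau_fifo above replaces the old opaque channels count with an explicit channel table bounded by min/max, which the real code scans under fifo->lock when creating a channel. A minimal standalone sketch of that chid allocation, with invented toy_* names (locking omitted for brevity):

/*
 * Illustrative sketch only -- not part of the patch. It models the
 * channel table above: chids between min and max index a pointer array,
 * and channel creation claims the first free slot. Invented names.
 */
#include <stdio.h>

#define TOY_MAX_CHAN 4

struct toy_fifo {
	void *channel[TOY_MAX_CHAN];
	unsigned short min, max;
};

/* find a free chid; the real driver does this scan under fifo->lock */
static int toy_chid_get(struct toy_fifo *fifo, void *chan)
{
	int chid;

	for (chid = fifo->min; chid < fifo->max; chid++) {
		if (!fifo->channel[chid]) {
			fifo->channel[chid] = chan;
			return chid;
		}
	}
	return -1; /* no free slot: channel limit reached */
}

int main(void)
{
	struct toy_fifo fifo = { .min = 0, .max = TOY_MAX_CHAN };
	int a = toy_chid_get(&fifo, &fifo);
	int b = toy_chid_get(&fifo, &fifo);

	printf("chid a=%d b=%d\n", a, b);
	return 0;
}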
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
new file mode 100644
index 000000000000..388cfcff7bd0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -0,0 +1,72 @@
+#ifndef __NOUVEAU_GRAPH_H__
+#define __NOUVEAU_GRAPH_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+struct nouveau_graph_chan {
+	struct nouveau_engctx base;
+};
+
+#define nouveau_graph_context_create(p,e,c,g,s,a,f,d) \
+	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_graph_context_destroy(d) \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_graph_context_init(d) \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_graph_context_fini(d,s) \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_graph_context_dtor _nouveau_engctx_dtor
+#define _nouveau_graph_context_init _nouveau_engctx_init
+#define _nouveau_graph_context_fini _nouveau_engctx_fini
+#define _nouveau_graph_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_graph_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_graph {
+	struct nouveau_engine base;
+};
+
+static inline struct nouveau_graph *
+nouveau_graph(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_GR];
+}
+
+#define nouveau_graph_create(p,e,c,y,d) \
+	nouveau_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d))
+#define nouveau_graph_destroy(d) \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_graph_init(d) \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_graph_fini(d,s) \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_graph_dtor _nouveau_engine_dtor
+#define _nouveau_graph_init _nouveau_engine_init
+#define _nouveau_graph_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_graph_oclass;
+extern struct nouveau_oclass nv10_graph_oclass;
+extern struct nouveau_oclass nv20_graph_oclass;
+extern struct nouveau_oclass nv25_graph_oclass;
+extern struct nouveau_oclass nv2a_graph_oclass;
+extern struct nouveau_oclass nv30_graph_oclass;
+extern struct nouveau_oclass nv34_graph_oclass;
+extern struct nouveau_oclass nv35_graph_oclass;
+extern struct nouveau_oclass nv40_graph_oclass;
+extern struct nouveau_oclass nv50_graph_oclass;
+extern struct nouveau_oclass nvc0_graph_oclass;
+extern struct nouveau_oclass nve0_graph_oclass;
+
+extern struct nouveau_bitfield nv04_graph_nsource[];
+extern struct nouveau_ofuncs nv04_graph_ofuncs;
+bool nv04_graph_idle(void *obj);
+
+extern struct nouveau_bitfield nv10_graph_intr_name[];
+extern struct nouveau_bitfield nv10_graph_nstatus[];
+
+extern struct nouveau_enum nv50_data_error_names[];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
new file mode 100644
index 000000000000..bbf0d4a5bbd7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
@@ -0,0 +1,61 @@
+#ifndef __NOUVEAU_MPEG_H__
+#define __NOUVEAU_MPEG_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_mpeg_chan {
+	struct nouveau_engctx base;
+};
+
+#define nouveau_mpeg_context_create(p,e,c,g,s,a,f,d) \
+	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_mpeg_context_destroy(d) \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_mpeg_context_init(d) \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_mpeg_context_fini(d,s) \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_mpeg_context_dtor _nouveau_engctx_dtor
+#define _nouveau_mpeg_context_init _nouveau_engctx_init
+#define _nouveau_mpeg_context_fini _nouveau_engctx_fini
+#define _nouveau_mpeg_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_mpeg_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_mpeg {
+	struct nouveau_engine base;
+};
+
+#define nouveau_mpeg_create(p,e,c,d) \
+	nouveau_engine_create((p), (e), (c), true, "PMPEG", "mpeg", (d))
+#define nouveau_mpeg_destroy(d) \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_mpeg_init(d) \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_mpeg_fini(d,s) \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_mpeg_dtor _nouveau_engine_dtor
+#define _nouveau_mpeg_init _nouveau_engine_init
+#define _nouveau_mpeg_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv31_mpeg_oclass;
+extern struct nouveau_oclass nv40_mpeg_oclass;
+extern struct nouveau_oclass nv50_mpeg_oclass;
+extern struct nouveau_oclass nv84_mpeg_oclass;
+
+extern struct nouveau_oclass nv31_mpeg_sclass[];
+void nv31_mpeg_intr(struct nouveau_subdev *);
+void nv31_mpeg_tile_prog(struct nouveau_engine *, int);
+int nv31_mpeg_init(struct nouveau_object *);
+
+extern struct nouveau_ofuncs nv50_mpeg_ofuncs;
+int nv50_mpeg_context_ctor(struct nouveau_object *, struct nouveau_object *,
+			   struct nouveau_oclass *, void *, u32,
+			   struct nouveau_object **);
+int nv50_mpeg_tlb_flush(struct nouveau_engine *);
+void nv50_mpeg_intr(struct nouveau_subdev *);
+int nv50_mpeg_init(struct nouveau_object *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
new file mode 100644
index 000000000000..74d554fb3281
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
@@ -0,0 +1,45 @@
+#ifndef __NOUVEAU_PPP_H__
+#define __NOUVEAU_PPP_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_ppp_chan {
+	struct nouveau_engctx base;
+};
+
+#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d) \
+	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_ppp_context_destroy(d) \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_ppp_context_init(d) \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_ppp_context_fini(d,s) \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor
+#define _nouveau_ppp_context_init _nouveau_engctx_init
+#define _nouveau_ppp_context_fini _nouveau_engctx_fini
+#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_ppp {
+	struct nouveau_engine base;
+};
+
+#define nouveau_ppp_create(p,e,c,d) \
+	nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d))
+#define nouveau_ppp_destroy(d) \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_ppp_init(d) \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_ppp_fini(d,s) \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_ppp_dtor _nouveau_engine_dtor
+#define _nouveau_ppp_init _nouveau_engine_init
+#define _nouveau_ppp_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv98_ppp_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/software.h b/drivers/gpu/drm/nouveau/core/include/engine/software.h
new file mode 100644
index 000000000000..8d740793cf8a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/software.h
@@ -0,0 +1,58 @@
+#ifndef __NOUVEAU_SOFTWARE_H__
+#define __NOUVEAU_SOFTWARE_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_software_chan {
+	struct nouveau_engctx base;
+
+	struct {
+		struct list_head head;
+		u32 channel;
+		u32 ctxdma;
+		u64 offset;
+		u32 value;
+		u32 crtc;
+	} vblank;
+
+	int (*flip)(void *);
+	void *flip_data;
+};
+
+#define nouveau_software_context_create(p,e,c,d) \
+	nouveau_engctx_create((p), (e), (c), (p), 0, 0, 0, (d))
+#define nouveau_software_context_destroy(d) \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_software_context_init(d) \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_software_context_fini(d,s) \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_software_context_dtor _nouveau_engctx_dtor
+#define _nouveau_software_context_init _nouveau_engctx_init
+#define _nouveau_software_context_fini _nouveau_engctx_fini
+
+struct nouveau_software {
+	struct nouveau_engine base;
+};
+
+#define nouveau_software_create(p,e,c,d) \
+	nouveau_engine_create((p), (e), (c), true, "SW", "software", (d))
+#define nouveau_software_destroy(d) \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_software_init(d) \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_software_fini(d,s) \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_software_dtor _nouveau_engine_dtor
+#define _nouveau_software_init _nouveau_engine_init
+#define _nouveau_software_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_software_oclass;
+extern struct nouveau_oclass nv10_software_oclass;
+extern struct nouveau_oclass nv50_software_oclass;
+extern struct nouveau_oclass nvc0_software_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
new file mode 100644
index 000000000000..05cd08fba377
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
@@ -0,0 +1,45 @@
+#ifndef __NOUVEAU_VP_H__
+#define __NOUVEAU_VP_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_vp_chan {
+	struct nouveau_engctx base;
+};
+
+#define nouveau_vp_context_create(p,e,c,g,s,a,f,d) \
+	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_vp_context_destroy(d) \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_vp_context_init(d) \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_vp_context_fini(d,s) \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_vp_context_dtor _nouveau_engctx_dtor
+#define _nouveau_vp_context_init _nouveau_engctx_init
+#define _nouveau_vp_context_fini _nouveau_engctx_fini
+#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_vp {
+	struct nouveau_engine base;
+};
+
+#define nouveau_vp_create(p,e,c,d) \
+	nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d))
+#define nouveau_vp_destroy(d) \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_vp_init(d) \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_vp_fini(d,s) \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_vp_dtor _nouveau_engine_dtor
+#define _nouveau_vp_init _nouveau_engine_init
+#define _nouveau_vp_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv84_vp_oclass;
+
+#endif
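
Every engine header above repeats the same glue: the derived struct embeds nouveau_engine as its first member, and per-engine macros simply forward to the generic engine calls. A standalone sketch of why that forwarding works, with invented toy_* names rather than anything from the patch:

/*
 * Illustrative sketch only -- not part of the patch. The derived struct
 * embeds the base as its first member, so &(d)->base names the same
 * storage and the generic call operates on the derived object.
 */
#include <stdio.h>

struct toy_engine { const char *name; };

static void toy_engine_init(struct toy_engine *eng)
{
	printf("init %s\n", eng->name);
}

/* derived engine embeds the base, as nouveau_vp embeds nouveau_engine */
struct toy_vp { struct toy_engine base; };

/* per-engine alias, like nouveau_vp_init() forwarding to the base call */
#define toy_vp_init(d) toy_engine_init(&(d)->base)

int main(void)
{
	struct toy_vp vp = { .base = { .name = "PVP" } };
	toy_vp_init(&vp);
	return 0;
}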
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/device.h b/drivers/gpu/drm/nouveau/core/include/subdev/device.h
index 5eec03a40f6d..c9e4c4afa50e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/device.h
@@ -8,7 +8,6 @@
 
 int nouveau_device_create_(struct pci_dev *, u64 name, const char *sname,
 			   const char *cfg, const char *dbg, int, void **);
-void nouveau_device_destroy(struct nouveau_device **);
 
 int nv04_identify(struct nouveau_device *);
 int nv10_identify(struct nouveau_device *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
index 2adfcafa4478..ec7a54e91a08 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -8,7 +8,6 @@
 struct nouveau_instobj {
 	struct nouveau_object base;
 	struct list_head head;
-	struct nouveau_mm heap;
 	u32 *suspend;
 	u64 addr;
 	u32 size;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
index 81577bb783e8..747781c2371d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -73,6 +73,7 @@ struct nouveau_vm {
 struct nouveau_vmmgr {
 	struct nouveau_subdev base;
 
+	u64 limit;
 	u32 pgt_bits;
 	u8 spg_shift;
 	u8 lpg_shift;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
index 40456b99cb5f..d8d101630e46 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
@@ -241,6 +241,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
 		if (!device->subdev[i]) {
 			ret = nouveau_object_ctor(nv_object(device), NULL,
 						  oclass, NULL, i, &subdev);
+			if (ret == -ENODEV)
+				continue;
 			if (ret)
 				return ret;
 
@@ -404,10 +406,26 @@ nouveau_device_sclass[] = {
 	{}
 };
 
+static void
+nouveau_device_dtor(struct nouveau_object *object)
+{
+	struct nouveau_device *device = (void *)object;
+
+	mutex_lock(&nv_devices_mutex);
+	list_del(&device->head);
+	mutex_unlock(&nv_devices_mutex);
+
+	if (device->base.mmio)
+		iounmap(device->base.mmio);
+
+	nouveau_subdev_destroy(&device->base);
+}
+
 static struct nouveau_oclass
 nouveau_device_oclass = {
 	.handle = NV_SUBDEV(DEVICE, 0x00),
 	.ofuncs = &(struct nouveau_ofuncs) {
+		.dtor = nouveau_device_dtor,
 	},
 };
 
@@ -444,18 +462,3 @@ done:
 	mutex_unlock(&nv_devices_mutex);
 	return ret;
 }
-
-void
-nouveau_device_destroy(struct nouveau_device **pdevice)
-{
-	struct nouveau_device *device = *pdevice;
-	if (device) {
-		mutex_lock(&nv_devices_mutex);
-		list_del(&device->head);
-		mutex_unlock(&nv_devices_mutex);
-		if (device->base.mmio)
-			iounmap(device->base.mmio);
-		nouveau_subdev_destroy(&device->base);
-	}
-	*pdevice = NULL;
-}
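
The hunk above removes the exported destroy function in favour of a class destructor, so devices are torn down through the same ofuncs path as any other object. A standalone sketch of that inversion, with invented toy_* names rather than anything from the patch:

/*
 * Illustrative sketch only -- not part of the patch. Generic teardown
 * code calls the per-class dtor hook instead of a device-specific
 * destroy function, mirroring the nouveau_device_dtor move above.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_object;

struct toy_ofuncs {
	void (*dtor)(struct toy_object *);
};

struct toy_object { const struct toy_ofuncs *ofuncs; };

/* generic teardown: every class releases itself through its dtor hook */
static void toy_object_put(struct toy_object *obj)
{
	obj->ofuncs->dtor(obj);
}

struct toy_device { struct toy_object base; };

static void toy_device_dtor(struct toy_object *obj)
{
	struct toy_device *device = (struct toy_device *)obj;

	printf("device teardown\n");	/* unmap mmio, unlink, etc. */
	free(device);
}

static const struct toy_ofuncs toy_device_ofuncs = {
	.dtor = toy_device_dtor,
};

int main(void)
{
	struct toy_device *device = malloc(sizeof(*device));

	if (!device)
		return 1;
	device->base.ofuncs = &toy_device_ofuncs;
	toy_object_put(&device->base);	/* last reference: dtor runs */
	return 0;
}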
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
index 5173c785b061..693d200a3e22 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
@@ -33,6 +33,12 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+
 int
 nv04_identify(struct nouveau_device *device)
 {
@@ -47,6 +53,11 @@ nv04_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv04_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv04_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv04_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x05:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -58,6 +69,11 @@ nv04_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv04_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv04_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv04_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown RIVA chipset\n");
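
identify() only selects classes; nothing is instantiated here. Construction happens later in the devobj constructor, where (per the base.c hunk above) a class constructor may return -ENODEV to indicate the engine simply isn't present on this board. A standalone sketch of that two-phase flow, with invented toy_* names rather than anything from the patch:

/*
 * Illustrative sketch only -- not part of the patch. Phase 1 fills a
 * class table per chipset; phase 2 instantiates whatever was registered,
 * treating -ENODEV as "absent", not as an error.
 */
#include <stdio.h>
#include <errno.h>

#define TOY_NR_SLOTS 3

typedef int (*toy_ctor)(int slot);

static int toy_gr_ctor(int slot)   { printf("slot %d: GR up\n", slot); return 0; }
static int toy_disp_ctor(int slot) { return -ENODEV; /* not on this board */ }

int main(void)
{
	toy_ctor oclass[TOY_NR_SLOTS] = { 0 };
	int i, ret;

	/* "identify": pick classes for this chipset */
	oclass[0] = toy_gr_ctor;
	oclass[1] = toy_disp_ctor;

	/* "ctor": instantiate registered classes, skipping absent engines */
	for (i = 0; i < TOY_NR_SLOTS; i++) {
		if (!oclass[i])
			continue;
		ret = oclass[i](i);
		if (ret == -ENODEV)
			continue;	/* engine absent: not an error */
		if (ret)
			return ret;
	}
	return 0;
}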
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
index c4f2c2d3eaec..de6ce890e842 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -34,6 +34,12 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+
 int
 nv10_identify(struct nouveau_device *device)
 {
@@ -49,6 +55,9 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x15:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -61,6 +70,11 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x16:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -73,6 +87,11 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x1a:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -85,6 +104,11 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x11:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -97,6 +121,11 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x17:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -109,6 +138,11 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x1f:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -121,6 +155,11 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x18:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -133,6 +172,11 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown Celsius chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
index 719b72a43e47..0b30143d0114 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -34,6 +34,12 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+
 int
 nv20_identify(struct nouveau_device *device)
 {
@@ -49,6 +55,11 @@ nv20_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv20_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x25:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -61,6 +72,11 @@ nv20_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x28:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -73,6 +89,11 @@ nv20_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x2a:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -85,6 +106,11 @@ nv20_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv2a_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown Kelvin chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
index 0a1a72809d82..1d5c6977c86a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -34,6 +34,13 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/mpeg.h>
+#include <engine/disp.h>
+
 int
 nv30_identify(struct nouveau_device *device)
 {
@@ -49,6 +56,11 @@ nv30_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x35:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -61,6 +73,11 @@ nv30_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x31:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -73,6 +90,12 @@ nv30_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x36:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -85,6 +108,12 @@ nv30_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x34:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -97,6 +126,12 @@ nv30_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv34_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown Rankine chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
index 5e1ef5e4cf7f..2e071fa9fca0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -34,6 +34,13 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/mpeg.h>
+#include <engine/disp.h>
+
 int
 nv40_identify(struct nouveau_device *device)
 {
@@ -49,6 +56,12 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x41:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -61,6 +74,12 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x42:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -73,6 +92,12 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x43:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -85,6 +110,12 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x45:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -97,6 +128,12 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x47:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -109,6 +146,12 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x49:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -121,6 +164,12 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x4b:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -133,6 +182,12 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x44:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -145,6 +200,12 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x46:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -157,6 +218,12 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x4a:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -169,6 +236,12 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x4c:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -181,6 +254,12 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x4e:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -193,6 +272,12 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x63:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -205,6 +290,12 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x67:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -217,6 +308,12 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	case 0x68:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -229,6 +326,12 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown Curie chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
index 5e86a2f6ad8a..5d44b2a5bfa9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -35,6 +35,18 @@
 #include <subdev/vm.h>
 #include <subdev/bar.h>
 
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/mpeg.h>
+#include <engine/vp.h>
+#include <engine/crypt.h>
+#include <engine/bsp.h>
+#include <engine/ppp.h>
+#include <engine/copy.h>
+#include <engine/disp.h>
+
 int
 nv50_identify(struct nouveau_device *device)
 {
@@ -51,6 +63,12 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv50_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv50_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
 		break;
 	case 0x84:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -64,6 +82,15 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
 		break;
 	case 0x86:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -77,6 +104,15 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
 		break;
 	case 0x92:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -90,6 +126,15 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
 		break;
 	case 0x94:
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -103,6 +148,15 @@ nv50_identify(struct nouveau_device *device)
103 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 148 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
104 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 149 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
105 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 150 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
151 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
152 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
153 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
154 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
155 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
156 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
157 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
158 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
159 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
106 break; 160 break;
107 case 0x96: 161 case 0x96:
108 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 162 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -116,6 +170,15 @@ nv50_identify(struct nouveau_device *device)
116 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 170 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
117 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 171 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
118 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 172 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
173 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
174 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
175 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
176 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
177 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
178 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
179 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
180 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
181 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
119 break; 182 break;
120 case 0x98: 183 case 0x98:
121 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 184 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -129,6 +192,15 @@ nv50_identify(struct nouveau_device *device)
129 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 192 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
130 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 193 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
131 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 194 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
195 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
196 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
197 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
198 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
199 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
200 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
201 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
202 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
203 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
132 break; 204 break;
133 case 0xa0: 205 case 0xa0:
134 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 206 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -142,6 +214,15 @@ nv50_identify(struct nouveau_device *device)
142 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 214 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
143 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 215 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
144 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 216 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
217 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
218 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
219 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
220 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
221 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
222 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
223 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
224 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
225 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
145 break; 226 break;
146 case 0xaa: 227 case 0xaa:
147 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 228 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -155,6 +236,15 @@ nv50_identify(struct nouveau_device *device)
155 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 236 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
156 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 237 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
157 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 238 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
239 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
240 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
241 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
242 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
243 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
244 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
245 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
246 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
247 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
158 break; 248 break;
159 case 0xac: 249 case 0xac:
160 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 250 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -168,6 +258,15 @@ nv50_identify(struct nouveau_device *device)
168 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 258 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
169 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 259 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
170 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 260 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
261 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
262 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
263 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
264 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
265 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
266 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
267 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
268 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
269 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
171 break; 270 break;
172 case 0xa3: 271 case 0xa3:
173 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 272 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -181,6 +280,16 @@ nv50_identify(struct nouveau_device *device)
181 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 280 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
182 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 281 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
183 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 282 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
283 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
284 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
285 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
286 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
287 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
288 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
289 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
290 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
291 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
292 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
184 break; 293 break;
185 case 0xa5: 294 case 0xa5:
186 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 295 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -194,6 +303,15 @@ nv50_identify(struct nouveau_device *device)
194 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 303 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
195 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 304 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
196 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 305 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
306 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
307 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
308 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
309 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
310 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
311 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
312 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
313 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
314 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
197 break; 315 break;
198 case 0xa8: 316 case 0xa8:
199 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 317 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -207,6 +325,15 @@ nv50_identify(struct nouveau_device *device)
207 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 325 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
208 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 326 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
209 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 327 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
328 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
329 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
330 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
331 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
332 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
333 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
334 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
335 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
336 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
210 break; 337 break;
211 case 0xaf: 338 case 0xaf:
212 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 339 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -220,6 +347,15 @@ nv50_identify(struct nouveau_device *device)
220 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 347 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
221 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 348 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
222 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 349 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
350 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
351 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
352 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
353 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
354 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
355 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
356 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
357 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
358 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
223 break; 359 break;
224 default: 360 default:
225 nv_fatal(device, "unknown Tesla chipset\n"); 361 nv_fatal(device, "unknown Tesla chipset\n");
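
Note how the Tesla table varies per chipset: 0x98/0xaa/0xac swap nv84_crypt for nv98_crypt and gain PPP while dropping MPEG, and 0xa3 onwards drop CRYPT in favour of the nva3 copy engine. Because unsupported slots simply stay NULL, later code can probe for an engine before relying on it; a hedged sketch (the helper name is hypothetical, not from this commit):

/* Hypothetical helper: a NULL oclass slot means the chipset lacks that
 * engine, so a presence test reduces to a pointer check. */
static inline bool
nouveau_device_has(struct nouveau_device *device, int engidx)
{
        return device->oclass[engidx] != NULL;
}

/* e.g. only the nv84-nvac Teslas register a crypt engine:
 *      if (nouveau_device_has(device, NVDEV_ENGINE_CRYPT)) ...
 */
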
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
index 87f4e16379c6..81d6ed593428 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -36,6 +36,16 @@
36#include <subdev/vm.h> 36#include <subdev/vm.h>
37#include <subdev/bar.h> 37#include <subdev/bar.h>
38 38
39#include <engine/dmaobj.h>
40#include <engine/fifo.h>
41#include <engine/software.h>
42#include <engine/graph.h>
43#include <engine/vp.h>
44#include <engine/bsp.h>
45#include <engine/ppp.h>
46#include <engine/copy.h>
47#include <engine/disp.h>
48
39int 49int
40nvc0_identify(struct nouveau_device *device) 50nvc0_identify(struct nouveau_device *device)
41{ 51{
@@ -53,6 +63,16 @@ nvc0_identify(struct nouveau_device *device)
53 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 63 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
54 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 64 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
55 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 65 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
66 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
67 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
68 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
69 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
70 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
71 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
72 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
73 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
74 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
75 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
56 break; 76 break;
57 case 0xc4: 77 case 0xc4:
58 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 78 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -67,6 +87,16 @@ nvc0_identify(struct nouveau_device *device)
67 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 87 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
68 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 88 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
69 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 89 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
90 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
91 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
92 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
93 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
94 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
95 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
96 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
97 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
98 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
99 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
70 break; 100 break;
71 case 0xc3: 101 case 0xc3:
72 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 102 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -81,6 +111,16 @@ nvc0_identify(struct nouveau_device *device)
81 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 111 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
82 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 112 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
83 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 113 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
114 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
115 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
116 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
117 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
118 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
119 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
120 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
121 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
122 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
123 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
84 break; 124 break;
85 case 0xce: 125 case 0xce:
86 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 126 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -95,6 +135,16 @@ nvc0_identify(struct nouveau_device *device)
95 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 135 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
96 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 136 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
97 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 137 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
138 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
139 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
140 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
141 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
142 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
143 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
144 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
145 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
146 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
147 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
98 break; 148 break;
99 case 0xcf: 149 case 0xcf:
100 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 150 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -109,6 +159,16 @@ nvc0_identify(struct nouveau_device *device)
109 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 159 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
110 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 160 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
111 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 161 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
162 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
163 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
164 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
165 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
166 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
167 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
168 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
169 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
170 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
171 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
112 break; 172 break;
113 case 0xc1: 173 case 0xc1:
114 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 174 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -123,6 +183,16 @@ nvc0_identify(struct nouveau_device *device)
123 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 183 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
124 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 184 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
125 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 185 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
186 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
187 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
188 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
189 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
190 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
191 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
192 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
193 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
194 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
195 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
126 break; 196 break;
127 case 0xc8: 197 case 0xc8:
128 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 198 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -137,6 +207,16 @@ nvc0_identify(struct nouveau_device *device)
137 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 207 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
138 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 208 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
139 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 209 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
210 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
211 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
212 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
213 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
214 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
215 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
216 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
217 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
218 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
219 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
140 break; 220 break;
141 case 0xd9: 221 case 0xd9:
142 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 222 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -151,6 +231,15 @@ nvc0_identify(struct nouveau_device *device)
151 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 231 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
152 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 232 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
153 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 233 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
234 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
235 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
236 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
237 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
238 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
239 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
240 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
241 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
242 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
154 break; 243 break;
155 default: 244 default:
156 nv_fatal(device, "unknown Fermi chipset\n"); 245 nv_fatal(device, "unknown Fermi chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
index ab8346b8bde0..f4f5a5af3c06 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -36,6 +36,12 @@
36#include <subdev/vm.h> 36#include <subdev/vm.h>
37#include <subdev/bar.h> 37#include <subdev/bar.h>
38 38
39#include <engine/dmaobj.h>
40#include <engine/fifo.h>
41#include <engine/software.h>
42#include <engine/graph.h>
43#include <engine/disp.h>
44
39int 45int
40nve0_identify(struct nouveau_device *device) 46nve0_identify(struct nouveau_device *device)
41{ 47{
@@ -53,6 +59,11 @@ nve0_identify(struct nouveau_device *device)
53 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 59 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
54 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 60 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
55 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 61 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
62 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
63 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
64 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
65 device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
66 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
56 break; 67 break;
57 case 0xe7: 68 case 0xe7:
58 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 69 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -67,6 +78,11 @@ nve0_identify(struct nouveau_device *device)
67 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 78 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
68 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 79 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
69 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 80 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
81 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
82 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
83 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
84 device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
85 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
70 break; 86 break;
71 default: 87 default:
72 nv_fatal(device, "unknown Kepler chipset\n"); 88 nv_fatal(device, "unknown Kepler chipset\n");
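
At this point Kepler registers only DMAOBJ/FIFO/SW/GR/DISP; the video and copy engines for these chips land in later patches. The filled-in table is presumably consumed by the device init code, which would walk the slots in index order and construct an object for each non-NULL class. A sketch under that assumption only (the constructor name here is hypothetical; the real consumer is not part of this diff):

/* Assumed consumer of the oclass table; illustrative, not the core code. */
static int
device_init_sketch(struct nouveau_device *device)
{
        int i, ret;

        for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
                if (!device->oclass[i])
                        continue; /* subdev/engine absent on this chipset */
                ret = nouveau_device_ctor_one(device, i, device->oclass[i]);
                if (ret)
                        return ret;
        }
        return 0;
}
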
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index f44f0f096689..ba4d28b50368 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -139,8 +139,7 @@ nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
139 return ret; 139 return ret;
140 140
141 /* 0x10000-0x18000: reserve for RAMHT */ 141 /* 0x10000-0x18000: reserve for RAMHT */
142 ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0, 142 ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
143 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramht);
144 if (ret) 143 if (ret)
145 return ret; 144 return ret;
146 145
@@ -165,7 +164,7 @@ nv04_instmem_dtor(struct nouveau_object *object)
165 struct nv04_instmem_priv *priv = (void *)object; 164 struct nv04_instmem_priv *priv = (void *)object;
166 nouveau_gpuobj_ref(NULL, &priv->ramfc); 165 nouveau_gpuobj_ref(NULL, &priv->ramfc);
167 nouveau_gpuobj_ref(NULL, &priv->ramro); 166 nouveau_gpuobj_ref(NULL, &priv->ramro);
168 nouveau_gpuobj_ref(NULL, &priv->ramht); 167 nouveau_ramht_ref(NULL, &priv->ramht);
169 nouveau_gpuobj_ref(NULL, &priv->vbios); 168 nouveau_gpuobj_ref(NULL, &priv->vbios);
170 nouveau_mm_fini(&priv->heap); 169 nouveau_mm_fini(&priv->heap);
171 if (priv->iomem) 170 if (priv->iomem)
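
The instmem conversion replaces a raw, zero-filled gpuobj for RAMHT with the dedicated RAMHT type: allocation goes through nouveau_ramht_new(), and the destructor drops the reference with nouveau_ramht_ref(NULL, ...) instead of nouveau_gpuobj_ref(). Condensed from the hunks above, with the error paths shortened:

/* 0x08000 bytes = the reserved 0x10000-0x18000 RAMHT window */
struct nouveau_ramht *ramht;
int ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &ramht);
if (ret)
        return ret;

/* ... hash-table lookups/insertions via the ramht API ... */

nouveau_ramht_ref(NULL, &ramht);        /* dtor: release our reference */
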
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
index b2f82f9e4e7f..7983d8d9b358 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
@@ -2,6 +2,7 @@
2#define __NV04_INSTMEM_H__ 2#define __NV04_INSTMEM_H__
3 3
4#include <core/gpuobj.h> 4#include <core/gpuobj.h>
5#include <core/ramht.h>
5#include <core/mm.h> 6#include <core/mm.h>
6 7
7#include <subdev/instmem.h> 8#include <subdev/instmem.h>
@@ -14,11 +15,17 @@ struct nv04_instmem_priv {
14 struct nouveau_mm heap; 15 struct nouveau_mm heap;
15 16
16 struct nouveau_gpuobj *vbios; 17 struct nouveau_gpuobj *vbios;
17 struct nouveau_gpuobj *ramht; 18 struct nouveau_ramht *ramht;
18 struct nouveau_gpuobj *ramro; 19 struct nouveau_gpuobj *ramro;
19 struct nouveau_gpuobj *ramfc; 20 struct nouveau_gpuobj *ramfc;
20}; 21};
21 22
23static inline struct nv04_instmem_priv *
24nv04_instmem(void *obj)
25{
26 return (void *)nouveau_instmem(obj);
27}
28
22struct nv04_instobj_priv { 29struct nv04_instobj_priv {
23 struct nouveau_instobj base; 30 struct nouveau_instobj base;
24 struct nouveau_mm_node *mem; 31 struct nouveau_mm_node *mem;
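
nv04_instmem() is a small downcast helper: given any object living under the instmem subdev, it returns the chipset-private struct. The same pattern recurs in subdev/vm/nv04.h further down. Illustrative use only (the calling function is hypothetical):

static void
nv04_instmem_use_example(struct nouveau_object *obj)
{
        /* one call replaces open-coded (struct nv04_instmem_priv *) casts */
        struct nv04_instmem_priv *priv = nv04_instmem(obj);

        /* priv->ramht, priv->ramro and priv->ramfc are now in reach */
        (void)priv;
}
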
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index 6a22160324c1..73c52ebd5932 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -87,8 +87,7 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
87 return ret; 87 return ret;
88 88
89 /* 0x10000-0x18000: reserve for RAMHT */ 89 /* 0x10000-0x18000: reserve for RAMHT */
90 ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0, 90 ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
91 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramht);
92 if (ret) 91 if (ret)
93 return ret; 92 return ret;
94 93
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index b92b3d47c69c..082c11b75acb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -299,6 +299,7 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
299 299
300 fpde = (vma->node->offset >> vmm->pgt_bits); 300 fpde = (vma->node->offset >> vmm->pgt_bits);
301 lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits; 301 lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
302
302 for (pde = fpde; pde <= lpde; pde++) { 303 for (pde = fpde; pde <= lpde; pde++) {
303 struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; 304 struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
304 int big = (vma->node->type != vmm->spg_shift); 305 int big = (vma->node->type != vmm->spg_shift);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
index 6475c0201d01..bfe6766d36ec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
@@ -96,6 +96,7 @@ nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
96 return ret; 96 return ret;
97 97
98 priv->base.create = nv04_vm_create; 98 priv->base.create = nv04_vm_create;
99 priv->base.limit = NV04_PDMA_SIZE;
99 priv->base.pgt_bits = 32 - 12; 100 priv->base.pgt_bits = 32 - 12;
100 priv->base.spg_shift = 12; 101 priv->base.spg_shift = 12;
101 priv->base.lpg_shift = 12; 102 priv->base.lpg_shift = 12;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
index 530930320bc4..e21369cd09c0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
@@ -10,4 +10,10 @@ struct nv04_vmmgr_priv {
10 dma_addr_t null; 10 dma_addr_t null;
11}; 11};
12 12
13static inline struct nv04_vmmgr_priv *
14nv04_vmmgr(void *obj)
15{
16 return (void *)nouveau_vmmgr(obj);
17}
18
13#endif 19#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
index f0367703dff0..bbeac8d296ed 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
@@ -97,6 +97,7 @@ nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
97 return ret; 97 return ret;
98 98
99 priv->base.create = nv04_vm_create; 99 priv->base.create = nv04_vm_create;
100 priv->base.limit = NV41_GART_SIZE;
100 priv->base.pgt_bits = 32 - 12; 101 priv->base.pgt_bits = 32 - 12;
101 priv->base.spg_shift = 12; 102 priv->base.spg_shift = 12;
102 priv->base.lpg_shift = 12; 103 priv->base.lpg_shift = 12;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
index d17f76120bcd..d099cde3a7f5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
@@ -178,6 +178,7 @@ nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
178 return ret; 178 return ret;
179 179
180 priv->base.create = nv04_vm_create; 180 priv->base.create = nv04_vm_create;
181 priv->base.limit = NV44_GART_SIZE;
181 priv->base.pgt_bits = 32 - 12; 182 priv->base.pgt_bits = 32 - 12;
182 priv->base.spg_shift = 12; 183 priv->base.spg_shift = 12;
183 priv->base.lpg_shift = 12; 184 priv->base.lpg_shift = 12;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
index 6e9bcd212cfc..0f0d3a5de5c2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
@@ -154,7 +154,6 @@ nv50_vm_flush(struct nouveau_vm *vm)
154 struct nouveau_engine *engine; 154 struct nouveau_engine *engine;
155 int i; 155 int i;
156 156
157#if 0
158 for (i = 0; i < NVDEV_SUBDEV_NR; i++) { 157 for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
159 if (atomic_read(&vm->engref[i])) { 158 if (atomic_read(&vm->engref[i])) {
160 engine = nouveau_engine(vm->vmm, i); 159 engine = nouveau_engine(vm->vmm, i);
@@ -162,11 +161,6 @@ nv50_vm_flush(struct nouveau_vm *vm)
162 engine->tlb_flush(engine); 161 engine->tlb_flush(engine);
163 } 162 }
164 } 163 }
165#else
166 nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x06); /* bar */
167 nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x05); /* fifo */
168 nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x00); /* gr */
169#endif
170} 164}
171 165
172void 166void
@@ -206,6 +200,7 @@ nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
206 if (ret) 200 if (ret)
207 return ret; 201 return ret;
208 202
203 priv->base.limit = 1ULL << 40;
209 priv->base.pgt_bits = 29 - 12; 204 priv->base.pgt_bits = 29 - 12;
210 priv->base.spg_shift = 12; 205 priv->base.spg_shift = 12;
211 priv->base.lpg_shift = 16; 206 priv->base.lpg_shift = 16;
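
nv50_vm_flush() loses its #if 0: the generic path, which flushes only engines whose engref count shows they actually reference this VM, replaces the hardcoded bar/fifo/gr flushes that the old engine layout forced. Restated for clarity:

/* Restatement of the now-live flush loop from the hunk above. */
for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
        if (!atomic_read(&vm->engref[i]))
                continue;               /* engine holds no reference to vm */
        engine = nouveau_engine(vm->vmm, i);
        if (engine && engine->tlb_flush)
                engine->tlb_flush(engine);
}
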
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
index a0bc0f678d12..e48ece297511 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
@@ -162,6 +162,7 @@ nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
162 if (ret) 162 if (ret)
163 return ret; 163 return ret;
164 164
165 priv->base.limit = 1ULL << 40;
165 priv->base.pgt_bits = 27 - 12; 166 priv->base.pgt_bits = 27 - 12;
166 priv->base.spg_shift = 12; 167 priv->base.spg_shift = 12;
167 priv->base.lpg_shift = 17; 168 priv->base.lpg_shift = 17;
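
Each vmmgr constructor now records the size of its address space in priv->base.limit: NV04_PDMA_SIZE and the NV41/NV44 GART sizes for the DMA-window managers, 1ULL << 40 for the nv50/nvc0 page-table VMs. A plausible consumer is a bounds check when carving out VMAs; a hedged sketch under that assumption (the actual check in vm/base.c is not shown in this diff):

/* Illustrative bounds check the new field enables; name is hypothetical. */
static int
vm_range_ok(struct nouveau_vmmgr *vmm, u64 offset, u64 length)
{
        if (offset + length > vmm->limit)
                return -ERANGE;         /* request exceeds the address space */
        return 0;
}
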
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 9b3a4617bffa..9e6ced3b941a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -21,23 +21,153 @@
21 * 21 *
22 */ 22 */
23 23
24#include "drmP.h" 24#include <core/object.h>
25#include <core/client.h>
26#include <core/device.h>
27#include <core/class.h>
28#include <core/mm.h>
25 29
26#include "nouveau_drv.h" 30#include <subdev/fb.h>
31#include <subdev/timer.h>
32#include <subdev/instmem.h>
33
34#include "nouveau_drm.h"
27#include "nouveau_dma.h" 35#include "nouveau_dma.h"
36#include "nouveau_gem.h"
37#include "nouveau_chan.h"
28#include "nouveau_abi16.h" 38#include "nouveau_abi16.h"
29#include <core/ramht.h> 39
30#include "nouveau_software.h" 40struct nouveau_abi16 *
41nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
42{
43 struct nouveau_cli *cli = nouveau_cli(file_priv);
44 mutex_lock(&cli->mutex);
45 if (!cli->abi16) {
46 struct nouveau_abi16 *abi16;
47 cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
48 if (cli->abi16) {
49 INIT_LIST_HEAD(&abi16->channels);
50 abi16->client = nv_object(cli);
51
52 /* allocate device object targeting client's default
53 * device (ie. the one that belongs to the fd it
54 * opened)
55 */
56 if (nouveau_object_new(abi16->client, NVDRM_CLIENT,
57 NVDRM_DEVICE, 0x0080,
58 &(struct nv_device_class) {
59 .device = ~0ULL,
60 },
61 sizeof(struct nv_device_class),
62 &abi16->device) == 0)
63 return cli->abi16;
64
65 kfree(cli->abi16);
66 cli->abi16 = NULL;
67 }
68
69 mutex_unlock(&cli->mutex);
70 }
71 return cli->abi16;
72}
73
74int
75nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
76{
77 struct nouveau_cli *cli = (void *)abi16->client;
78 mutex_unlock(&cli->mutex);
79 return ret;
80}
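
nouveau_abi16_get()/nouveau_abi16_put() bracket every ABI16 ioctl from here on: get takes cli->mutex and lazily builds the per-client state (including the 0x0080 device object), put unlocks and passes the status through, which keeps each error path a single return statement. The shape every handler below follows, sketched:

/* Sketch of the ioctl bracket; the body is whatever the ioctl does. */
int
nouveau_abi16_ioctl_example(ABI16_IOCTL_ARGS)
{
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);

        if (unlikely(!abi16))
                return -ENOMEM; /* get failed; cli->mutex is not held */

        /* ... work under cli->mutex ... */

        return nouveau_abi16_put(abi16, 0);     /* unlock + return status */
}
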
81
82u16
83nouveau_abi16_swclass(struct nouveau_drm *drm)
84{
85 switch (nv_device(drm->device)->card_type) {
86 case NV_04:
87 return 0x006e;
88 case NV_10:
89 case NV_20:
90 case NV_30:
91 case NV_40:
92 return 0x016e;
93 case NV_50:
94 return 0x506e;
95 case NV_C0:
96 case NV_D0:
97 case NV_E0:
98 return 0x906e;
99 }
100
101 return 0x0000;
102}
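
nouveau_abi16_swclass() folds the per-generation software classes into one lookup; grobj_alloc below uses it to keep old userspace working: requests for 0x506e are rewritten to the real class for the card, and 0x906e (NV_C0 and newer) is reported as success without creating an object. Mirroring the shim:

/* The compatibility shim fed by this helper (condensed from below). */
if (init->class == 0x506e) {
        init->class = nouveau_abi16_swclass(drm);
        if (init->class == 0x906e)      /* Fermi+: nothing to create */
                return nouveau_abi16_put(abi16, 0);
}
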
103
104static void
105nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
106 struct nouveau_abi16_ntfy *ntfy)
107{
108 nouveau_mm_free(&chan->heap, &ntfy->node);
109 list_del(&ntfy->head);
110 kfree(ntfy);
111}
112
113static void
114nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
115 struct nouveau_abi16_chan *chan)
116{
117 struct nouveau_abi16_ntfy *ntfy, *temp;
118
119 /* cleanup notifier state */
120 list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
121 nouveau_abi16_ntfy_fini(chan, ntfy);
122 }
123
124 if (chan->ntfy) {
125 nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
126 drm_gem_object_unreference_unlocked(chan->ntfy->gem);
127 }
128
129 if (chan->heap.block_size)
130 nouveau_mm_fini(&chan->heap);
131
132 /* destroy channel object, all children will be killed too */
133 if (chan->chan) {
134 abi16->handles &= ~(1 << (chan->chan->handle & 0xffff));
135 nouveau_channel_del(&chan->chan);
136 }
137
138 list_del(&chan->head);
139 kfree(chan);
140}
141
142void
143nouveau_abi16_fini(struct nouveau_abi16 *abi16)
144{
145 struct nouveau_cli *cli = (void *)abi16->client;
146 struct nouveau_abi16_chan *chan, *temp;
147
148 /* cleanup channels */
149 list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
150 nouveau_abi16_chan_fini(abi16, chan);
151 }
152
153 /* destroy the device object */
154 nouveau_object_del(abi16->client, NVDRM_CLIENT, NVDRM_DEVICE);
155
156 kfree(cli->abi16);
157 cli->abi16 = NULL;
158}
31 159
32int 160int
33nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) 161nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
34{ 162{
35 struct drm_nouveau_private *dev_priv = dev->dev_private; 163 struct nouveau_drm *drm = nouveau_drm(dev);
164 struct nouveau_device *device = nv_device(drm->device);
165 struct nouveau_timer *ptimer = nouveau_timer(device);
36 struct drm_nouveau_getparam *getparam = data; 166 struct drm_nouveau_getparam *getparam = data;
37 167
38 switch (getparam->param) { 168 switch (getparam->param) {
39 case NOUVEAU_GETPARAM_CHIPSET_ID: 169 case NOUVEAU_GETPARAM_CHIPSET_ID:
40 getparam->value = dev_priv->chipset; 170 getparam->value = device->chipset;
41 break; 171 break;
42 case NOUVEAU_GETPARAM_PCI_VENDOR: 172 case NOUVEAU_GETPARAM_PCI_VENDOR:
43 getparam->value = dev->pci_vendor; 173 getparam->value = dev->pci_vendor;
@@ -55,16 +185,16 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
55 getparam->value = 2; 185 getparam->value = 2;
56 break; 186 break;
57 case NOUVEAU_GETPARAM_FB_SIZE: 187 case NOUVEAU_GETPARAM_FB_SIZE:
58 getparam->value = dev_priv->fb_available_size; 188 getparam->value = drm->gem.vram_available;
59 break; 189 break;
60 case NOUVEAU_GETPARAM_AGP_SIZE: 190 case NOUVEAU_GETPARAM_AGP_SIZE:
61 getparam->value = dev_priv->gart_info.aper_size; 191 getparam->value = drm->gem.gart_available;
62 break; 192 break;
63 case NOUVEAU_GETPARAM_VM_VRAM_BASE: 193 case NOUVEAU_GETPARAM_VM_VRAM_BASE:
64 getparam->value = 0; /* deprecated */ 194 getparam->value = 0; /* deprecated */
65 break; 195 break;
66 case NOUVEAU_GETPARAM_PTIMER_TIME: 196 case NOUVEAU_GETPARAM_PTIMER_TIME:
67 getparam->value = nv_timer_read(dev); 197 getparam->value = ptimer->read(ptimer);
68 break; 198 break;
69 case NOUVEAU_GETPARAM_HAS_BO_USAGE: 199 case NOUVEAU_GETPARAM_HAS_BO_USAGE:
70 getparam->value = 1; 200 getparam->value = 1;
@@ -76,13 +206,13 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
76 /* NV40 and NV50 versions are quite different, but register 206 /* NV40 and NV50 versions are quite different, but register
77 * address is the same. User is supposed to know the card 207 * address is the same. User is supposed to know the card
78 * family anyway... */ 208 * family anyway... */
79 if (dev_priv->chipset >= 0x40) { 209 if (device->chipset >= 0x40) {
80 getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS); 210 getparam->value = nv_rd32(device, 0x001540);
81 break; 211 break;
82 } 212 }
83 /* FALLTHRU */ 213 /* FALLTHRU */
84 default: 214 default:
85 NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param); 215 nv_debug(device, "unknown parameter %lld\n", getparam->param);
86 return -EINVAL; 216 return -EINVAL;
87 } 217 }
88 218
@@ -98,148 +228,247 @@ nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS)
98int 228int
99nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) 229nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
100{ 230{
101 struct drm_nouveau_private *dev_priv = dev->dev_private;
102 struct drm_nouveau_channel_alloc *init = data; 231 struct drm_nouveau_channel_alloc *init = data;
103 struct nouveau_channel *chan; 232 struct nouveau_cli *cli = nouveau_cli(file_priv);
233 struct nouveau_drm *drm = nouveau_drm(dev);
234 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
235 struct nouveau_abi16_chan *chan;
236 struct nouveau_client *client;
237 struct nouveau_device *device;
238 struct nouveau_instmem *imem;
239 struct nouveau_fb *pfb;
104 int ret; 240 int ret;
105 241
106 if (!dev_priv->eng[NVOBJ_ENGINE_GR]) 242 if (unlikely(!abi16))
107 return -ENODEV; 243 return -ENOMEM;
244 client = nv_client(abi16->client);
108 245
109 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) 246 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
110 return -EINVAL; 247 return nouveau_abi16_put(abi16, -EINVAL);
248
249 device = nv_device(abi16->device);
250 imem = nouveau_instmem(device);
251 pfb = nouveau_fb(device);
252
253 /* allocate "abi16 channel" data and make up a handle for it */
254 init->channel = ffsll(~abi16->handles);
255 if (!init->channel--)
256 return nouveau_abi16_put(abi16, -ENOSPC);
257
258 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
259 if (!chan)
260 return nouveau_abi16_put(abi16, -ENOMEM);
261
262 INIT_LIST_HEAD(&chan->notifiers);
263 list_add(&chan->head, &abi16->channels);
264 abi16->handles |= (1 << init->channel);
111 265
112 ret = nouveau_channel_alloc(dev, &chan, file_priv, 266 /* create channel object and initialise dma and fence management */
113 init->fb_ctxdma_handle, 267 ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
114 init->tt_ctxdma_handle); 268 init->channel, init->fb_ctxdma_handle,
269 init->tt_ctxdma_handle, &chan->chan);
115 if (ret) 270 if (ret)
116 return ret; 271 goto done;
117 init->channel = chan->id; 272
118 273 if (device->card_type >= NV_50)
119 if (nouveau_vram_pushbuf == 0) { 274 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
120 if (chan->dma.ib_max) 275 NOUVEAU_GEM_DOMAIN_GART;
121 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | 276 else
122 NOUVEAU_GEM_DOMAIN_GART; 277 if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
123 else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
124 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
125 else
126 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
127 } else {
128 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; 278 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
129 } 279 else
280 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
130 281
131 if (dev_priv->card_type < NV_C0) { 282 if (device->card_type < NV_C0) {
132 init->subchan[0].handle = 0x00000000; 283 init->subchan[0].handle = 0x00000000;
133 init->subchan[0].grclass = 0x0000; 284 init->subchan[0].grclass = 0x0000;
134 init->subchan[1].handle = NvSw; 285 init->subchan[1].handle = NvSw;
135 init->subchan[1].grclass = NV_SW; 286 init->subchan[1].grclass = 0x506e;
136 init->nr_subchan = 2; 287 init->nr_subchan = 2;
137 } 288 }
138 289
139 /* Named memory object area */ 290 /* Named memory object area */
140 ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, 291 ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
292 0, 0, &chan->ntfy);
293 if (ret == 0)
294 ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT);
295 if (ret)
296 goto done;
297
298 if (device->card_type >= NV_50) {
299 ret = nouveau_bo_vma_add(chan->ntfy, client->vm,
300 &chan->ntfy_vma);
301 if (ret)
302 goto done;
303 }
304
305 ret = drm_gem_handle_create(file_priv, chan->ntfy->gem,
141 &init->notifier_handle); 306 &init->notifier_handle);
307 if (ret)
308 goto done;
142 309
143 if (ret == 0) 310 ret = nouveau_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
144 atomic_inc(&chan->users); /* userspace reference */ 311done:
145 nouveau_channel_put(&chan); 312 if (ret)
146 return ret; 313 nouveau_abi16_chan_fini(abi16, chan);
314 return nouveau_abi16_put(abi16, ret);
147} 315}
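
Channel ids are now tracked per client in a 64-bit bitmask rather than globally: ffsll(~handles) returns the 1-based index of the lowest clear bit, and the "if (!init->channel--)" test both converts it to 0-based and catches the all-64-in-use case in one expression. A standalone worked example of the same trick (plain userspace C, not kernel code):

#define _GNU_SOURCE
#include <errno.h>
#include <string.h>     /* ffsll(): 64-bit find-first-set, GNU extension */

static int alloc_channel(unsigned long long *handles)
{
        int channel = ffsll(~*handles); /* 1-based index of lowest clear bit */

        if (!channel--)                 /* ffsll() == 0: all 64 ids in use */
                return -ENOSPC;
        *handles |= 1ULL << channel;    /* claim the id */
        return channel;                 /* handles == 0x7 on entry yields 3 */
}
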
148 316
317
149int 318int
150nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS) 319nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
151{ 320{
152 struct drm_nouveau_channel_free *req = data; 321 struct drm_nouveau_channel_free *req = data;
153 struct nouveau_channel *chan; 322 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
323 struct nouveau_abi16_chan *chan;
324 int ret = -ENOENT;
154 325
155 chan = nouveau_channel_get(file_priv, req->channel); 326 if (unlikely(!abi16))
156 if (IS_ERR(chan)) 327 return -ENOMEM;
157 return PTR_ERR(chan);
158 328
159 list_del(&chan->list); 329 list_for_each_entry(chan, &abi16->channels, head) {
160 atomic_dec(&chan->users); 330 if (chan->chan->handle == (NVDRM_CHAN | req->channel)) {
161 nouveau_channel_put(&chan); 331 nouveau_abi16_chan_fini(abi16, chan);
162 return 0; 332 return nouveau_abi16_put(abi16, 0);
333 }
334 }
335
336 return nouveau_abi16_put(abi16, ret);
163} 337}
164 338
165int 339int
166nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) 340nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
167{ 341{
168 struct drm_nouveau_grobj_alloc *init = data; 342 struct drm_nouveau_grobj_alloc *init = data;
169 struct nouveau_channel *chan; 343 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
344 struct nouveau_drm *drm = nouveau_drm(dev);
345 struct nouveau_object *object;
170 int ret; 346 int ret;
171 347
348 if (unlikely(!abi16))
349 return -ENOMEM;
350
172 if (init->handle == ~0) 351 if (init->handle == ~0)
173 return -EINVAL; 352 return nouveau_abi16_put(abi16, -EINVAL);
174 353
175 /* compatibility with userspace that assumes 506e for all chipsets */ 354 /* compatibility with userspace that assumes 506e for all chipsets */
176 if (init->class == 0x506e) { 355 if (init->class == 0x506e) {
177 init->class = nouveau_software_class(dev); 356 init->class = nouveau_abi16_swclass(drm);
178 if (init->class == 0x906e) 357 if (init->class == 0x906e)
179 return 0; 358 return nouveau_abi16_put(abi16, 0);
180 } else
181 if (init->class == 0x906e) {
182 NV_DEBUG(dev, "906e not supported yet\n");
183 return -EINVAL;
184 }
185
186 chan = nouveau_channel_get(file_priv, init->channel);
187 if (IS_ERR(chan))
188 return PTR_ERR(chan);
189
190 if (nouveau_ramht_find(chan, init->handle)) {
191 ret = -EEXIST;
192 goto out;
193 } 359 }
194 360
195 ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class); 361 ret = nouveau_object_new(abi16->client, NVDRM_CHAN | init->channel,
196 if (ret) { 362 init->handle, init->class, NULL, 0, &object);
197 NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", 363 return nouveau_abi16_put(abi16, ret);
198 ret, init->channel, init->handle);
199 }
200
201out:
202 nouveau_channel_put(&chan);
203 return ret;
204} 364}
205 365
206int 366int
207nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) 367nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
208{ 368{
209 struct drm_nouveau_private *dev_priv = dev->dev_private; 369 struct drm_nouveau_notifierobj_alloc *info = data;
210 struct drm_nouveau_notifierobj_alloc *na = data; 370 struct nouveau_drm *drm = nouveau_drm(dev);
211 struct nouveau_channel *chan; 371 struct nouveau_device *device = nv_device(drm->device);
372 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
373 struct nouveau_abi16_chan *chan, *temp;
374 struct nouveau_abi16_ntfy *ntfy;
375 struct nouveau_object *object;
376 struct nv_dma_class args;
212 int ret; 377 int ret;
213 378
379 if (unlikely(!abi16))
380 return -ENOMEM;
381
214 /* completely unnecessary for these chipsets... */ 382 /* completely unnecessary for these chipsets... */
215 if (unlikely(dev_priv->card_type >= NV_C0)) 383 if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))
216 return -EINVAL; 384 return nouveau_abi16_put(abi16, -EINVAL);
217 385
218 chan = nouveau_channel_get(file_priv, na->channel); 386 list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
219 if (IS_ERR(chan)) 387 if (chan->chan->handle == (NVDRM_CHAN | info->channel))
220 return PTR_ERR(chan); 388 break;
389 chan = NULL;
390 }
221 391
222 ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000, 392 if (!chan)
223 &na->offset); 393 return nouveau_abi16_put(abi16, -ENOENT);
224 nouveau_channel_put(&chan); 394
225 return ret; 395 ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
396 if (!ntfy)
397 return nouveau_abi16_put(abi16, -ENOMEM);
398
399 list_add(&ntfy->head, &chan->notifiers);
400 ntfy->handle = info->handle;
401
402 ret = nouveau_mm_head(&chan->heap, 1, info->size, info->size, 1,
403 &ntfy->node);
404 if (ret)
405 goto done;
406
407 args.start = ntfy->node->offset;
408 args.limit = ntfy->node->offset + ntfy->node->length - 1;
409 if (device->card_type >= NV_50) {
410 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
411 args.start += chan->ntfy_vma.offset;
412 args.limit += chan->ntfy_vma.offset;
413 } else
414 if (drm->agp.stat == ENABLED) {
415 args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
416 args.start += drm->agp.base + chan->ntfy->bo.offset;
417 args.limit += drm->agp.base + chan->ntfy->bo.offset;
418 } else {
419 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
420 args.start += chan->ntfy->bo.offset;
421 args.limit += chan->ntfy->bo.offset;
422 }
423
424 ret = nouveau_object_new(abi16->client, chan->chan->handle,
425 ntfy->handle, 0x003d, &args,
426 sizeof(args), &object);
427 if (ret)
428 goto done;
429
430done:
431 if (ret)
432 nouveau_abi16_ntfy_fini(chan, ntfy);
433 return nouveau_abi16_put(abi16, ret);
226} 434}
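
Notifier objects become ordinary 0x003d DMA objects whose window is node-relative and then rebased onto whichever target backs the notifier bo: the client VM on NV50+, the AGP aperture, or the bare bo offset. Note that DMA-object limits are inclusive, hence the "- 1" on args.limit; worked through with illustrative numbers:

/* Inclusive-limit convention for a 4KiB notifier at heap offset 0. */
u32 start = 0x0000;
u32 size  = 0x1000;
u32 limit = start + size - 1;   /* 0x0fff: the last addressable byte */
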
227 435
228int 436int
229nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) 437nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
230{ 438{
231 struct drm_nouveau_gpuobj_free *objfree = data; 439 struct drm_nouveau_gpuobj_free *fini = data;
232 struct nouveau_channel *chan; 440 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
441 struct nouveau_abi16_chan *chan, *temp;
442 struct nouveau_abi16_ntfy *ntfy;
233 int ret; 443 int ret;
234 444
235 chan = nouveau_channel_get(file_priv, objfree->channel); 445 if (unlikely(!abi16))
236 if (IS_ERR(chan)) 446 return -ENOMEM;
237 return PTR_ERR(chan); 447
448 list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
449 if (chan->chan->handle == (NVDRM_CHAN | fini->channel))
450 break;
451 chan = NULL;
452 }
453
454 if (!chan)
455 return nouveau_abi16_put(abi16, -ENOENT);
238 456
239 /* Synchronize with the user channel */ 457 /* synchronize with the user channel and destroy the gpu object */
240 nouveau_channel_idle(chan); 458 nouveau_channel_idle(chan->chan);
241 459
242 ret = nouveau_ramht_remove(chan, objfree->handle); 460 ret = nouveau_object_del(abi16->client, chan->chan->handle, fini->handle);
243 nouveau_channel_put(&chan); 461 if (ret)
244 return ret; 462 return nouveau_abi16_put(abi16, ret);
463
464 /* cleanup extra state if this object was a notifier */
465 list_for_each_entry(ntfy, &chan->notifiers, head) {
466 if (ntfy->handle == fini->handle) {
467 nouveau_mm_free(&chan->heap, &ntfy->node);
468 list_del(&ntfy->head);
469 break;
470 }
471 }
472
473 return nouveau_abi16_put(abi16, 0);
245} 474}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index e6328b008a8c..90004081a501 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -3,6 +3,7 @@
3 3
4#define ABI16_IOCTL_ARGS \ 4#define ABI16_IOCTL_ARGS \
5 struct drm_device *dev, void *data, struct drm_file *file_priv 5 struct drm_device *dev, void *data, struct drm_file *file_priv
6
6int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS); 7int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS);
7int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS); 8int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS);
8int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS); 9int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS);
@@ -11,6 +12,37 @@ int nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS);
11int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS); 12int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS);
12int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS); 13int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS);
13 14
15struct nouveau_abi16_ntfy {
16 struct list_head head;
17 struct nouveau_mm_node *node;
18 u32 handle;
19};
20
21struct nouveau_abi16_chan {
22 struct list_head head;
23 struct nouveau_channel *chan;
24 struct list_head notifiers;
25 struct nouveau_bo *ntfy;
26 struct nouveau_vma ntfy_vma;
27 struct nouveau_mm heap;
28};
29
30struct nouveau_abi16 {
31 struct nouveau_object *client;
32 struct nouveau_object *device;
33 struct list_head channels;
34 u64 handles;
35};
36
37struct nouveau_drm;
38struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *, struct drm_device *);
39int nouveau_abi16_put(struct nouveau_abi16 *, int);
40void nouveau_abi16_fini(struct nouveau_abi16 *);
41u16 nouveau_abi16_swclass(struct nouveau_drm *);
42
43#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
44#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
45
14struct drm_nouveau_channel_alloc { 46struct drm_nouveau_channel_alloc {
15 uint32_t fb_ctxdma_handle; 47 uint32_t fb_ctxdma_handle;
16 uint32_t tt_ctxdma_handle; 48 uint32_t tt_ctxdma_handle;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 778cd149f7cd..83686ef75d04 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -13,7 +13,6 @@
13#include "drm_crtc_helper.h" 13#include "drm_crtc_helper.h"
14#include "nouveau_drv.h" 14#include "nouveau_drv.h"
15#include <nouveau_drm.h> 15#include <nouveau_drm.h>
16#include "nv50_display.h"
17#include "nouveau_connector.h" 16#include "nouveau_connector.h"
18 17
19#include <linux/vga_switcheroo.h> 18#include <linux/vga_switcheroo.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c3e66ae04c83..3465df327227 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -27,66 +27,57 @@
27 * Jeremy Kolb <jkolb@brandeis.edu> 27 * Jeremy Kolb <jkolb@brandeis.edu>
28 */ 28 */
29 29
30#include "drmP.h" 30#include <core/engine.h>
31#include "ttm/ttm_page_alloc.h"
32 31
33#include <nouveau_drm.h> 32#include <subdev/fb.h>
34#include "nouveau_drv.h" 33#include <subdev/vm.h>
34#include <subdev/bar.h>
35
36#include "nouveau_drm.h"
35#include "nouveau_dma.h" 37#include "nouveau_dma.h"
36#include <core/mm.h>
37#include "nouveau_fence.h" 38#include "nouveau_fence.h"
38#include <core/ramht.h>
39#include <engine/fifo.h>
40 39
41#include <linux/log2.h> 40#include "nouveau_bo.h"
42#include <linux/slab.h> 41#include "nouveau_ttm.h"
42#include "nouveau_gem.h"
43 43
44/* 44/*
45 * NV10-NV40 tiling helpers 45 * NV10-NV40 tiling helpers
46 */ 46 */
47 47
48static void 48static void
49nv10_bo_update_tile_region(struct drm_device *dev, 49nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
50 struct nouveau_tile_reg *tilereg, uint32_t addr, 50 u32 addr, u32 size, u32 pitch, u32 flags)
51 uint32_t size, uint32_t pitch, uint32_t flags)
52{ 51{
53 struct drm_nouveau_private *dev_priv = dev->dev_private; 52 struct nouveau_drm *drm = nouveau_newpriv(dev);
54 int i = tilereg - dev_priv->tile.reg, j; 53 int i = reg - drm->tile.reg;
55 struct nouveau_fb_tile *tile = nvfb_tile(dev, i); 54 struct nouveau_fb *pfb = nouveau_fb(drm->device);
56 unsigned long save; 55 struct nouveau_fb_tile *tile = &pfb->tile.region[i];
56 struct nouveau_engine *engine;
57 57
58 nouveau_fence_unref(&tilereg->fence); 58 nouveau_fence_unref(&reg->fence);
59 59
60 if (tile->pitch) 60 if (tile->pitch)
61 nvfb_tile_fini(dev, i); 61 pfb->tile.fini(pfb, i, tile);
62 62
63 if (pitch) 63 if (pitch)
64 nvfb_tile_init(dev, i, addr, size, pitch, flags); 64 pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
65
66 spin_lock_irqsave(&dev_priv->context_switch_lock, save);
67 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
68 nv04_fifo_cache_pull(dev, false);
69 65
70 nouveau_wait_for_idle(dev); 66 pfb->tile.prog(pfb, i, tile);
71
72 nvfb_tile_prog(dev, i);
73 for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
74 if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
75 dev_priv->eng[j]->set_tile_region(dev, i);
76 }
77 67
78 nv04_fifo_cache_pull(dev, true); 68 if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
79 nv_wr32(dev, NV03_PFIFO_CACHES, 1); 69 engine->tile_prog(engine, i);
80 spin_unlock_irqrestore(&dev_priv->context_switch_lock, save); 70 if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
71 engine->tile_prog(engine, i);
81} 72}
82 73
83static struct nouveau_tile_reg * 74static struct nouveau_drm_tile *
84nv10_bo_get_tile_region(struct drm_device *dev, int i) 75nv10_bo_get_tile_region(struct drm_device *dev, int i)
85{ 76{
86 struct drm_nouveau_private *dev_priv = dev->dev_private; 77 struct nouveau_drm *drm = nouveau_newpriv(dev);
87 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; 78 struct nouveau_drm_tile *tile = &drm->tile.reg[i];
88 79
89 spin_lock(&dev_priv->tile.lock); 80 spin_lock(&drm->tile.lock);
90 81
91 if (!tile->used && 82 if (!tile->used &&
92 (!tile->fence || nouveau_fence_done(tile->fence))) 83 (!tile->fence || nouveau_fence_done(tile->fence)))
@@ -94,18 +85,18 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
94 else 85 else
95 tile = NULL; 86 tile = NULL;
96 87
97 spin_unlock(&dev_priv->tile.lock); 88 spin_unlock(&drm->tile.lock);
98 return tile; 89 return tile;
99} 90}
100 91
101static void 92static void
102nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile, 93nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
103 struct nouveau_fence *fence) 94 struct nouveau_fence *fence)
104{ 95{
105 struct drm_nouveau_private *dev_priv = dev->dev_private; 96 struct nouveau_drm *drm = nouveau_newpriv(dev);
106 97
107 if (tile) { 98 if (tile) {
108 spin_lock(&dev_priv->tile.lock); 99 spin_lock(&drm->tile.lock);
109 if (fence) { 100 if (fence) {
110 /* Mark it as pending. */ 101 /* Mark it as pending. */
111 tile->fence = fence; 102 tile->fence = fence;
@@ -113,25 +104,27 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
113 } 104 }
114 105
115 tile->used = false; 106 tile->used = false;
116 spin_unlock(&dev_priv->tile.lock); 107 spin_unlock(&drm->tile.lock);
117 } 108 }
118} 109}
119 110
120static struct nouveau_tile_reg * 111static struct nouveau_drm_tile *
121nv10_bo_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, 112nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
122 uint32_t pitch, uint32_t flags) 113 u32 size, u32 pitch, u32 flags)
123{ 114{
124 struct nouveau_tile_reg *tile, *found = NULL; 115 struct nouveau_drm *drm = nouveau_newpriv(dev);
116 struct nouveau_fb *pfb = nouveau_fb(drm->device);
117 struct nouveau_drm_tile *tile, *found = NULL;
125 int i; 118 int i;
126 119
127 for (i = 0; i < nvfb_tile_nr(dev); i++) { 120 for (i = 0; i < pfb->tile.regions; i++) {
128 tile = nv10_bo_get_tile_region(dev, i); 121 tile = nv10_bo_get_tile_region(dev, i);
129 122
130 if (pitch && !found) { 123 if (pitch && !found) {
131 found = tile; 124 found = tile;
132 continue; 125 continue;
133 126
134 } else if (tile && nvfb_tile(dev, i)->pitch) { 127 } else if (tile && pfb->tile.region[i].pitch) {
135 /* Kill an unused tile region. */ 128 /* Kill an unused tile region. */
136 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0); 129 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
137 } 130 }
@@ -148,13 +141,12 @@ nv10_bo_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
148static void 141static void
149nouveau_bo_del_ttm(struct ttm_buffer_object *bo) 142nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
150{ 143{
151 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 144 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
152 struct drm_device *dev = dev_priv->dev; 145 struct drm_device *dev = drm->dev;
153 struct nouveau_bo *nvbo = nouveau_bo(bo); 146 struct nouveau_bo *nvbo = nouveau_bo(bo);
154 147
155 if (unlikely(nvbo->gem)) 148 if (unlikely(nvbo->gem))
156 DRM_ERROR("bo %p still attached to GEM object\n", bo); 149 DRM_ERROR("bo %p still attached to GEM object\n", bo);
157
158 nv10_bo_put_tile_region(dev, nvbo->tile, NULL); 150 nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
159 kfree(nvbo); 151 kfree(nvbo);
160} 152}
@@ -163,23 +155,24 @@ static void
163nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, 155nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
164 int *align, int *size) 156 int *align, int *size)
165{ 157{
166 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 158 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
159 struct nouveau_device *device = nv_device(drm->device);
167 160
168 if (dev_priv->card_type < NV_50) { 161 if (device->card_type < NV_50) {
169 if (nvbo->tile_mode) { 162 if (nvbo->tile_mode) {
170 if (dev_priv->chipset >= 0x40) { 163 if (device->chipset >= 0x40) {
171 *align = 65536; 164 *align = 65536;
172 *size = roundup(*size, 64 * nvbo->tile_mode); 165 *size = roundup(*size, 64 * nvbo->tile_mode);
173 166
174 } else if (dev_priv->chipset >= 0x30) { 167 } else if (device->chipset >= 0x30) {
175 *align = 32768; 168 *align = 32768;
176 *size = roundup(*size, 64 * nvbo->tile_mode); 169 *size = roundup(*size, 64 * nvbo->tile_mode);
177 170
178 } else if (dev_priv->chipset >= 0x20) { 171 } else if (device->chipset >= 0x20) {
179 *align = 16384; 172 *align = 16384;
180 *size = roundup(*size, 64 * nvbo->tile_mode); 173 *size = roundup(*size, 64 * nvbo->tile_mode);
181 174
182 } else if (dev_priv->chipset >= 0x10) { 175 } else if (device->chipset >= 0x10) {
183 *align = 16384; 176 *align = 16384;
184 *size = roundup(*size, 32 * nvbo->tile_mode); 177 *size = roundup(*size, 32 * nvbo->tile_mode);
185 } 178 }
@@ -198,7 +191,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
198 struct sg_table *sg, 191 struct sg_table *sg,
199 struct nouveau_bo **pnvbo) 192 struct nouveau_bo **pnvbo)
200{ 193{
201 struct drm_nouveau_private *dev_priv = dev->dev_private; 194 struct nouveau_drm *drm = nouveau_newpriv(dev);
202 struct nouveau_bo *nvbo; 195 struct nouveau_bo *nvbo;
203 size_t acc_size; 196 size_t acc_size;
204 int ret; 197 int ret;
@@ -215,22 +208,22 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
215 INIT_LIST_HEAD(&nvbo->vma_list); 208 INIT_LIST_HEAD(&nvbo->vma_list);
216 nvbo->tile_mode = tile_mode; 209 nvbo->tile_mode = tile_mode;
217 nvbo->tile_flags = tile_flags; 210 nvbo->tile_flags = tile_flags;
218 nvbo->bo.bdev = &dev_priv->ttm.bdev; 211 nvbo->bo.bdev = &drm->ttm.bdev;
219 212
220 nvbo->page_shift = 12; 213 nvbo->page_shift = 12;
221 if (dev_priv->chan_vm) { 214 if (drm->client.base.vm) {
222 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024) 215 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
223 nvbo->page_shift = nvvm_lpg_shift(dev_priv->chan_vm); 216 nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
224 } 217 }
225 218
226 nouveau_bo_fixup_align(nvbo, flags, &align, &size); 219 nouveau_bo_fixup_align(nvbo, flags, &align, &size);
227 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; 220 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
228 nouveau_bo_placement_set(nvbo, flags, 0); 221 nouveau_bo_placement_set(nvbo, flags, 0);
229 222
230 acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size, 223 acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
231 sizeof(struct nouveau_bo)); 224 sizeof(struct nouveau_bo));
232 225
233 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, 226 ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
234 type, &nvbo->placement, 227 type, &nvbo->placement,
235 align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg, 228 align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
236 nouveau_bo_del_ttm); 229 nouveau_bo_del_ttm);
@@ -259,10 +252,11 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
259static void 252static void
260set_placement_range(struct nouveau_bo *nvbo, uint32_t type) 253set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
261{ 254{
262 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 255 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
263 int vram_pages = nvfb_vram_size(dev_priv->dev) >> PAGE_SHIFT; 256 struct nouveau_fb *pfb = nouveau_fb(drm->device);
257 u32 vram_pages = pfb->ram.size >> PAGE_SHIFT;
264 258
265 if (dev_priv->card_type == NV_10 && 259 if (nv_device(drm->device)->card_type == NV_10 &&
266 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && 260 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
267 nvbo->bo.mem.num_pages < vram_pages / 4) { 261 nvbo->bo.mem.num_pages < vram_pages / 4) {
268 /* 262 /*
@@ -302,13 +296,12 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
302int 296int
303nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) 297nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
304{ 298{
305 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 299 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
306 struct ttm_buffer_object *bo = &nvbo->bo; 300 struct ttm_buffer_object *bo = &nvbo->bo;
307 int ret; 301 int ret;
308 302
309 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { 303 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
310 NV_ERROR(nouveau_bdev(bo->bdev)->dev, 304 NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
311 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
312 1 << bo->mem.mem_type, memtype); 305 1 << bo->mem.mem_type, memtype);
313 return -EINVAL; 306 return -EINVAL;
314 } 307 }
@@ -326,10 +319,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
326 if (ret == 0) { 319 if (ret == 0) {
327 switch (bo->mem.mem_type) { 320 switch (bo->mem.mem_type) {
328 case TTM_PL_VRAM: 321 case TTM_PL_VRAM:
329 dev_priv->fb_aper_free -= bo->mem.size; 322 drm->gem.vram_available -= bo->mem.size;
330 break; 323 break;
331 case TTM_PL_TT: 324 case TTM_PL_TT:
332 dev_priv->gart_info.aper_free -= bo->mem.size; 325 drm->gem.gart_available -= bo->mem.size;
333 break; 326 break;
334 default: 327 default:
335 break; 328 break;
@@ -345,7 +338,7 @@ out:
345int 338int
346nouveau_bo_unpin(struct nouveau_bo *nvbo) 339nouveau_bo_unpin(struct nouveau_bo *nvbo)
347{ 340{
348 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 341 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
349 struct ttm_buffer_object *bo = &nvbo->bo; 342 struct ttm_buffer_object *bo = &nvbo->bo;
350 int ret; 343 int ret;
351 344
@@ -362,10 +355,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
362 if (ret == 0) { 355 if (ret == 0) {
363 switch (bo->mem.mem_type) { 356 switch (bo->mem.mem_type) {
364 case TTM_PL_VRAM: 357 case TTM_PL_VRAM:
365 dev_priv->fb_aper_free += bo->mem.size; 358 drm->gem.vram_available += bo->mem.size;
366 break; 359 break;
367 case TTM_PL_TT: 360 case TTM_PL_TT:
368 dev_priv->gart_info.aper_free += bo->mem.size; 361 drm->gem.gart_available += bo->mem.size;
369 break; 362 break;
370 default: 363 default:
371 break; 364 break;
@@ -460,30 +453,18 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
460} 453}
461 454
462static struct ttm_tt * 455static struct ttm_tt *
463nouveau_ttm_tt_create(struct ttm_bo_device *bdev, 456nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
464 unsigned long size, uint32_t page_flags, 457 uint32_t page_flags, struct page *dummy_read)
465 struct page *dummy_read_page)
466{ 458{
467 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 459 struct nouveau_drm *drm = nouveau_bdev(bdev);
468 struct drm_device *dev = dev_priv->dev; 460 struct drm_device *dev = drm->dev;
469 461
470 switch (dev_priv->gart_info.type) { 462 if (drm->agp.stat == ENABLED) {
471#if __OS_HAS_AGP 463 return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
472 case NOUVEAU_GART_AGP: 464 page_flags, dummy_read);
473 return ttm_agp_tt_create(bdev, dev->agp->bridge,
474 size, page_flags, dummy_read_page);
475#endif
476 case NOUVEAU_GART_PDMA:
477 case NOUVEAU_GART_HW:
478 return nouveau_sgdma_create_ttm(bdev, size, page_flags,
479 dummy_read_page);
480 default:
481 NV_ERROR(dev, "Unknown GART type %d\n",
482 dev_priv->gart_info.type);
483 break;
484 } 465 }
485 466
486 return NULL; 467 return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
487} 468}
488 469
489static int 470static int
@@ -497,8 +478,7 @@ static int
497nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, 478nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
498 struct ttm_mem_type_manager *man) 479 struct ttm_mem_type_manager *man)
499{ 480{
500 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 481 struct nouveau_drm *drm = nouveau_bdev(bdev);
501 struct drm_device *dev = dev_priv->dev;
502 482
503 switch (type) { 483 switch (type) {
504 case TTM_PL_SYSTEM: 484 case TTM_PL_SYSTEM:
@@ -507,7 +487,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
507 man->default_caching = TTM_PL_FLAG_CACHED; 487 man->default_caching = TTM_PL_FLAG_CACHED;
508 break; 488 break;
509 case TTM_PL_VRAM: 489 case TTM_PL_VRAM:
510 if (dev_priv->card_type >= NV_50) { 490 if (nv_device(drm->device)->card_type >= NV_50) {
511 man->func = &nouveau_vram_manager; 491 man->func = &nouveau_vram_manager;
512 man->io_reserve_fastpath = false; 492 man->io_reserve_fastpath = false;
513 man->use_io_reserve_lru = true; 493 man->use_io_reserve_lru = true;
@@ -521,35 +501,28 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
521 man->default_caching = TTM_PL_FLAG_WC; 501 man->default_caching = TTM_PL_FLAG_WC;
522 break; 502 break;
523 case TTM_PL_TT: 503 case TTM_PL_TT:
524 if (dev_priv->card_type >= NV_50) 504 if (nv_device(drm->device)->card_type >= NV_50)
525 man->func = &nouveau_gart_manager; 505 man->func = &nouveau_gart_manager;
526 else 506 else
527 if (dev_priv->gart_info.type != NOUVEAU_GART_AGP) 507 if (drm->agp.stat != ENABLED)
528 man->func = &nv04_gart_manager; 508 man->func = &nv04_gart_manager;
529 else 509 else
530 man->func = &ttm_bo_manager_func; 510 man->func = &ttm_bo_manager_func;
531 switch (dev_priv->gart_info.type) { 511
532 case NOUVEAU_GART_AGP: 512 if (drm->agp.stat == ENABLED) {
533 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; 513 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
534 man->available_caching = TTM_PL_FLAG_UNCACHED | 514 man->available_caching = TTM_PL_FLAG_UNCACHED |
535 TTM_PL_FLAG_WC; 515 TTM_PL_FLAG_WC;
536 man->default_caching = TTM_PL_FLAG_WC; 516 man->default_caching = TTM_PL_FLAG_WC;
537 break; 517 } else {
538 case NOUVEAU_GART_PDMA:
539 case NOUVEAU_GART_HW:
540 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | 518 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
541 TTM_MEMTYPE_FLAG_CMA; 519 TTM_MEMTYPE_FLAG_CMA;
542 man->available_caching = TTM_PL_MASK_CACHING; 520 man->available_caching = TTM_PL_MASK_CACHING;
543 man->default_caching = TTM_PL_FLAG_CACHED; 521 man->default_caching = TTM_PL_FLAG_CACHED;
544 break;
545 default:
546 NV_ERROR(dev, "Unknown GART type: %d\n",
547 dev_priv->gart_info.type);
548 return -EINVAL;
549 } 522 }
523
550 break; 524 break;
551 default: 525 default:
552 NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
553 return -EINVAL; 526 return -EINVAL;
554 } 527 }
555 return 0; 528 return 0;
@@ -783,20 +756,14 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
783static int 756static int
784nv50_bo_move_init(struct nouveau_channel *chan, u32 handle) 757nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
785{ 758{
786 int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000, 759 int ret = RING_SPACE(chan, 6);
787 &chan->m2mf_ntfy);
788 if (ret == 0) { 760 if (ret == 0) {
789 ret = RING_SPACE(chan, 6); 761 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
790 if (ret == 0) { 762 OUT_RING (chan, handle);
791 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); 763 BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
792 OUT_RING (chan, handle); 764 OUT_RING (chan, NvNotify0);
793 BEGIN_NV04(chan, NvSubCopy, 0x0180, 3); 765 OUT_RING (chan, NvDmaFB);
794 OUT_RING (chan, NvNotify0); 766 OUT_RING (chan, NvDmaFB);
795 OUT_RING (chan, NvDmaFB);
796 OUT_RING (chan, NvDmaFB);
797 } else {
798 nouveau_ramht_remove(chan, NvNotify0);
799 }
800 } 767 }
801 768
802 return ret; 769 return ret;
@@ -895,16 +862,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
895static int 862static int
896nv04_bo_move_init(struct nouveau_channel *chan, u32 handle) 863nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
897{ 864{
898 int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000, 865 int ret = RING_SPACE(chan, 4);
899 &chan->m2mf_ntfy);
900 if (ret == 0) { 866 if (ret == 0) {
901 ret = RING_SPACE(chan, 4); 867 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
902 if (ret == 0) { 868 OUT_RING (chan, handle);
903 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); 869 BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
904 OUT_RING (chan, handle); 870 OUT_RING (chan, NvNotify0);
905 BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
906 OUT_RING (chan, NvNotify0);
907 }
908 } 871 }
909 872
910 return ret; 873 return ret;
@@ -915,8 +878,8 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
915 struct nouveau_channel *chan, struct ttm_mem_reg *mem) 878 struct nouveau_channel *chan, struct ttm_mem_reg *mem)
916{ 879{
917 if (mem->mem_type == TTM_PL_TT) 880 if (mem->mem_type == TTM_PL_TT)
918 return chan->gart_handle; 881 return NvDmaTT;
919 return chan->vram_handle; 882 return NvDmaFB;
920} 883}
921 884
922static int 885static int
@@ -972,8 +935,9 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
972 struct nouveau_mem *node = mem->mm_node; 935 struct nouveau_mem *node = mem->mm_node;
973 int ret; 936 int ret;
974 937
975 ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT, 938 ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
976 node->page_shift, NV_MEM_ACCESS_RO, vma); 939 PAGE_SHIFT, node->page_shift,
940 NV_MEM_ACCESS_RW, vma);
977 if (ret) 941 if (ret)
978 return ret; 942 return ret;
979 943
@@ -990,19 +954,19 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
990 bool no_wait_reserve, bool no_wait_gpu, 954 bool no_wait_reserve, bool no_wait_gpu,
991 struct ttm_mem_reg *new_mem) 955 struct ttm_mem_reg *new_mem)
992{ 956{
993 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 957 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 994	struct nouveau_channel *chan = chan = dev_priv->channel; 958	struct nouveau_channel *chan = drm->channel;
995 struct nouveau_bo *nvbo = nouveau_bo(bo); 959 struct nouveau_bo *nvbo = nouveau_bo(bo);
996 struct ttm_mem_reg *old_mem = &bo->mem; 960 struct ttm_mem_reg *old_mem = &bo->mem;
997 int ret; 961 int ret;
998 962
999 mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX); 963 mutex_lock(&chan->cli->mutex);
1000 964
1001 /* create temporary vmas for the transfer and attach them to the 965 /* create temporary vmas for the transfer and attach them to the
1002 * old nouveau_mem node, these will get cleaned up after ttm has 966 * old nouveau_mem node, these will get cleaned up after ttm has
1003 * destroyed the ttm_mem_reg 967 * destroyed the ttm_mem_reg
1004 */ 968 */
1005 if (dev_priv->card_type >= NV_50) { 969 if (nv_device(drm->device)->card_type >= NV_50) {
1006 struct nouveau_mem *node = old_mem->mm_node; 970 struct nouveau_mem *node = old_mem->mm_node;
1007 971
1008 ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]); 972 ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
@@ -1014,7 +978,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
1014 goto out; 978 goto out;
1015 } 979 }
1016 980
1017 ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem); 981 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
1018 if (ret == 0) { 982 if (ret == 0) {
1019 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, 983 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
1020 no_wait_reserve, 984 no_wait_reserve,
@@ -1022,14 +986,15 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
1022 } 986 }
1023 987
1024out: 988out:
1025 mutex_unlock(&chan->mutex); 989 mutex_unlock(&chan->cli->mutex);
1026 return ret; 990 return ret;
1027} 991}
1028 992
1029void 993void
1030nouveau_bo_move_init(struct nouveau_channel *chan) 994nouveau_bo_move_init(struct nouveau_channel *chan)
1031{ 995{
1032 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 996 struct nouveau_cli *cli = chan->cli;
997 struct nouveau_drm *drm = chan->drm;
1033 static const struct { 998 static const struct {
1034 const char *name; 999 const char *name;
1035 int engine; 1000 int engine;
@@ -1054,19 +1019,26 @@ nouveau_bo_move_init(struct nouveau_channel *chan)
1054 int ret; 1019 int ret;
1055 1020
1056 do { 1021 do {
1022 struct nouveau_object *object;
1057 u32 handle = (mthd->engine << 16) | mthd->oclass; 1023 u32 handle = (mthd->engine << 16) | mthd->oclass;
1058 ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass); 1024
1025 ret = nouveau_object_new(nv_object(cli), chan->handle, handle,
1026 mthd->oclass, NULL, 0, &object);
1059 if (ret == 0) { 1027 if (ret == 0) {
1060 ret = mthd->init(chan, handle); 1028 ret = mthd->init(chan, handle);
1061 if (ret == 0) { 1029 if (ret) {
1062 dev_priv->ttm.move = mthd->exec; 1030 nouveau_object_del(nv_object(cli),
1063 name = mthd->name; 1031 chan->handle, handle);
1064 break; 1032 continue;
1065 } 1033 }
1034
1035 drm->ttm.move = mthd->exec;
1036 name = mthd->name;
1037 break;
1066 } 1038 }
1067 } while ((++mthd)->exec); 1039 } while ((++mthd)->exec);
1068 1040
1069 NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name); 1041 NV_INFO(drm, "MM: using %s for buffer copies\n", name);
1070} 1042}
1071 1043
1072static int 1044static int
@@ -1151,7 +1123,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
1151 nouveau_vm_map(vma, new_mem->mm_node); 1123 nouveau_vm_map(vma, new_mem->mm_node);
1152 } else 1124 } else
1153 if (new_mem && new_mem->mem_type == TTM_PL_TT && 1125 if (new_mem && new_mem->mem_type == TTM_PL_TT &&
1154 nvbo->page_shift == nvvm_spg_shift(vma->vm)) { 1126 nvbo->page_shift == vma->vm->vmm->spg_shift) {
1155 if (((struct nouveau_mem *)new_mem->mm_node)->sg) 1127 if (((struct nouveau_mem *)new_mem->mm_node)->sg)
1156 nouveau_vm_map_sg_table(vma, 0, new_mem-> 1128 nouveau_vm_map_sg_table(vma, 0, new_mem->
1157 num_pages << PAGE_SHIFT, 1129 num_pages << PAGE_SHIFT,
@@ -1168,10 +1140,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
1168 1140
1169static int 1141static int
1170nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, 1142nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1171 struct nouveau_tile_reg **new_tile) 1143 struct nouveau_drm_tile **new_tile)
1172{ 1144{
1173 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 1145 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1174 struct drm_device *dev = dev_priv->dev; 1146 struct drm_device *dev = drm->dev;
1175 struct nouveau_bo *nvbo = nouveau_bo(bo); 1147 struct nouveau_bo *nvbo = nouveau_bo(bo);
1176 u64 offset = new_mem->start << PAGE_SHIFT; 1148 u64 offset = new_mem->start << PAGE_SHIFT;
1177 1149
@@ -1179,7 +1151,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1179 if (new_mem->mem_type != TTM_PL_VRAM) 1151 if (new_mem->mem_type != TTM_PL_VRAM)
1180 return 0; 1152 return 0;
1181 1153
1182 if (dev_priv->card_type >= NV_10) { 1154 if (nv_device(drm->device)->card_type >= NV_10) {
1183 *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size, 1155 *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
1184 nvbo->tile_mode, 1156 nvbo->tile_mode,
1185 nvbo->tile_flags); 1157 nvbo->tile_flags);
@@ -1190,11 +1162,11 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1190 1162
1191static void 1163static void
1192nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, 1164nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1193 struct nouveau_tile_reg *new_tile, 1165 struct nouveau_drm_tile *new_tile,
1194 struct nouveau_tile_reg **old_tile) 1166 struct nouveau_drm_tile **old_tile)
1195{ 1167{
1196 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 1168 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1197 struct drm_device *dev = dev_priv->dev; 1169 struct drm_device *dev = drm->dev;
1198 1170
1199 nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj); 1171 nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
1200 *old_tile = new_tile; 1172 *old_tile = new_tile;
@@ -1205,13 +1177,13 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1205 bool no_wait_reserve, bool no_wait_gpu, 1177 bool no_wait_reserve, bool no_wait_gpu,
1206 struct ttm_mem_reg *new_mem) 1178 struct ttm_mem_reg *new_mem)
1207{ 1179{
1208 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 1180 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1209 struct nouveau_bo *nvbo = nouveau_bo(bo); 1181 struct nouveau_bo *nvbo = nouveau_bo(bo);
1210 struct ttm_mem_reg *old_mem = &bo->mem; 1182 struct ttm_mem_reg *old_mem = &bo->mem;
1211 struct nouveau_tile_reg *new_tile = NULL; 1183 struct nouveau_drm_tile *new_tile = NULL;
1212 int ret = 0; 1184 int ret = 0;
1213 1185
1214 if (dev_priv->card_type < NV_50) { 1186 if (nv_device(drm->device)->card_type < NV_50) {
1215 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); 1187 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
1216 if (ret) 1188 if (ret)
1217 return ret; 1189 return ret;
@@ -1226,7 +1198,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1226 } 1198 }
1227 1199
1228 /* CPU copy if we have no accelerated method available */ 1200 /* CPU copy if we have no accelerated method available */
1229 if (!dev_priv->ttm.move) { 1201 if (!drm->ttm.move) {
1230 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 1202 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
1231 goto out; 1203 goto out;
1232 } 1204 }
@@ -1246,7 +1218,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1246 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 1218 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
1247 1219
1248out: 1220out:
1249 if (dev_priv->card_type < NV_50) { 1221 if (nv_device(drm->device)->card_type < NV_50) {
1250 if (ret) 1222 if (ret)
1251 nouveau_bo_vm_cleanup(bo, NULL, &new_tile); 1223 nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1252 else 1224 else
@@ -1266,8 +1238,8 @@ static int
1266nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 1238nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1267{ 1239{
1268 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 1240 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1269 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 1241 struct nouveau_drm *drm = nouveau_bdev(bdev);
1270 struct drm_device *dev = dev_priv->dev; 1242 struct drm_device *dev = drm->dev;
1271 int ret; 1243 int ret;
1272 1244
1273 mem->bus.addr = NULL; 1245 mem->bus.addr = NULL;
@@ -1283,9 +1255,9 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1283 return 0; 1255 return 0;
1284 case TTM_PL_TT: 1256 case TTM_PL_TT:
1285#if __OS_HAS_AGP 1257#if __OS_HAS_AGP
1286 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { 1258 if (drm->agp.stat == ENABLED) {
1287 mem->bus.offset = mem->start << PAGE_SHIFT; 1259 mem->bus.offset = mem->start << PAGE_SHIFT;
1288 mem->bus.base = dev_priv->gart_info.aper_base; 1260 mem->bus.base = drm->agp.base;
1289 mem->bus.is_iomem = true; 1261 mem->bus.is_iomem = true;
1290 } 1262 }
1291#endif 1263#endif
@@ -1294,10 +1266,11 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1294 mem->bus.offset = mem->start << PAGE_SHIFT; 1266 mem->bus.offset = mem->start << PAGE_SHIFT;
1295 mem->bus.base = pci_resource_start(dev->pdev, 1); 1267 mem->bus.base = pci_resource_start(dev->pdev, 1);
1296 mem->bus.is_iomem = true; 1268 mem->bus.is_iomem = true;
1297 if (dev_priv->card_type >= NV_50) { 1269 if (nv_device(drm->device)->card_type >= NV_50) {
1270 struct nouveau_bar *bar = nouveau_bar(drm->device);
1298 struct nouveau_mem *node = mem->mm_node; 1271 struct nouveau_mem *node = mem->mm_node;
1299 1272
1300 ret = nvbar_map(dev, node, NV_MEM_ACCESS_RW, 1273 ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
1301 &node->bar_vma); 1274 &node->bar_vma);
1302 if (ret) 1275 if (ret)
1303 return ret; 1276 return ret;
@@ -1314,40 +1287,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1314static void 1287static void
1315nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 1288nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1316{ 1289{
1317 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 1290 struct nouveau_drm *drm = nouveau_bdev(bdev);
1291 struct nouveau_bar *bar = nouveau_bar(drm->device);
1318 struct nouveau_mem *node = mem->mm_node; 1292 struct nouveau_mem *node = mem->mm_node;
1319 1293
1320 if (mem->mem_type != TTM_PL_VRAM)
1321 return;
1322
1323 if (!node->bar_vma.node) 1294 if (!node->bar_vma.node)
1324 return; 1295 return;
1325 1296
1326 nvbar_unmap(dev_priv->dev, &node->bar_vma); 1297 bar->unmap(bar, &node->bar_vma);
1327} 1298}
1328 1299
1329static int 1300static int
1330nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) 1301nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1331{ 1302{
1332 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 1303 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1333 struct nouveau_bo *nvbo = nouveau_bo(bo); 1304 struct nouveau_bo *nvbo = nouveau_bo(bo);
1305 struct nouveau_device *device = nv_device(drm->device);
1306 u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;
1334 1307
1335 /* as long as the bo isn't in vram, and isn't tiled, we've got 1308 /* as long as the bo isn't in vram, and isn't tiled, we've got
1336 * nothing to do here. 1309 * nothing to do here.
1337 */ 1310 */
1338 if (bo->mem.mem_type != TTM_PL_VRAM) { 1311 if (bo->mem.mem_type != TTM_PL_VRAM) {
1339 if (dev_priv->card_type < NV_50 || 1312 if (nv_device(drm->device)->card_type < NV_50 ||
1340 !nouveau_bo_tile_layout(nvbo)) 1313 !nouveau_bo_tile_layout(nvbo))
1341 return 0; 1314 return 0;
1342 } 1315 }
1343 1316
1344 /* make sure bo is in mappable vram */ 1317 /* make sure bo is in mappable vram */
1345 if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages) 1318 if (bo->mem.start + bo->mem.num_pages < mappable)
1346 return 0; 1319 return 0;
1347 1320
1348 1321
1349 nvbo->placement.fpfn = 0; 1322 nvbo->placement.fpfn = 0;
1350 nvbo->placement.lpfn = dev_priv->fb_mappable_pages; 1323 nvbo->placement.lpfn = mappable;
1351 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); 1324 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1352 return nouveau_bo_validate(nvbo, false, true, false); 1325 return nouveau_bo_validate(nvbo, false, true, false);
1353} 1326}
@@ -1356,7 +1329,7 @@ static int
1356nouveau_ttm_tt_populate(struct ttm_tt *ttm) 1329nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1357{ 1330{
1358 struct ttm_dma_tt *ttm_dma = (void *)ttm; 1331 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1359 struct drm_nouveau_private *dev_priv; 1332 struct nouveau_drm *drm;
1360 struct drm_device *dev; 1333 struct drm_device *dev;
1361 unsigned i; 1334 unsigned i;
1362 int r; 1335 int r;
@@ -1373,11 +1346,11 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1373 return 0; 1346 return 0;
1374 } 1347 }
1375 1348
1376 dev_priv = nouveau_bdev(ttm->bdev); 1349 drm = nouveau_bdev(ttm->bdev);
1377 dev = dev_priv->dev; 1350 dev = drm->dev;
1378 1351
1379#if __OS_HAS_AGP 1352#if __OS_HAS_AGP
1380 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { 1353 if (drm->agp.stat == ENABLED) {
1381 return ttm_agp_tt_populate(ttm); 1354 return ttm_agp_tt_populate(ttm);
1382 } 1355 }
1383#endif 1356#endif
@@ -1414,7 +1387,7 @@ static void
1414nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) 1387nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1415{ 1388{
1416 struct ttm_dma_tt *ttm_dma = (void *)ttm; 1389 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1417 struct drm_nouveau_private *dev_priv; 1390 struct nouveau_drm *drm;
1418 struct drm_device *dev; 1391 struct drm_device *dev;
1419 unsigned i; 1392 unsigned i;
1420 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); 1393 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1422,11 +1395,11 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1422 if (slave) 1395 if (slave)
1423 return; 1396 return;
1424 1397
1425 dev_priv = nouveau_bdev(ttm->bdev); 1398 drm = nouveau_bdev(ttm->bdev);
1426 dev = dev_priv->dev; 1399 dev = drm->dev;
1427 1400
1428#if __OS_HAS_AGP 1401#if __OS_HAS_AGP
1429 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { 1402 if (drm->agp.stat == ENABLED) {
1430 ttm_agp_tt_unpopulate(ttm); 1403 ttm_agp_tt_unpopulate(ttm);
1431 return; 1404 return;
1432 } 1405 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index a0a889cbf5ca..c42aea9fb546 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -2,13 +2,9 @@
2#define __NOUVEAU_BO_H__ 2#define __NOUVEAU_BO_H__
3 3
4struct nouveau_channel; 4struct nouveau_channel;
5struct nouveau_fence;
5struct nouveau_vma; 6struct nouveau_vma;
6 7
7struct nouveau_tile_reg {
8 bool used;
9 struct nouveau_fence *fence;
10};
11
12struct nouveau_bo { 8struct nouveau_bo {
13 struct ttm_buffer_object bo; 9 struct ttm_buffer_object bo;
14 struct ttm_placement placement; 10 struct ttm_placement placement;
@@ -29,7 +25,7 @@ struct nouveau_bo {
29 25
30 u32 tile_mode; 26 u32 tile_mode;
31 u32 tile_flags; 27 u32 tile_flags;
32 struct nouveau_tile_reg *tile; 28 struct nouveau_drm_tile *tile;
33 29
34 struct drm_gem_object *gem; 30 struct drm_gem_object *gem;
35 int pin_refcnt; 31 int pin_refcnt;
@@ -89,4 +85,15 @@ int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
89 struct nouveau_vma *); 85 struct nouveau_vma *);
90void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *); 86void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
91 87
88/* TODO: submit equivalent to TTM generic API upstream? */
89static inline void __iomem *
90nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
91{
92 bool is_iomem;
93 void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
94 &nvbo->kmap, &is_iomem);
95 WARN_ON_ONCE(ioptr && !is_iomem);
96 return ioptr;
97}
98
92#endif 99#endif
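
The new nvbo_kmap_obj_iovirtual() helper above hands back the kernel virtual address of a previously kmapped BO, warning if the mapping unexpectedly isn't iomem. A hedged usage sketch (assumes nouveau_bo_map(nvbo) already succeeded; 'value' and 'offset' are illustrative):

	/* Write one dword into an iomem-backed, kmapped BO. */
	void __iomem *ioptr = nvbo_kmap_obj_iovirtual(nvbo);
	if (ioptr)
		iowrite32(value, ioptr + offset);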
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
new file mode 100644
index 000000000000..3dd5f712b98c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -0,0 +1,387 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/client.h>
27#include <core/device.h>
28#include <core/class.h>
29
30#include <subdev/fb.h>
31#include <subdev/vm.h>
32#include <subdev/instmem.h>
33
34#include <engine/software.h>
35
36#include "nouveau_drm.h"
37#include "nouveau_dma.h"
38#include "nouveau_bo.h"
39#include "nouveau_chan.h"
40#include "nouveau_fence.h"
41#include "nouveau_abi16.h"
42
43MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
44static int nouveau_vram_pushbuf;
45module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
46
47int
48nouveau_channel_idle(struct nouveau_channel *chan)
49{
50 struct nouveau_drm *drm = chan->drm;
51 struct nouveau_fence *fence = NULL;
52 int ret;
53
54 ret = nouveau_fence_new(chan, &fence);
55 if (!ret) {
56 ret = nouveau_fence_wait(fence, false, false);
57 nouveau_fence_unref(&fence);
58 }
59
60 if (ret)
61 NV_ERROR(drm, "failed to idle channel 0x%08x\n", chan->handle);
62 return ret;
63}
64
65void
66nouveau_channel_del(struct nouveau_channel **pchan)
67{
68 struct nouveau_channel *chan = *pchan;
69 if (chan) {
70 struct nouveau_object *client = nv_object(chan->cli);
71 if (chan->fence) {
72 nouveau_channel_idle(chan);
73 nouveau_fence(chan->drm)->context_del(chan);
74 }
75 nouveau_object_del(client, NVDRM_DEVICE, chan->handle);
76 nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
77 nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
78 nouveau_bo_unmap(chan->push.buffer);
79 nouveau_bo_ref(NULL, &chan->push.buffer);
80 kfree(chan);
81 }
82 *pchan = NULL;
83}
84
85static int
86nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
87 u32 parent, u32 handle, u32 size,
88 struct nouveau_channel **pchan)
89{
90 struct nouveau_device *device = nv_device(drm->device);
91 struct nouveau_instmem *imem = nouveau_instmem(device);
92 struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
93 struct nouveau_fb *pfb = nouveau_fb(device);
94 struct nouveau_client *client = &cli->base;
95 struct nv_dma_class args = {};
96 struct nouveau_channel *chan;
97 struct nouveau_object *push;
98 u32 target;
99 int ret;
100
101 chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
102 if (!chan)
103 return -ENOMEM;
104
105 chan->cli = cli;
106 chan->drm = drm;
107 chan->handle = handle;
108
109 /* allocate memory for dma push buffer */
110 target = TTM_PL_FLAG_TT;
111 if (nouveau_vram_pushbuf)
112 target = TTM_PL_FLAG_VRAM;
113
114 ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL,
115 &chan->push.buffer);
116 if (ret == 0) {
117 ret = nouveau_bo_pin(chan->push.buffer, target);
118 if (ret == 0)
119 ret = nouveau_bo_map(chan->push.buffer);
120 }
121
122 if (ret) {
123 nouveau_channel_del(pchan);
124 return ret;
125 }
126
127 /* create dma object covering the *entire* memory space that the
 128	 * pushbuf lives in; this is because the GEM code requires that
129 * we be able to call out to other (indirect) push buffers
130 */
131 chan->push.vma.offset = chan->push.buffer->bo.offset;
132 chan->push.handle = NVDRM_PUSH | (handle & 0xffff);
133
134 if (device->card_type >= NV_50) {
135 ret = nouveau_bo_vma_add(chan->push.buffer, client->vm,
136 &chan->push.vma);
137 if (ret) {
138 nouveau_channel_del(pchan);
139 return ret;
140 }
141
142 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
143 args.start = 0;
144 args.limit = client->vm->vmm->limit - 1;
145 } else
146 if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
147 u64 limit = pfb->ram.size - imem->reserved - 1;
148 if (device->card_type == NV_04) {
149 /* nv04 vram pushbuf hack, retarget to its location in
150 * the framebuffer bar rather than direct vram access..
 151			 * no idea why this exists, it came from the -nv ddx.
152 */
153 args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR;
154 args.start = pci_resource_start(device->pdev, 1);
155 args.limit = args.start + limit;
156 } else {
157 args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
158 args.start = 0;
159 args.limit = limit;
160 }
161 } else {
162 if (chan->drm->agp.stat == ENABLED) {
163 args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
164 args.start = chan->drm->agp.base;
165 args.limit = chan->drm->agp.base +
166 chan->drm->agp.size - 1;
167 } else {
168 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
169 args.start = 0;
170 args.limit = vmm->limit - 1;
171 }
172 }
173
174 ret = nouveau_object_new(nv_object(chan->cli), parent,
175 chan->push.handle, 0x0002,
176 &args, sizeof(args), &push);
177 if (ret) {
178 nouveau_channel_del(pchan);
179 return ret;
180 }
181
182 return 0;
183}
184
185int
186nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli,
187 u32 parent, u32 handle, struct nouveau_channel **pchan)
188{
189 static const u16 oclasses[] = { 0xa06f, 0x906f, 0x826f, 0x506f, 0 };
190 const u16 *oclass = oclasses;
191 struct nv_channel_ind_class args;
192 struct nouveau_channel *chan;
193 int ret;
194
195 /* allocate dma push buffer */
196 ret = nouveau_channel_prep(drm, cli, parent, handle, 0x12000, &chan);
197 *pchan = chan;
198 if (ret)
199 return ret;
200
201 /* create channel object */
202 args.pushbuf = chan->push.handle;
203 args.ioffset = 0x10000 + chan->push.vma.offset;
204 args.ilength = 0x02000;
205
206 do {
207 ret = nouveau_object_new(nv_object(cli), parent, handle,
208 *oclass++, &args, sizeof(args),
209 &chan->object);
210 if (ret == 0)
211 return ret;
212 } while (*oclass);
213
214 nouveau_channel_del(pchan);
215 return ret;
216}
217
218static int
219nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli,
220 u32 parent, u32 handle, struct nouveau_channel **pchan)
221{
222 static const u16 oclasses[] = { 0x006e, 0 };
223 const u16 *oclass = oclasses;
224 struct nv_channel_dma_class args;
225 struct nouveau_channel *chan;
226 int ret;
227
228 /* allocate dma push buffer */
229 ret = nouveau_channel_prep(drm, cli, parent, handle, 0x10000, &chan);
230 *pchan = chan;
231 if (ret)
232 return ret;
233
234 /* create channel object */
235 args.pushbuf = chan->push.handle;
236 args.offset = chan->push.vma.offset;
237
238 do {
239 ret = nouveau_object_new(nv_object(cli), parent, handle,
240 *oclass++, &args, sizeof(args),
241 &chan->object);
242 if (ret == 0)
243 return ret;
 244	} while (*oclass);
245
246 nouveau_channel_del(pchan);
247 return ret;
248}
249
250static int
251nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
252{
253 struct nouveau_client *client = nv_client(chan->cli);
254 struct nouveau_device *device = nv_device(chan->drm->device);
255 struct nouveau_instmem *imem = nouveau_instmem(device);
256 struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
257 struct nouveau_fb *pfb = nouveau_fb(device);
258 struct nouveau_software_chan *swch;
259 struct nouveau_object *object;
260 struct nv_dma_class args;
261 int ret, i;
262
263 chan->vram = vram;
264 chan->gart = gart;
265
 266	/* allocate dma objects to cover all allowed vram and gart */
267 if (device->card_type < NV_C0) {
268 if (device->card_type >= NV_50) {
269 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
270 args.start = 0;
271 args.limit = client->vm->vmm->limit - 1;
272 } else {
273 args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
274 args.start = 0;
275 args.limit = pfb->ram.size - imem->reserved - 1;
276 }
277
278 ret = nouveau_object_new(nv_object(client), chan->handle, vram,
279 0x003d, &args, sizeof(args), &object);
280 if (ret)
281 return ret;
282
283 if (device->card_type >= NV_50) {
284 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
285 args.start = 0;
286 args.limit = client->vm->vmm->limit - 1;
287 } else
288 if (chan->drm->agp.stat == ENABLED) {
289 args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
290 args.start = chan->drm->agp.base;
291 args.limit = chan->drm->agp.base +
292 chan->drm->agp.size - 1;
293 } else {
294 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
295 args.start = 0;
296 args.limit = vmm->limit - 1;
297 }
298
299 ret = nouveau_object_new(nv_object(client), chan->handle, gart,
300 0x003d, &args, sizeof(args), &object);
301 if (ret)
302 return ret;
303 }
304
305 /* initialise dma tracking parameters */
306 switch (nv_hclass(chan->object) & 0xffff) {
307 case 0x006e:
308 chan->user_put = 0x40;
309 chan->user_get = 0x44;
310 chan->dma.max = (0x10000 / 4) - 2;
311 break;
312 default:
313 chan->user_put = 0x40;
314 chan->user_get = 0x44;
315 chan->user_get_hi = 0x60;
316 chan->dma.ib_base = 0x10000 / 4;
317 chan->dma.ib_max = (0x02000 / 8) - 1;
318 chan->dma.ib_put = 0;
319 chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
320 chan->dma.max = chan->dma.ib_base;
321 break;
322 }
323
324 chan->dma.put = 0;
325 chan->dma.cur = chan->dma.put;
326 chan->dma.free = chan->dma.max - chan->dma.cur;
327
328 ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
329 if (ret)
330 return ret;
331
332 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
333 OUT_RING(chan, 0x00000000);
334
335 /* allocate software object class (used for fences on <= nv05, and
336 * to signal flip completion), bind it to a subchannel.
337 */
338 ret = nouveau_object_new(nv_object(client), chan->handle,
339 NvSw, nouveau_abi16_swclass(chan->drm),
340 NULL, 0, &object);
341 if (ret)
342 return ret;
343
344 swch = (void *)object->parent;
345 swch->flip = nouveau_flip_complete;
346 swch->flip_data = chan;
347
348 if (device->card_type < NV_C0) {
349 ret = RING_SPACE(chan, 2);
350 if (ret)
351 return ret;
352
353 BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
354 OUT_RING (chan, NvSw);
355 FIRE_RING (chan);
356 }
357
358 /* initialise synchronisation */
359 return nouveau_fence(chan->drm)->context_new(chan);
360}
361
362int
363nouveau_channel_new(struct nouveau_drm *drm, struct nouveau_cli *cli,
364 u32 parent, u32 handle, u32 vram, u32 gart,
365 struct nouveau_channel **pchan)
366{
367 int ret;
368
369 ret = nouveau_channel_ind(drm, cli, parent, handle, pchan);
370 if (ret) {
371 NV_DEBUG(drm, "ib channel create, %d\n", ret);
372 ret = nouveau_channel_dma(drm, cli, parent, handle, pchan);
373 if (ret) {
374 NV_DEBUG(drm, "dma channel create, %d\n", ret);
375 return ret;
376 }
377 }
378
379 ret = nouveau_channel_init(*pchan, vram, gart);
380 if (ret) {
381 NV_ERROR(drm, "channel failed to initialise, %d\n", ret);
382 nouveau_channel_del(pchan);
383 return ret;
384 }
385
386 return 0;
387}
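
nouveau_channel_new() above tries the indirect (IB) channel classes first and falls back to the plain DMA class before running the common init. A sketch of how a caller is expected to use it; NVDRM_DEVICE/NVDRM_CHAN and the NvDmaFB/NvDmaTT handles follow the naming used elsewhere in this series, so treat the exact arguments as assumptions:

	struct nouveau_channel *chan = NULL;
	int ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
				      NVDRM_CHAN, NvDmaFB, NvDmaTT, &chan);
	if (ret)
		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);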
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
new file mode 100644
index 000000000000..0fa94244bed1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -0,0 +1,47 @@
1#ifndef __NOUVEAU_CHAN_H__
2#define __NOUVEAU_CHAN_H__
3
4struct nouveau_cli;
5
6struct nouveau_channel {
7 struct nouveau_cli *cli;
8 struct nouveau_drm *drm;
9
10 u32 handle;
11 u32 vram;
12 u32 gart;
13
14 struct {
15 struct nouveau_bo *buffer;
16 struct nouveau_vma vma;
17 u32 handle;
18 } push;
19
20 /* TODO: this will be reworked in the near future */
21 bool accel_done;
22 void *fence;
23 struct {
24 int max;
25 int free;
26 int cur;
27 int put;
28 int ib_base;
29 int ib_max;
30 int ib_free;
31 int ib_put;
32 } dma;
33 u32 user_get_hi;
34 u32 user_get;
35 u32 user_put;
36
37 struct nouveau_object *object;
38};
39
40
41int nouveau_channel_new(struct nouveau_drm *, struct nouveau_cli *,
42 u32 parent, u32 handle, u32 vram, u32 gart,
43 struct nouveau_channel **);
44void nouveau_channel_del(struct nouveau_channel **);
45int nouveau_channel_idle(struct nouveau_channel *);
46
47#endif
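
The dma substruct above carries the classic GET/PUT ring bookkeeping that the DMA submission code builds on. A hedged sketch of the free-space invariant around those fields ('get' stands for the value read back from chan->user_get; ring wrap and the IB path are omitted):

	/* Free space between our write cursor (dma.cur) and the GPU's read
	 * pointer: if GET trails us we can run to the end of the ring,
	 * otherwise we must stop one slot short of catching GET.
	 */
	static int
	ring_free_sketch(struct nouveau_channel *chan, int get)
	{
		if (get <= chan->dma.cur)
			return chan->dma.max - chan->dma.cur;
		return get - chan->dma.cur - 1;
	}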
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
deleted file mode 100644
index 285fde8ed3e3..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ /dev/null
@@ -1,408 +0,0 @@
1/*
2 * Copyright 2005-2006 Stephane Marchesin
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "nouveau_drv.h"
28#include <nouveau_drm.h>
29#include "nouveau_dma.h"
30#include <engine/fifo.h>
31#include <core/ramht.h>
32#include "nouveau_fence.h"
33#include "nouveau_software.h"
34
35MODULE_PARM_DESC(vram_pushbuf, "Force DMA push buffers to be in VRAM");
36int nouveau_vram_pushbuf;
37module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
38
39static int
40nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
41{
42 u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
43 struct drm_device *dev = chan->dev;
44 struct drm_nouveau_private *dev_priv = dev->dev_private;
45 int ret;
46
47 /* allocate buffer object */
48 ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, NULL, &chan->pushbuf_bo);
49 if (ret)
50 goto out;
51
52 ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
53 if (ret)
54 goto out;
55
56 ret = nouveau_bo_map(chan->pushbuf_bo);
57 if (ret)
58 goto out;
59
60 /* create DMA object covering the entire memtype where the push
61 * buffer resides, userspace can submit its own push buffers from
62 * anywhere within the same memtype.
63 */
64 chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
65 if (dev_priv->card_type >= NV_50) {
66 ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm,
67 &chan->pushbuf_vma);
68 if (ret)
69 goto out;
70
71 if (dev_priv->card_type < NV_C0) {
72 ret = nouveau_gpuobj_dma_new(chan,
73 NV_CLASS_DMA_IN_MEMORY, 0,
74 (1ULL << 40),
75 NV_MEM_ACCESS_RO,
76 NV_MEM_TARGET_VM,
77 &chan->pushbuf);
78 }
79 chan->pushbuf_base = chan->pushbuf_vma.offset;
80 } else
81 if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
82 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
83 dev_priv->gart_info.aper_size,
84 NV_MEM_ACCESS_RO,
85 NV_MEM_TARGET_GART,
86 &chan->pushbuf);
87 } else
88 if (dev_priv->card_type != NV_04) {
89 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
90 dev_priv->fb_available_size,
91 NV_MEM_ACCESS_RO,
92 NV_MEM_TARGET_VRAM,
93 &chan->pushbuf);
94 } else {
 95		/* NV04 cmdbuf hack, from original ddx.. not sure of its
96 * exact reason for existing :) PCI access to cmdbuf in
97 * VRAM.
98 */
99 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
100 pci_resource_start(dev->pdev, 1),
101 dev_priv->fb_available_size,
102 NV_MEM_ACCESS_RO,
103 NV_MEM_TARGET_PCI,
104 &chan->pushbuf);
105 }
106
107out:
108 if (ret) {
109 NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
110 nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
111 nouveau_gpuobj_ref(NULL, &chan->pushbuf);
112 if (chan->pushbuf_bo) {
113 nouveau_bo_unmap(chan->pushbuf_bo);
114 nouveau_bo_ref(NULL, &chan->pushbuf_bo);
115 }
116 }
117
118 return 0;
119}
120
121/* allocates and initializes a fifo for user space consumption */
122int
123nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
124 struct drm_file *file_priv,
125 uint32_t vram_handle, uint32_t gart_handle)
126{
127 struct drm_nouveau_private *dev_priv = dev->dev_private;
128 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
129 struct nouveau_fence_priv *fence = dev_priv->fence.func;
130 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
131 struct nouveau_channel *chan;
132 unsigned long flags;
133 int ret, i;
134
135 /* allocate and lock channel structure */
136 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
137 if (!chan)
138 return -ENOMEM;
139 chan->dev = dev;
140 chan->file_priv = file_priv;
141 chan->vram_handle = vram_handle;
142 chan->gart_handle = gart_handle;
143
144 kref_init(&chan->ref);
145 atomic_set(&chan->users, 1);
146 mutex_init(&chan->mutex);
147 mutex_lock(&chan->mutex);
148
149 /* allocate hw channel id */
150 spin_lock_irqsave(&dev_priv->channels.lock, flags);
151 for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
152 if ( dev_priv->card_type == NV_50 && chan->id == 0)
153 continue;
154
155 if (!dev_priv->channels.ptr[chan->id]) {
156 nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
157 break;
158 }
159 }
160 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
161
162 if (chan->id == pfifo->channels) {
163 mutex_unlock(&chan->mutex);
164 kfree(chan);
165 return -ENODEV;
166 }
167
168 NV_DEBUG(dev, "initialising channel %d\n", chan->id);
169
170 /* setup channel's memory and vm */
171 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
172 if (ret) {
173 NV_ERROR(dev, "gpuobj %d\n", ret);
174 nouveau_channel_put(&chan);
175 return ret;
176 }
177
178 /* Allocate space for per-channel fixed notifier memory */
179 ret = nouveau_notifier_init_channel(chan);
180 if (ret) {
181 NV_ERROR(dev, "ntfy %d\n", ret);
182 nouveau_channel_put(&chan);
183 return ret;
184 }
185
186 /* Allocate DMA push buffer */
187 ret = nouveau_channel_pushbuf_init(chan);
188 if (ret) {
189 NV_ERROR(dev, "pushbuf %d\n", ret);
190 nouveau_channel_put(&chan);
191 return ret;
192 }
193
194 nouveau_dma_init(chan);
195 chan->user_put = 0x40;
196 chan->user_get = 0x44;
197 if (dev_priv->card_type >= NV_50)
198 chan->user_get_hi = 0x60;
199
200 /* create fifo context */
201 ret = pfifo->base.context_new(chan, NVOBJ_ENGINE_FIFO);
202 if (ret) {
203 nouveau_channel_put(&chan);
204 return ret;
205 }
206
207 /* Insert NOPs for NOUVEAU_DMA_SKIPS */
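/* the skips area at the start of the pushbuf gives wrap-around jumps
 * a landing zone; nouveau_dma_wait() holds off until GET has left it,
 * avoiding a GET==PUT race
 */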
208 ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
209 if (ret) {
210 nouveau_channel_put(&chan);
211 return ret;
212 }
213
214 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
215 OUT_RING (chan, 0x00000000);
216
217 ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
218 if (ret) {
219 nouveau_channel_put(&chan);
220 return ret;
221 }
222
223 if (dev_priv->card_type < NV_C0) {
224 ret = RING_SPACE(chan, 2);
225 if (ret) {
226 nouveau_channel_put(&chan);
227 return ret;
228 }
229
230 BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
231 OUT_RING (chan, NvSw);
232 FIRE_RING (chan);
233 }
234
235 FIRE_RING(chan);
236
237 ret = fence->context_new(chan);
238 if (ret) {
239 nouveau_channel_put(&chan);
240 return ret;
241 }
242
243 nouveau_debugfs_channel_init(chan);
244
245 NV_DEBUG(dev, "channel %d initialised\n", chan->id);
246 if (fpriv) {
247 spin_lock(&fpriv->lock);
248 list_add(&chan->list, &fpriv->channels);
249 spin_unlock(&fpriv->lock);
250 }
251 *chan_ret = chan;
252 return 0;
253}
254
255struct nouveau_channel *
256nouveau_channel_get_unlocked(struct nouveau_channel *ref)
257{
258 struct nouveau_channel *chan = NULL;
259
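/* atomic_inc_not_zero() only takes a user reference while the channel
 * still has users, so a channel that is already being torn down can't
 * be revived here
 */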
260 if (likely(ref && atomic_inc_not_zero(&ref->users)))
261 nouveau_channel_ref(ref, &chan);
262
263 return chan;
264}
265
266struct nouveau_channel *
267nouveau_channel_get(struct drm_file *file_priv, int id)
268{
269 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
270 struct nouveau_channel *chan;
271
272 spin_lock(&fpriv->lock);
273 list_for_each_entry(chan, &fpriv->channels, list) {
274 if (chan->id == id) {
275 chan = nouveau_channel_get_unlocked(chan);
276 spin_unlock(&fpriv->lock);
/* get_unlocked returns NULL if the channel is already dying */
if (!chan)
return ERR_PTR(-EINVAL);
277 mutex_lock(&chan->mutex);
278 return chan;
279 }
280 }
281 spin_unlock(&fpriv->lock);
282
283 return ERR_PTR(-EINVAL);
284}
285
286void
287nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
288{
289 struct nouveau_channel *chan = *pchan;
290 struct drm_device *dev = chan->dev;
291 struct drm_nouveau_private *dev_priv = dev->dev_private;
292 struct nouveau_fence_priv *fence = dev_priv->fence.func;
293 unsigned long flags;
294 int i;
295
296 /* decrement the refcount; we're done if there are still refs */
297 if (likely(!atomic_dec_and_test(&chan->users))) {
298 nouveau_channel_ref(NULL, pchan);
299 return;
300 }
301
302 /* no one wants the channel anymore */
303 NV_DEBUG(dev, "freeing channel %d\n", chan->id);
304 nouveau_debugfs_channel_fini(chan);
305
306 /* give it a chance to idle */
307 nouveau_channel_idle(chan);
308
309 /* destroy the engine specific contexts */
310 for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) {
311 if (chan->engctx[i])
312 dev_priv->eng[i]->context_del(chan, i);
313 }
314
315 if (chan->fence)
316 fence->context_del(chan);
317
318 /* aside from its resources, the channel should now be dead;
319 * remove it from the channel list
320 */
321 spin_lock_irqsave(&dev_priv->channels.lock, flags);
322 nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
323 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
324
325 /* destroy any resources the channel owned */
326 nouveau_gpuobj_ref(NULL, &chan->pushbuf);
327 if (chan->pushbuf_bo) {
328 nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
329 nouveau_bo_unmap(chan->pushbuf_bo);
330 nouveau_bo_unpin(chan->pushbuf_bo);
331 nouveau_bo_ref(NULL, &chan->pushbuf_bo);
332 }
333 nouveau_ramht_ref(NULL, &chan->ramht, chan);
334 nouveau_notifier_takedown_channel(chan);
335 nouveau_gpuobj_channel_takedown(chan);
336
337 nouveau_channel_ref(NULL, pchan);
338}
339
340void
341nouveau_channel_put(struct nouveau_channel **pchan)
342{
343 mutex_unlock(&(*pchan)->mutex);
344 nouveau_channel_put_unlocked(pchan);
345}
346
347static void
348nouveau_channel_del(struct kref *ref)
349{
350 struct nouveau_channel *chan =
351 container_of(ref, struct nouveau_channel, ref);
352
353 kfree(chan);
354}
355
356void
357nouveau_channel_ref(struct nouveau_channel *chan,
358 struct nouveau_channel **pchan)
359{
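/* take the new reference before dropping the old one, so that
 * chan == *pchan is handled safely
 */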
360 if (chan)
361 kref_get(&chan->ref);
362
363 if (*pchan)
364 kref_put(&(*pchan)->ref, nouveau_channel_del);
365
366 *pchan = chan;
367}
368
369int
370nouveau_channel_idle(struct nouveau_channel *chan)
371{
372 struct drm_device *dev = chan->dev;
373 struct nouveau_fence *fence = NULL;
374 int ret;
375
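/* emit a fence and block on it; once it signals, all work submitted
 * to the channel beforehand is known to have executed
 */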
376 ret = nouveau_fence_new(chan, &fence);
377 if (!ret) {
378 ret = nouveau_fence_wait(fence, false, false);
379 nouveau_fence_unref(&fence);
380 }
381
382 if (ret)
383 NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
384 return ret;
385}
386
387/* cleans up all the fifos from file_priv */
388void
389nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
390{
391 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
392 struct nouveau_channel *chan;
393 int i;
394
395 if (!pfifo)
396 return;
397
398 NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
399 for (i = 0; i < pfifo->channels; i++) {
400 chan = nouveau_channel_get(file_priv, i);
401 if (IS_ERR(chan))
402 continue;
403
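/* drop the file's user reference; the put below then releases the
 * reference taken by nouveau_channel_get() and may free the channel
 */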
404 list_del(&chan->list);
405 atomic_dec(&chan->users);
406 nouveau_channel_put(&chan);
407 }
408}
diff --git a/drivers/gpu/drm/nouveau/nouveau_compat.c b/drivers/gpu/drm/nouveau/nouveau_compat.c
index 0403f2b94fa6..3db23496dff6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_compat.c
+++ b/drivers/gpu/drm/nouveau/nouveau_compat.c
@@ -1,4 +1,5 @@
1#include "nouveau_drm.h" 1#include "nouveau_drm.h"
2#include "nouveau_chan.h"
2#include "nouveau_compat.h" 3#include "nouveau_compat.h"
3 4
4#include <subdev/bios.h> 5#include <subdev/bios.h>
@@ -14,8 +15,6 @@
14#include <subdev/bar.h> 15#include <subdev/bar.h>
15#include <subdev/vm.h> 16#include <subdev/vm.h>
16 17
17void *nouveau_newpriv(struct drm_device *);
18
19int 18int
20nvdrm_gart_init(struct drm_device *dev, u64 *base, u64 *size) 19nvdrm_gart_init(struct drm_device *dev, u64 *base, u64 *size)
21{ 20{
@@ -583,3 +582,28 @@ nvvm_lpg_shift(struct nouveau_vm *vm)
583{ 582{
584 return vm->vmm->lpg_shift; 583 return vm->vmm->lpg_shift;
585} 584}
585
586u64 nvgpuobj_addr(struct nouveau_object *object)
587{
588 return nv_gpuobj(object)->addr;
589}
590
591struct drm_device *
592nouveau_drv(void *ptr)
593{
594 struct nouveau_drm *drm = ptr;
595 return drm->dev;
596}
597
598struct nouveau_channel *
599nvdrm_channel(struct drm_device *dev)
600{
601 struct nouveau_drm *drm = nouveau_newpriv(dev);
602 return drm->channel;
603}
604
605struct mutex *
606nvchan_mutex(struct nouveau_channel *chan)
607{
608 return &chan->cli->mutex;
609}
diff --git a/drivers/gpu/drm/nouveau/nouveau_compat.h b/drivers/gpu/drm/nouveau/nouveau_compat.h
index d691b2535c72..9f42d1d0f868 100644
--- a/drivers/gpu/drm/nouveau/nouveau_compat.h
+++ b/drivers/gpu/drm/nouveau/nouveau_compat.h
@@ -124,4 +124,18 @@ nvvm_spg_shift(struct nouveau_vm *);
124int 124int
125nvvm_lpg_shift(struct nouveau_vm *); 125nvvm_lpg_shift(struct nouveau_vm *);
126 126
127u32
128nv50_display_active_crtcs(struct drm_device *dev);
129
130u64 nvgpuobj_addr(struct nouveau_object *object);
131
132struct drm_device *
133nouveau_drv(void *drm);
134
135struct nouveau_channel *
136nvdrm_channel(struct drm_device *dev);
137
138struct mutex *
139nvchan_mutex(struct nouveau_channel *chan);
140
127#endif 141#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
deleted file mode 100644
index 6564b547973e..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ /dev/null
@@ -1,195 +0,0 @@
1/*
2 * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial
14 * portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
20 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26/*
27 * Authors:
28 * Ben Skeggs <bskeggs@redhat.com>
29 */
30
31#include <linux/debugfs.h>
32
33#include "drmP.h"
34#include "nouveau_drv.h"
35
36#include <ttm/ttm_page_alloc.h>
37
38static int
39nouveau_debugfs_channel_info(struct seq_file *m, void *data)
40{
41 struct drm_info_node *node = (struct drm_info_node *) m->private;
42 struct nouveau_channel *chan = node->info_ent->data;
43
44 seq_printf(m, "channel id : %d\n", chan->id);
45
46 seq_printf(m, "cpu fifo state:\n");
47 seq_printf(m, " base: 0x%10llx\n", chan->pushbuf_base);
48 seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2);
49 seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
50 seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
51 seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2);
52 if (chan->dma.ib_max) {
53 seq_printf(m, " ib max: 0x%08x\n", chan->dma.ib_max);
54 seq_printf(m, " ib put: 0x%08x\n", chan->dma.ib_put);
55 seq_printf(m, " ib free: 0x%08x\n", chan->dma.ib_free);
56 }
57
58 seq_printf(m, "gpu fifo state:\n");
59 seq_printf(m, " get: 0x%08x\n",
60 nvchan_rd32(chan, chan->user_get));
61 seq_printf(m, " put: 0x%08x\n",
62 nvchan_rd32(chan, chan->user_put));
63 if (chan->dma.ib_max) {
64 seq_printf(m, " ib get: 0x%08x\n",
65 nvchan_rd32(chan, 0x88));
66 seq_printf(m, " ib put: 0x%08x\n",
67 nvchan_rd32(chan, 0x8c));
68 }
69
70 return 0;
71}
72
73int
74nouveau_debugfs_channel_init(struct nouveau_channel *chan)
75{
76 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
77 struct drm_minor *minor = chan->dev->primary;
78 int ret;
79
80 if (!dev_priv->debugfs.channel_root) {
81 dev_priv->debugfs.channel_root =
82 debugfs_create_dir("channel", minor->debugfs_root);
83 if (!dev_priv->debugfs.channel_root)
84 return -ENOENT;
85 }
86
87 snprintf(chan->debugfs.name, 32, "%d", chan->id);
88 chan->debugfs.info.name = chan->debugfs.name;
89 chan->debugfs.info.show = nouveau_debugfs_channel_info;
90 chan->debugfs.info.driver_features = 0;
91 chan->debugfs.info.data = chan;
92
93 ret = drm_debugfs_create_files(&chan->debugfs.info, 1,
94 dev_priv->debugfs.channel_root,
95 chan->dev->primary);
96 if (ret == 0)
97 chan->debugfs.active = true;
98 return ret;
99}
100
101void
102nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
103{
104 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
105
106 if (!chan->debugfs.active)
107 return;
108
109 drm_debugfs_remove_files(&chan->debugfs.info, 1, chan->dev->primary);
110 chan->debugfs.active = false;
111
112 if (chan == dev_priv->channel) {
113 debugfs_remove(dev_priv->debugfs.channel_root);
114 dev_priv->debugfs.channel_root = NULL;
115 }
116}
117
118static int
119nouveau_debugfs_chipset_info(struct seq_file *m, void *data)
120{
121 struct drm_info_node *node = (struct drm_info_node *) m->private;
122 struct drm_minor *minor = node->minor;
123 struct drm_device *dev = minor->dev;
124 struct drm_nouveau_private *dev_priv = dev->dev_private;
125 uint32_t ppci_0;
126
127 ppci_0 = nv_rd32(dev, dev_priv->chipset >= 0x40 ? 0x88000 : 0x1800);
128
129 seq_printf(m, "PMC_BOOT_0: 0x%08x\n", nv_rd32(dev, NV03_PMC_BOOT_0));
130 seq_printf(m, "PCI ID : 0x%04x:0x%04x\n",
131 ppci_0 & 0xffff, ppci_0 >> 16);
132 return 0;
133}
134
135static int
136nouveau_debugfs_memory_info(struct seq_file *m, void *data)
137{
138 struct drm_info_node *node = (struct drm_info_node *) m->private;
139 struct drm_minor *minor = node->minor;
140
141 seq_printf(m, "VRAM total: %dKiB\n", (int)(nvfb_vram_size(minor->dev) >> 10));
142 return 0;
143}
144
145static int
146nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
147{
148 struct drm_info_node *node = (struct drm_info_node *) m->private;
149 struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
150 int i;
151
152 for (i = 0; i < dev_priv->vbios.length; i++)
153 seq_printf(m, "%c", dev_priv->vbios.data[i]);
154 return 0;
155}
156
157static int
158nouveau_debugfs_evict_vram(struct seq_file *m, void *data)
159{
160 struct drm_info_node *node = (struct drm_info_node *) m->private;
161 struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
162 int ret;
163
164 ret = ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
165 if (ret)
166 seq_printf(m, "failed: %d", ret);
167 else
168 seq_printf(m, "succeeded\n");
169 return 0;
170}
171
172static struct drm_info_list nouveau_debugfs_list[] = {
173 { "evict_vram", nouveau_debugfs_evict_vram, 0, NULL },
174 { "chipset", nouveau_debugfs_chipset_info, 0, NULL },
175 { "memory", nouveau_debugfs_memory_info, 0, NULL },
176 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
177 { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
178 { "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
179};
180#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
181
182int
183nouveau_debugfs_init(struct drm_minor *minor)
184{
185 drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
186 minor->debugfs_root, minor);
187 return 0;
188}
189
190void
191nouveau_debugfs_takedown(struct drm_minor *minor)
192{
193 drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
194 minor);
195}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index e0a56b277884..a60a9f51e890 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -33,10 +33,10 @@
33#include "nouveau_crtc.h" 33#include "nouveau_crtc.h"
34#include "nouveau_dma.h" 34#include "nouveau_dma.h"
35#include "nouveau_connector.h" 35#include "nouveau_connector.h"
36#include "nouveau_software.h"
37#include "nouveau_fence.h"
38#include "nv50_display.h" 36#include "nv50_display.h"
39 37
38#include "nouveau_fence.h"
39
40#include <subdev/bios/gpio.h> 40#include <subdev/bios/gpio.h>
41 41
42static void 42static void
@@ -260,6 +260,24 @@ nouveau_display_fini(struct drm_device *dev)
260 disp->fini(dev); 260 disp->fini(dev);
261} 261}
262 262
263static void
264nouveau_display_vblank_notify(void *data, int crtc)
265{
266 drm_handle_vblank(data, crtc);
267}
268
269static void
270nouveau_display_vblank_get(void *data, int crtc)
271{
272 drm_vblank_get(data, crtc);
273}
274
275static void
276nouveau_display_vblank_put(void *data, int crtc)
277{
278 drm_vblank_put(data, crtc);
279}
280
263int 281int
264nouveau_display_create(struct drm_device *dev) 282nouveau_display_create(struct drm_device *dev)
265{ 283{
@@ -365,6 +383,10 @@ nouveau_vblank_enable(struct drm_device *dev, int crtc)
365{ 383{
366 struct drm_nouveau_private *dev_priv = dev->dev_private; 384 struct drm_nouveau_private *dev_priv = dev->dev_private;
367 385
386 if (dev_priv->card_type >= NV_D0)
387 nv_mask(dev, 0x6100c0 + (crtc * 0x800), 1, 1);
388 else
389
368 if (dev_priv->card_type >= NV_50) 390 if (dev_priv->card_type >= NV_50)
369 nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0, 391 nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0,
370 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc)); 392 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
@@ -380,6 +402,9 @@ nouveau_vblank_disable(struct drm_device *dev, int crtc)
380{ 402{
381 struct drm_nouveau_private *dev_priv = dev->dev_private; 403 struct drm_nouveau_private *dev_priv = dev->dev_private;
382 404
405 if (dev_priv->card_type >= NV_D0)
406 nv_mask(dev, 0x6100c0 + (crtc * 0x800), 1, 0);
407 else
383 if (dev_priv->card_type >= NV_50) 408 if (dev_priv->card_type >= NV_50)
384 nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 409 nv_mask(dev, NV50_PDISPLAY_INTR_EN_1,
385 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0); 410 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
@@ -436,8 +461,8 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
436 struct nouveau_fence **pfence) 461 struct nouveau_fence **pfence)
437{ 462{
438 struct nouveau_fence_chan *fctx = chan->fence; 463 struct nouveau_fence_chan *fctx = chan->fence;
439 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 464 struct drm_device *dev = nouveau_drv(chan->drm);
440 struct drm_device *dev = chan->dev; 465 struct drm_nouveau_private *dev_priv = dev->dev_private;
441 unsigned long flags; 466 unsigned long flags;
442 int ret; 467 int ret;
443 468
@@ -492,7 +517,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
492 struct nouveau_fence *fence; 517 struct nouveau_fence *fence;
493 int ret; 518 int ret;
494 519
495 if (!dev_priv->channel) 520 if (!nvdrm_channel(dev))
496 return -ENODEV; 521 return -ENODEV;
497 522
498 s = kzalloc(sizeof(*s), GFP_KERNEL); 523 s = kzalloc(sizeof(*s), GFP_KERNEL);
@@ -513,10 +538,10 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
513 /* Choose the channel the flip will be handled in */ 538 /* Choose the channel the flip will be handled in */
514 fence = new_bo->bo.sync_obj; 539 fence = new_bo->bo.sync_obj;
515 if (fence) 540 if (fence)
516 chan = nouveau_channel_get_unlocked(fence->channel); 541 chan = fence->channel;
517 if (!chan) 542 if (!chan)
518 chan = nouveau_channel_get_unlocked(dev_priv->channel); 543 chan = nvdrm_channel(dev);
519 mutex_lock(&chan->mutex); 544 mutex_lock(nvchan_mutex(chan));
520 545
521 /* Emit a page flip */ 546 /* Emit a page flip */
522 if (dev_priv->card_type >= NV_50) { 547 if (dev_priv->card_type >= NV_50) {
@@ -525,13 +550,13 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
525 else 550 else
526 ret = nv50_display_flip_next(crtc, fb, chan); 551 ret = nv50_display_flip_next(crtc, fb, chan);
527 if (ret) { 552 if (ret) {
528 nouveau_channel_put(&chan); 553 mutex_unlock(nvchan_mutex(chan));
529 goto fail_unreserve; 554 goto fail_unreserve;
530 } 555 }
531 } 556 }
532 557
533 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); 558 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
534 nouveau_channel_put(&chan); 559 mutex_unlock(nvchan_mutex(chan));
535 if (ret) 560 if (ret)
536 goto fail_unreserve; 561 goto fail_unreserve;
537 562
@@ -554,14 +579,14 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
554 struct nouveau_page_flip_state *ps) 579 struct nouveau_page_flip_state *ps)
555{ 580{
556 struct nouveau_fence_chan *fctx = chan->fence; 581 struct nouveau_fence_chan *fctx = chan->fence;
557 struct drm_device *dev = chan->dev; 582 struct drm_device *dev = nouveau_drv(chan->drm);
558 struct nouveau_page_flip_state *s; 583 struct nouveau_page_flip_state *s;
559 unsigned long flags; 584 unsigned long flags;
560 585
561 spin_lock_irqsave(&dev->event_lock, flags); 586 spin_lock_irqsave(&dev->event_lock, flags);
562 587
563 if (list_empty(&fctx->flip)) { 588 if (list_empty(&fctx->flip)) {
564 NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id); 589 NV_ERROR(dev, "unexpected pageflip\n");
565 spin_unlock_irqrestore(&dev->event_lock, flags); 590 spin_unlock_irqrestore(&dev->event_lock, flags);
566 return -EINVAL; 591 return -EINVAL;
567 } 592 }
@@ -592,7 +617,7 @@ int
592nouveau_flip_complete(void *data) 617nouveau_flip_complete(void *data)
593{ 618{
594 struct nouveau_channel *chan = data; 619 struct nouveau_channel *chan = data;
595 struct drm_device *dev = chan->dev; 620 struct drm_device *dev = nouveau_drv(chan->drm);
596 struct drm_nouveau_private *dev_priv = dev->dev_private; 621 struct drm_nouveau_private *dev_priv = dev->dev_private;
597 struct nouveau_page_flip_state state; 622 struct nouveau_page_flip_state state;
598 623
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index efd082323405..40f91e1e5842 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -24,41 +24,16 @@
24 * 24 *
25 */ 25 */
26 26
27#include "drmP.h" 27#include <core/client.h>
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_dma.h"
31#include <core/ramht.h>
32
33void
34nouveau_dma_init(struct nouveau_channel *chan)
35{
36 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
37 struct nouveau_bo *pushbuf = chan->pushbuf_bo;
38
39 if (dev_priv->card_type >= NV_50) {
40 const int ib_size = pushbuf->bo.mem.size / 2;
41
42 chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
43 chan->dma.ib_max = (ib_size / 8) - 1;
44 chan->dma.ib_put = 0;
45 chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
46 28
47 chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2; 29#include "nouveau_drm.h"
48 } else { 30#include "nouveau_dma.h"
49 chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
50 }
51
52 chan->dma.put = 0;
53 chan->dma.cur = chan->dma.put;
54 chan->dma.free = chan->dma.max - chan->dma.cur;
55}
56 31
57void 32void
58OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords) 33OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
59{ 34{
60 bool is_iomem; 35 bool is_iomem;
61 u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem); 36 u32 *mem = ttm_kmap_obj_virtual(&chan->push.buffer->kmap, &is_iomem);
62 mem = &mem[chan->dma.cur]; 37 mem = &mem[chan->dma.cur];
63 if (is_iomem) 38 if (is_iomem)
64 memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4); 39 memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
@@ -79,9 +54,9 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
79{ 54{
80 uint64_t val; 55 uint64_t val;
81 56
82 val = nvchan_rd32(chan, chan->user_get); 57 val = nv_ro32(chan->object, chan->user_get);
83 if (chan->user_get_hi) 58 if (chan->user_get_hi)
84 val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32; 59 val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32;
85 60
86 /* reset counter as long as GET is still advancing, this is 61 /* reset counter as long as GET is still advancing, this is
87 * to avoid misdetecting a GPU lockup if the GPU happens to 62 * to avoid misdetecting a GPU lockup if the GPU happens to
@@ -93,32 +68,33 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
93 } 68 }
94 69
95 if ((++*timeout & 0xff) == 0) { 70 if ((++*timeout & 0xff) == 0) {
96 DRM_UDELAY(1); 71 udelay(1);
97 if (*timeout > 100000) 72 if (*timeout > 100000)
98 return -EBUSY; 73 return -EBUSY;
99 } 74 }
100 75
101 if (val < chan->pushbuf_base || 76 if (val < chan->push.vma.offset ||
102 val > chan->pushbuf_base + (chan->dma.max << 2)) 77 val > chan->push.vma.offset + (chan->dma.max << 2))
103 return -EINVAL; 78 return -EINVAL;
104 79
105 return (val - chan->pushbuf_base) >> 2; 80 return (val - chan->push.vma.offset) >> 2;
106} 81}
107 82
108void 83void
109nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, 84nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
110 int delta, int length) 85 int delta, int length)
111{ 86{
112 struct nouveau_bo *pb = chan->pushbuf_bo; 87 struct nouveau_bo *pb = chan->push.buffer;
113 struct nouveau_vma *vma; 88 struct nouveau_vma *vma;
114 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; 89 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
115 u64 offset; 90 u64 offset;
116 91
117 vma = nouveau_bo_vma_find(bo, chan->vm); 92 vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm);
118 BUG_ON(!vma); 93 BUG_ON(!vma);
119 offset = vma->offset + delta; 94 offset = vma->offset + delta;
120 95
121 BUG_ON(chan->dma.ib_free < 1); 96 BUG_ON(chan->dma.ib_free < 1);
97
122 nouveau_bo_wr32(pb, ip++, lower_32_bits(offset)); 98 nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
123 nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8); 99 nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
124 100
@@ -128,7 +104,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
128 /* Flush writes. */ 104 /* Flush writes. */
129 nouveau_bo_rd32(pb, 0); 105 nouveau_bo_rd32(pb, 0);
130 106
131 nvchan_wr32(chan, 0x8c, chan->dma.ib_put); 107 nv_wo32(chan->object, 0x8c, chan->dma.ib_put);
132 chan->dma.ib_free--; 108 chan->dma.ib_free--;
133} 109}
134 110
@@ -138,7 +114,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
138 uint32_t cnt = 0, prev_get = 0; 114 uint32_t cnt = 0, prev_get = 0;
139 115
140 while (chan->dma.ib_free < count) { 116 while (chan->dma.ib_free < count) {
141 uint32_t get = nvchan_rd32(chan, 0x88); 117 uint32_t get = nv_ro32(chan->object, 0x88);
142 if (get != prev_get) { 118 if (get != prev_get) {
143 prev_get = get; 119 prev_get = get;
144 cnt = 0; 120 cnt = 0;
@@ -249,7 +225,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
249 * instruct the GPU to jump back to the start right 225 * instruct the GPU to jump back to the start right
250 * after processing the currently pending commands. 226 * after processing the currently pending commands.
251 */ 227 */
252 OUT_RING(chan, chan->pushbuf_base | 0x20000000); 228 OUT_RING(chan, chan->push.vma.offset | 0x20000000);
253 229
254 /* wait for GET to depart from the skips area. 230 /* wait for GET to depart from the skips area.
255 * prevents writing GET==PUT and causing a race 231 * prevents writing GET==PUT and causing a race
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 8db68be9544f..5c2e22932d1c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -27,10 +27,10 @@
27#ifndef __NOUVEAU_DMA_H__ 27#ifndef __NOUVEAU_DMA_H__
28#define __NOUVEAU_DMA_H__ 28#define __NOUVEAU_DMA_H__
29 29
30#ifndef NOUVEAU_DMA_DEBUG 30#include "nouveau_bo.h"
31#define NOUVEAU_DMA_DEBUG 0 31#include "nouveau_chan.h"
32#endif
33 32
33int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
34void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *, 34void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
35 int delta, int length); 35 int delta, int length);
36 36
@@ -116,12 +116,7 @@ RING_SPACE(struct nouveau_channel *chan, int size)
116static inline void 116static inline void
117OUT_RING(struct nouveau_channel *chan, int data) 117OUT_RING(struct nouveau_channel *chan, int data)
118{ 118{
119 if (NOUVEAU_DMA_DEBUG) { 119 nouveau_bo_wr32(chan->push.buffer, chan->dma.cur++, data);
120 NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n",
121 chan->id, chan->dma.cur << 2, data);
122 }
123
124 nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data);
125} 120}
126 121
127extern void 122extern void
@@ -159,24 +154,19 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
159 154
160#define WRITE_PUT(val) do { \ 155#define WRITE_PUT(val) do { \
161 DRM_MEMORYBARRIER(); \ 156 DRM_MEMORYBARRIER(); \
162 nouveau_bo_rd32(chan->pushbuf_bo, 0); \ 157 nouveau_bo_rd32(chan->push.buffer, 0); \
163 nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base); \ 158 nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
164} while (0) 159} while (0)
165 160
166static inline void 161static inline void
167FIRE_RING(struct nouveau_channel *chan) 162FIRE_RING(struct nouveau_channel *chan)
168{ 163{
169 if (NOUVEAU_DMA_DEBUG) {
170 NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n",
171 chan->id, chan->dma.cur << 2);
172 }
173
174 if (chan->dma.cur == chan->dma.put) 164 if (chan->dma.cur == chan->dma.put)
175 return; 165 return;
176 chan->accel_done = true; 166 chan->accel_done = true;
177 167
178 if (chan->dma.ib_max) { 168 if (chan->dma.ib_max) {
179 nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2, 169 nv50_dma_push(chan, chan->push.buffer, chan->dma.put << 2,
180 (chan->dma.cur - chan->dma.put) << 2); 170 (chan->dma.cur - chan->dma.put) << 2);
181 } else { 171 } else {
182 WRITE_PUT(chan->dma.cur); 172 WRITE_PUT(chan->dma.cur);
@@ -191,4 +181,31 @@ WIND_RING(struct nouveau_channel *chan)
191 chan->dma.cur = chan->dma.put; 181 chan->dma.cur = chan->dma.put;
192} 182}
193 183
184/* FIFO methods */
185#define NV01_SUBCHAN_OBJECT 0x00000000
186#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH 0x00000010
187#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_LOW 0x00000014
188#define NV84_SUBCHAN_SEMAPHORE_SEQUENCE 0x00000018
189#define NV84_SUBCHAN_SEMAPHORE_TRIGGER 0x0000001c
190#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001
191#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
192#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
193#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
194#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020
195#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
196#define NV10_SUBCHAN_REF_CNT 0x00000050
197#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054
198#define NV11_SUBCHAN_DMA_SEMAPHORE 0x00000060
199#define NV11_SUBCHAN_SEMAPHORE_OFFSET 0x00000064
200#define NV11_SUBCHAN_SEMAPHORE_ACQUIRE 0x00000068
201#define NV11_SUBCHAN_SEMAPHORE_RELEASE 0x0000006c
202#define NV40_SUBCHAN_YIELD 0x00000080
203
204/* NV_SW object class */
205#define NV_SW_DMA_VBLSEM 0x0000018c
206#define NV_SW_VBLSEM_OFFSET 0x00000400
207#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
208#define NV_SW_VBLSEM_RELEASE 0x00000408
209#define NV_SW_PAGE_FLIP 0x00000500
210
194#endif 211#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 3b4e65d5122b..92ecf50a39d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -27,12 +27,20 @@
27 27
28#include <core/device.h> 28#include <core/device.h>
29#include <core/client.h> 29#include <core/client.h>
30#include <core/gpuobj.h>
30#include <core/class.h> 31#include <core/class.h>
31 32
32#include <subdev/device.h> 33#include <subdev/device.h>
34#include <subdev/vm.h>
33 35
34#include "nouveau_drm.h" 36#include "nouveau_drm.h"
37#include "nouveau_dma.h"
35#include "nouveau_agp.h" 38#include "nouveau_agp.h"
39#include "nouveau_abi16.h"
40#include "nouveau_fbcon.h"
41#include "nouveau_fence.h"
42
43#include "nouveau_ttm.h"
36 44
37int __devinit nouveau_pci_probe(struct pci_dev *, const struct pci_device_id *); 45int __devinit nouveau_pci_probe(struct pci_dev *, const struct pci_device_id *);
38void nouveau_pci_remove(struct pci_dev *); 46void nouveau_pci_remove(struct pci_dev *);
@@ -43,7 +51,6 @@ void __exit nouveau_exit(struct pci_driver *);
43 51
44int nouveau_load(struct drm_device *, unsigned long); 52int nouveau_load(struct drm_device *, unsigned long);
45int nouveau_unload(struct drm_device *); 53int nouveau_unload(struct drm_device *);
46void *nouveau_newpriv(struct drm_device *);
47 54
48MODULE_PARM_DESC(config, "option string to pass to driver core"); 55MODULE_PARM_DESC(config, "option string to pass to driver core");
49static char *nouveau_config; 56static char *nouveau_config;
@@ -53,6 +60,10 @@ MODULE_PARM_DESC(debug, "debug string to pass to driver core");
53static char *nouveau_debug; 60static char *nouveau_debug;
54module_param_named(debug, nouveau_debug, charp, 0400); 61module_param_named(debug, nouveau_debug, charp, 0400);
55 62
63MODULE_PARM_DESC(noaccel, "disable kernel/abi16 acceleration");
64static int nouveau_noaccel = 0;
65module_param_named(noaccel, nouveau_noaccel, int, 0400);
66
56static u64 67static u64
57nouveau_name(struct pci_dev *pdev) 68nouveau_name(struct pci_dev *pdev)
58{ 69{
@@ -82,17 +93,112 @@ static void
82nouveau_cli_destroy(struct nouveau_cli *cli) 93nouveau_cli_destroy(struct nouveau_cli *cli)
83{ 94{
84 struct nouveau_object *client = nv_object(cli); 95 struct nouveau_object *client = nv_object(cli);
96 nouveau_vm_ref(NULL, &cli->base.vm, NULL);
85 nouveau_client_fini(&cli->base, false); 97 nouveau_client_fini(&cli->base, false);
86 atomic_set(&client->refcount, 1); 98 atomic_set(&client->refcount, 1);
87 nouveau_object_ref(NULL, &client); 99 nouveau_object_ref(NULL, &client);
88} 100}
89 101
102static void
103nouveau_accel_fini(struct nouveau_drm *drm)
104{
105 nouveau_gpuobj_ref(NULL, &drm->notify);
106 nouveau_channel_del(&drm->channel);
107 if (drm->fence)
108 nouveau_fence(drm)->dtor(drm);
109}
110
111static void
112nouveau_accel_init(struct nouveau_drm *drm)
113{
114 struct nouveau_device *device = nv_device(drm->device);
115 struct nouveau_object *object;
116 int ret;
117
118 if (nouveau_noaccel)
119 return;
120
121 /* initialise synchronisation routines */
122 if (device->card_type < NV_10) ret = nv04_fence_create(drm);
123 else if (device->chipset < 0x84) ret = nv10_fence_create(drm);
124 else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
125 else ret = nvc0_fence_create(drm);
126 if (ret) {
127 NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
128 nouveau_accel_fini(drm);
129 return;
130 }
131
132 ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN,
133 NvDmaFB, NvDmaTT, &drm->channel);
134 if (ret) {
135 NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
136 nouveau_accel_fini(drm);
137 return;
138 }
139
140 if (device->card_type < NV_C0) {
141 ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
142 &drm->notify);
143 if (ret) {
144 NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
145 nouveau_accel_fini(drm);
146 return;
147 }
148
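/* wrap the notifier in a DMA object (class 0x003d, DMA-in-memory)
 * so it can be referenced by methods on the channel
 */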
149 ret = nouveau_object_new(nv_object(drm),
150 drm->channel->handle, NvNotify0,
151 0x003d, &(struct nv_dma_class) {
152 .flags = NV_DMA_TARGET_VRAM |
153 NV_DMA_ACCESS_RDWR,
154 .start = drm->notify->addr,
155 .limit = drm->notify->addr + 31
156 }, sizeof(struct nv_dma_class),
157 &object);
158 if (ret) {
159 nouveau_accel_fini(drm);
160 return;
161 }
162 }
163
164
165 nouveau_bo_move_init(drm->channel);
166}
167
90static int __devinit 168static int __devinit
91nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent) 169nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
92{ 170{
93 struct nouveau_device *device; 171 struct nouveau_device *device;
172 struct apertures_struct *aper;
173 bool boot = false;
94 int ret; 174 int ret;
95 175
176 /* remove conflicting drivers (vesafb, efifb etc) */
177 aper = alloc_apertures(3);
178 if (!aper)
179 return -ENOMEM;
180
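/* BAR1 is the framebuffer aperture; BAR2/BAR3, where present, map
 * further apertures depending on the board
 */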
181 aper->ranges[0].base = pci_resource_start(pdev, 1);
182 aper->ranges[0].size = pci_resource_len(pdev, 1);
183 aper->count = 1;
184
185 if (pci_resource_len(pdev, 2)) {
186 aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
187 aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
188 aper->count++;
189 }
190
191 if (pci_resource_len(pdev, 3)) {
192 aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
193 aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
194 aper->count++;
195 }
196
197#ifdef CONFIG_X86
198 boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
199#endif
200 remove_conflicting_framebuffers(aper, "nouveaufb", boot);
201
96 ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev), 202 ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev),
97 nouveau_config, nouveau_debug, &device); 203 nouveau_config, nouveau_debug, &device);
98 if (ret) 204 if (ret)
@@ -102,7 +208,7 @@ nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
102 208
103 ret = nouveau_pci_probe(pdev, pent); 209 ret = nouveau_pci_probe(pdev, pent);
104 if (ret) { 210 if (ret) {
105 nouveau_device_destroy(&device); 211 nouveau_object_ref(NULL, (struct nouveau_object **)&device);
106 return ret; 212 return ret;
107 } 213 }
108 214
@@ -113,6 +219,7 @@ int
113nouveau_drm_load(struct drm_device *dev, unsigned long flags) 219nouveau_drm_load(struct drm_device *dev, unsigned long flags)
114{ 220{
115 struct pci_dev *pdev = dev->pdev; 221 struct pci_dev *pdev = dev->pdev;
222 struct nouveau_device *device;
116 struct nouveau_drm *drm; 223 struct nouveau_drm *drm;
117 int ret; 224 int ret;
118 225
@@ -122,6 +229,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
122 return ret; 229 return ret;
123 230
124 INIT_LIST_HEAD(&drm->clients); 231 INIT_LIST_HEAD(&drm->clients);
232 spin_lock_init(&drm->tile.lock);
125 drm->dev = dev; 233 drm->dev = dev;
126 234
127 /* make sure AGP controller is in a consistent state before we 235 /* make sure AGP controller is in a consistent state before we
@@ -142,7 +250,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
142 }, sizeof(struct nv_device_class), 250 }, sizeof(struct nv_device_class),
143 &drm->device); 251 &drm->device);
144 if (ret) 252 if (ret)
145 return ret; 253 goto fail_device;
146 254
147 nouveau_agp_reset(drm); 255 nouveau_agp_reset(drm);
148 nouveau_object_del(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE); 256 nouveau_object_del(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE);
@@ -158,15 +266,32 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
158 if (ret) 266 if (ret)
159 goto fail_device; 267 goto fail_device;
160 268
269 device = nv_device(drm->device);
270
161 /* initialise AGP */ 271 /* initialise AGP */
162 nouveau_agp_init(drm); 272 nouveau_agp_init(drm);
163 273
164 ret = nouveau_load(dev, flags); 274 if (device->card_type >= NV_50) {
275 ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
276 0x1000, &drm->client.base.vm);
277 if (ret)
278 goto fail_device;
279 }
280
281 ret = nouveau_ttm_init(drm);
165 if (ret) 282 if (ret)
166 goto fail_device; 283 goto fail_device;
167 284
285 ret = nouveau_load(dev, flags);
286 if (ret)
287 goto fail_load;
288
289 nouveau_accel_init(drm);
290 nouveau_fbcon_init(dev);
168 return 0; 291 return 0;
169 292
293fail_load:
294 nouveau_ttm_fini(drm);
170fail_device: 295fail_device:
171 nouveau_cli_destroy(&drm->client); 296 nouveau_cli_destroy(&drm->client);
172 return ret; 297 return ret;
@@ -179,10 +304,14 @@ nouveau_drm_unload(struct drm_device *dev)
179 struct pci_dev *pdev = dev->pdev; 304 struct pci_dev *pdev = dev->pdev;
180 int ret; 305 int ret;
181 306
307 nouveau_fbcon_fini(dev);
308 nouveau_accel_fini(drm);
309
182 ret = nouveau_unload(dev); 310 ret = nouveau_unload(dev);
183 if (ret) 311 if (ret)
184 return ret; 312 return ret;
185 313
314 nouveau_ttm_fini(drm);
186 nouveau_agp_fini(drm); 315 nouveau_agp_fini(drm);
187 316
188 pci_set_drvdata(pdev, drm->client.base.device); 317 pci_set_drvdata(pdev, drm->client.base.device);
@@ -193,10 +322,11 @@ nouveau_drm_unload(struct drm_device *dev)
193static void 322static void
194nouveau_drm_remove(struct pci_dev *pdev) 323nouveau_drm_remove(struct pci_dev *pdev)
195{ 324{
196 struct nouveau_device *device; 325 struct nouveau_object *device;
197 nouveau_pci_remove(pdev); 326 nouveau_pci_remove(pdev);
198 device = pci_get_drvdata(pdev); 327 device = pci_get_drvdata(pdev);
199 nouveau_device_destroy(&device); 328 nouveau_object_ref(NULL, &device);
329 nouveau_object_debug();
200} 330}
201 331
202int 332int
@@ -211,10 +341,23 @@ nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
211 pm_state.event == PM_EVENT_PRETHAW) 341 pm_state.event == PM_EVENT_PRETHAW)
212 return 0; 342 return 0;
213 343
344 NV_INFO(drm, "suspending fbcon...\n");
345 nouveau_fbcon_set_suspend(dev, 1);
346
347 NV_INFO(drm, "suspending drm...\n");
214 ret = nouveau_pci_suspend(pdev, pm_state); 348 ret = nouveau_pci_suspend(pdev, pm_state);
215 if (ret) 349 if (ret)
216 return ret; 350 return ret;
217 351
352 NV_INFO(drm, "evicting buffers...\n");
353 ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
354
355 if (drm->fence && nouveau_fence(drm)->suspend) {
356 if (!nouveau_fence(drm)->suspend(drm))
357 return -ENOMEM;
358 }
359
360 NV_INFO(drm, "suspending client object trees...\n");
218 list_for_each_entry(cli, &drm->clients, head) { 361 list_for_each_entry(cli, &drm->clients, head) {
219 ret = nouveau_client_fini(&cli->base, true); 362 ret = nouveau_client_fini(&cli->base, true);
220 if (ret) 363 if (ret)
@@ -255,6 +398,7 @@ nouveau_drm_resume(struct pci_dev *pdev)
255 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 398 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
256 return 0; 399 return 0;
257 400
401 NV_INFO(drm, "re-enabling device...\n");
258 pci_set_power_state(pdev, PCI_D0); 402 pci_set_power_state(pdev, PCI_D0);
259 pci_restore_state(pdev); 403 pci_restore_state(pdev);
260 ret = pci_enable_device(pdev); 404 ret = pci_enable_device(pdev);
@@ -264,17 +408,70 @@ nouveau_drm_resume(struct pci_dev *pdev)
264 408
265 nouveau_agp_reset(drm); 409 nouveau_agp_reset(drm);
266 410
411 NV_INFO(drm, "resuming client object trees...\n");
267 nouveau_client_init(&drm->client.base); 412 nouveau_client_init(&drm->client.base);
413 nouveau_agp_init(drm);
268 414
269 list_for_each_entry(cli, &drm->clients, head) { 415 list_for_each_entry(cli, &drm->clients, head) {
270 nouveau_client_init(&cli->base); 416 nouveau_client_init(&cli->base);
271 } 417 }
272 418
273 nouveau_agp_init(drm); 419 if (drm->fence && nouveau_fence(drm)->resume)
420 nouveau_fence(drm)->resume(drm);
274 421
275 return nouveau_pci_resume(pdev); 422 return nouveau_pci_resume(pdev);
276} 423}
277 424
425int
426nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
427{
428 struct pci_dev *pdev = dev->pdev;
429 struct nouveau_drm *drm = nouveau_drm(dev);
430 struct nouveau_cli *cli;
431 int ret;
432
433 ret = nouveau_cli_create(pdev, fpriv->pid, sizeof(*cli), (void **)&cli);
434 if (ret)
435 return ret;
436
437 if (nv_device(drm->device)->card_type >= NV_50) {
438 ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
439 0x1000, &cli->base.vm);
440 if (ret) {
441 nouveau_cli_destroy(cli);
442 return ret;
443 }
444 }
445
446 fpriv->driver_priv = cli;
447
448 mutex_lock(&drm->client.mutex);
449 list_add(&cli->head, &drm->clients);
450 mutex_unlock(&drm->client.mutex);
451 return 0;
452}
453
454void
455nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
456{
457 struct nouveau_cli *cli = nouveau_cli(fpriv);
458 struct nouveau_drm *drm = nouveau_drm(dev);
459
460 if (cli->abi16)
461 nouveau_abi16_fini(cli->abi16);
462
463 mutex_lock(&drm->client.mutex);
464 list_del(&cli->head);
465 mutex_unlock(&drm->client.mutex);
466}
467
468void
469nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
470{
471 struct nouveau_cli *cli = nouveau_cli(fpriv);
472 nouveau_cli_destroy(cli);
473}
474
278static struct pci_device_id 475static struct pci_device_id
279nouveau_drm_pci_table[] = { 476nouveau_drm_pci_table[] = {
280 { 477 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 87698067244b..2e3364d50ca0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -3,20 +3,50 @@
3 3
4#include <core/client.h> 4#include <core/client.h>
5 5
6#include <subdev/vm.h>
7
6#include <drmP.h> 8#include <drmP.h>
7#include <drm/nouveau_drm.h> 9#include <drm/nouveau_drm.h>
8 10
11#include "ttm/ttm_bo_api.h"
12#include "ttm/ttm_bo_driver.h"
13#include "ttm/ttm_placement.h"
14#include "ttm/ttm_memory.h"
15#include "ttm/ttm_module.h"
16#include "ttm/ttm_page_alloc.h"
17
18struct nouveau_channel;
19
20#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
21
22#include "nouveau_revcompat.h"
23#include "nouveau_fence.h"
24
25struct nouveau_drm_tile {
26 struct nouveau_fence *fence;
27 bool used;
28};
29
9enum nouveau_drm_handle { 30enum nouveau_drm_handle {
10 NVDRM_CLIENT = 0xffffffff, 31 NVDRM_CLIENT = 0xffffffff,
11 NVDRM_DEVICE = 0xdddddddd, 32 NVDRM_DEVICE = 0xdddddddd,
33 NVDRM_PUSH = 0xbbbb0000, /* |= client chid */
34 NVDRM_CHAN = 0xcccc0000, /* |= client chid */
12}; 35};
13 36
14struct nouveau_cli { 37struct nouveau_cli {
15 struct nouveau_client base; 38 struct nouveau_client base;
16 struct list_head head; 39 struct list_head head;
17 struct mutex mutex; 40 struct mutex mutex;
41 void *abi16;
18}; 42};
19 43
44static inline struct nouveau_cli *
45nouveau_cli(struct drm_file *fpriv)
46{
47 return fpriv ? fpriv->driver_priv : NULL;
48}
49
20struct nouveau_drm { 50struct nouveau_drm {
21 struct nouveau_cli client; 51 struct nouveau_cli client;
22 struct drm_device *dev; 52 struct drm_device *dev;
@@ -33,8 +63,46 @@ struct nouveau_drm {
33 u32 base; 63 u32 base;
34 u32 size; 64 u32 size;
35 } agp; 65 } agp;
66
67 /* TTM interface support */
68 struct {
69 struct drm_global_reference mem_global_ref;
70 struct ttm_bo_global_ref bo_global_ref;
71 struct ttm_bo_device bdev;
72 atomic_t validate_sequence;
73 int (*move)(struct nouveau_channel *,
74 struct ttm_buffer_object *,
75 struct ttm_mem_reg *, struct ttm_mem_reg *);
76 int mtrr;
77 } ttm;
78
79 /* GEM interface support */
80 struct {
81 u64 vram_available;
82 u64 gart_available;
83 } gem;
84
85 /* synchronisation */
86 void *fence;
87
88 /* context for accelerated drm-internal operations */
89 struct nouveau_channel *channel;
90 struct nouveau_gpuobj *notify;
91 struct nouveau_fbdev *fbcon;
92
93 /* nv10-nv40 tiling regions */
94 struct {
95 struct nouveau_drm_tile reg[15];
96 spinlock_t lock;
97 } tile;
36}; 98};
37 99
100static inline struct nouveau_drm *
101nouveau_drm(struct drm_device *dev)
102{
103 return nouveau_newpriv(dev);
104}
105
38int nouveau_drm_suspend(struct pci_dev *, pm_message_t); 106int nouveau_drm_suspend(struct pci_dev *, pm_message_t);
39int nouveau_drm_resume(struct pci_dev *); 107int nouveau_drm_resume(struct pci_dev *);
40 108
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index db150d9e0cd4..2294cb8848f7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -35,7 +35,6 @@
35#include "nouveau_fbcon.h" 35#include "nouveau_fbcon.h"
36#include "nouveau_fence.h" 36#include "nouveau_fence.h"
37#include "nouveau_pm.h" 37#include "nouveau_pm.h"
38#include <engine/fifo.h>
39#include "nv50_display.h" 38#include "nv50_display.h"
40 39
41#include "drm_pciids.h" 40#include "drm_pciids.h"
@@ -68,14 +67,6 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
68int nouveau_ignorelid = 0; 67int nouveau_ignorelid = 0;
69module_param_named(ignorelid, nouveau_ignorelid, int, 0400); 68module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
70 69
71MODULE_PARM_DESC(noaccel, "Disable all acceleration");
72int nouveau_noaccel = -1;
73module_param_named(noaccel, nouveau_noaccel, int, 0400);
74
75MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
76int nouveau_nofbaccel = 0;
77module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
78
79MODULE_PARM_DESC(force_post, "Force POST"); 70MODULE_PARM_DESC(force_post, "Force POST");
80int nouveau_force_post = 0; 71int nouveau_force_post = 0;
81module_param_named(force_post, nouveau_force_post, int, 0400); 72module_param_named(force_post, nouveau_force_post, int, 0400);
@@ -148,19 +139,11 @@ int
148nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) 139nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
149{ 140{
150 struct drm_device *dev = pci_get_drvdata(pdev); 141 struct drm_device *dev = pci_get_drvdata(pdev);
151 struct drm_nouveau_private *dev_priv = dev->dev_private;
152 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
153 struct nouveau_fence_priv *fence = dev_priv->fence.func;
154 struct nouveau_channel *chan;
155 struct drm_crtc *crtc; 142 struct drm_crtc *crtc;
156 int ret, i, e;
157 143
158 NV_INFO(dev, "Disabling display...\n"); 144 NV_INFO(dev, "Disabling display...\n");
159 nouveau_display_fini(dev); 145 nouveau_display_fini(dev);
160 146
161 NV_INFO(dev, "Disabling fbcon...\n");
162 nouveau_fbcon_set_suspend(dev, 1);
163
164 NV_INFO(dev, "Unpinning framebuffer(s)...\n"); 147 NV_INFO(dev, "Unpinning framebuffer(s)...\n");
165 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 148 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
166 struct nouveau_framebuffer *nouveau_fb; 149 struct nouveau_framebuffer *nouveau_fb;
@@ -179,74 +162,23 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
179 nouveau_bo_unpin(nv_crtc->cursor.nvbo); 162 nouveau_bo_unpin(nv_crtc->cursor.nvbo);
180 } 163 }
181 164
182 NV_INFO(dev, "Evicting buffers...\n");
183 ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
184
185 NV_INFO(dev, "Idling channels...\n");
186 for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
187 chan = dev_priv->channels.ptr[i];
188
189 if (chan && chan->pushbuf_bo)
190 nouveau_channel_idle(chan);
191 }
192
193 if (fence->suspend) {
194 if (!fence->suspend(dev))
195 return -ENOMEM;
196 }
197
198 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
199 if (!dev_priv->eng[e])
200 continue;
201
202 ret = dev_priv->eng[e]->fini(dev, e, true);
203 if (ret) {
204 NV_ERROR(dev, "... engine %d failed: %d\n", e, ret);
205 goto out_abort;
206 }
207 }
208
209 return 0; 165 return 0;
210
211out_abort:
212 NV_INFO(dev, "Re-enabling acceleration..\n");
213 for (e = e + 1; e < NVOBJ_ENGINE_NR; e++) {
214 if (dev_priv->eng[e])
215 dev_priv->eng[e]->init(dev, e);
216 }
217 return ret;
218} 166}
219 167
220int 168int
221nouveau_pci_resume(struct pci_dev *pdev) 169nouveau_pci_resume(struct pci_dev *pdev)
222{ 170{
223 struct drm_device *dev = pci_get_drvdata(pdev); 171 struct drm_device *dev = pci_get_drvdata(pdev);
224 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
225 struct drm_nouveau_private *dev_priv = dev->dev_private;
226 struct nouveau_fence_priv *fence = dev_priv->fence.func;
227 struct nouveau_engine *engine = &dev_priv->engine;
228 struct drm_crtc *crtc; 172 struct drm_crtc *crtc;
229 int ret, i; 173 int ret;
230
231 /* Make the CRTCs accessible */
232 engine->display.early_init(dev);
233 174
234 NV_INFO(dev, "POSTing device...\n");
235 ret = nouveau_run_vbios_init(dev); 175 ret = nouveau_run_vbios_init(dev);
236 if (ret) 176 if (ret)
237 return ret; 177 return ret;
238 178
239 NV_INFO(dev, "Reinitialising engines...\n");
240 for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
241 if (dev_priv->eng[i])
242 dev_priv->eng[i]->init(dev, i);
243 }
244
245 if (fence->resume)
246 fence->resume(dev);
247
248 nouveau_irq_postinstall(dev); 179 nouveau_irq_postinstall(dev);
249 180
181#if 0
250 /* Re-write SKIPS, they'll have been lost over the suspend */ 182 /* Re-write SKIPS, they'll have been lost over the suspend */
251 if (nouveau_vram_pushbuf) { 183 if (nouveau_vram_pushbuf) {
252 struct nouveau_channel *chan; 184 struct nouveau_channel *chan;
@@ -261,6 +193,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
261 nouveau_bo_wr32(chan->pushbuf_bo, i, 0); 193 nouveau_bo_wr32(chan->pushbuf_bo, i, 0);
262 } 194 }
263 } 195 }
196#endif
264 197
265 nouveau_pm_resume(dev); 198 nouveau_pm_resume(dev);
266 199
@@ -343,6 +276,9 @@ static const struct file_operations nouveau_driver_fops = {
343 276
344int nouveau_drm_load(struct drm_device *, unsigned long); 277int nouveau_drm_load(struct drm_device *, unsigned long);
345int nouveau_drm_unload(struct drm_device *); 278int nouveau_drm_unload(struct drm_device *);
279int nouveau_drm_open(struct drm_device *, struct drm_file *);
280void nouveau_drm_preclose(struct drm_device *dev, struct drm_file *);
281void nouveau_drm_postclose(struct drm_device *, struct drm_file *);
346 282
347static struct drm_driver driver = { 283static struct drm_driver driver = {
348 .driver_features = 284 .driver_features =
@@ -353,13 +289,9 @@ static struct drm_driver driver = {
353 .firstopen = nouveau_firstopen, 289 .firstopen = nouveau_firstopen,
354 .lastclose = nouveau_lastclose, 290 .lastclose = nouveau_lastclose,
355 .unload = nouveau_drm_unload, 291 .unload = nouveau_drm_unload,
356 .open = nouveau_open, 292 .open = nouveau_drm_open,
357 .preclose = nouveau_preclose, 293 .preclose = nouveau_drm_preclose,
358 .postclose = nouveau_postclose, 294 .postclose = nouveau_drm_postclose,
359#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
360 .debugfs_init = nouveau_debugfs_init,
361 .debugfs_cleanup = nouveau_debugfs_takedown,
362#endif
363 .irq_preinstall = nouveau_irq_preinstall, 295 .irq_preinstall = nouveau_irq_preinstall,
364 .irq_postinstall = nouveau_irq_postinstall, 296 .irq_postinstall = nouveau_irq_postinstall,
365 .irq_uninstall = nouveau_irq_uninstall, 297 .irq_uninstall = nouveau_irq_uninstall,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 08ce60be3f3c..f1cce652a2a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -64,24 +64,11 @@ enum blah {
64 NV_MEM_TYPE_GDDR5 64 NV_MEM_TYPE_GDDR5
65}; 65};
66 66
67struct nouveau_fpriv {
68 spinlock_t lock;
69 struct list_head channels;
70 struct nouveau_vm *vm;
71};
72
73static inline struct nouveau_fpriv *
74nouveau_fpriv(struct drm_file *file_priv)
75{
76 return file_priv ? file_priv->driver_priv : NULL;
77}
78
79#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) 67#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
80 68
81#include <nouveau_drm.h> 69#include <nouveau_drm.h>
82#include "nouveau_reg.h" 70#include "nouveau_reg.h"
83#include <nouveau_bios.h> 71#include <nouveau_bios.h>
84#include "nouveau_util.h"
85 72
86struct nouveau_grctx; 73struct nouveau_grctx;
87struct nouveau_mem; 74struct nouveau_mem;
@@ -90,8 +77,7 @@ struct nouveau_mem;
90#include "nouveau_compat.h" 77#include "nouveau_compat.h"
91 78
92#define nouveau_gpuobj_new(d,c,s,a,f,o) \ 79#define nouveau_gpuobj_new(d,c,s,a,f,o) \
93 _nouveau_gpuobj_new((d), (c) ? ((struct nouveau_channel *)(c))->ramin : NULL, \ 80 _nouveau_gpuobj_new((d), NULL, (s), (a), (f), (o))
94 (s), (a), (f), (o))
95 81
96#define nouveau_vm_new(d,o,l,m,v) \ 82#define nouveau_vm_new(d,o,l,m,v) \
97 _nouveau_vm_new((d), (o), (l), (m), (v)) 83 _nouveau_vm_new((d), (o), (l), (m), (v))
@@ -102,40 +88,15 @@ struct nouveau_mem;
102#define MAX_NUM_DCB_ENTRIES 16 88#define MAX_NUM_DCB_ENTRIES 16
103 89
104#define NOUVEAU_MAX_CHANNEL_NR 4096 90#define NOUVEAU_MAX_CHANNEL_NR 4096
105#define NOUVEAU_MAX_TILE_NR 15
106 91
107#include "nouveau_bo.h" 92#include "nouveau_bo.h"
108#include "nouveau_gem.h" 93#include "nouveau_gem.h"
109 94
110/* TODO: submit equivalent to TTM generic API upstream? */
111static inline void __iomem *
112nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
113{
114 bool is_iomem;
115 void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
116 &nvbo->kmap, &is_iomem);
117 WARN_ON_ONCE(ioptr && !is_iomem);
118 return ioptr;
119}
120
121enum nouveau_flags { 95enum nouveau_flags {
122 NV_NFORCE = 0x10000000, 96 NV_NFORCE = 0x10000000,
123 NV_NFORCE2 = 0x20000000 97 NV_NFORCE2 = 0x20000000
124}; 98};
125 99
126#define NVOBJ_ENGINE_SW 0
127#define NVOBJ_ENGINE_GR 1
128#define NVOBJ_ENGINE_CRYPT 2
129#define NVOBJ_ENGINE_COPY0 3
130#define NVOBJ_ENGINE_COPY1 4
131#define NVOBJ_ENGINE_MPEG 5
132#define NVOBJ_ENGINE_PPP NVOBJ_ENGINE_MPEG
133#define NVOBJ_ENGINE_BSP 6
134#define NVOBJ_ENGINE_VP 7
135#define NVOBJ_ENGINE_FIFO 14
136#define NVOBJ_ENGINE_NR 16
137#define NVOBJ_ENGINE_DISPLAY (NVOBJ_ENGINE_NR + 0) /*XXX*/
138
139struct nouveau_page_flip_state { 100struct nouveau_page_flip_state {
140 struct list_head head; 101 struct list_head head;
141 struct drm_pending_vblank_event *event; 102 struct drm_pending_vblank_event *event;
@@ -148,95 +109,6 @@ enum nouveau_channel_mutex_class {
148 NOUVEAU_KCHANNEL_MUTEX 109 NOUVEAU_KCHANNEL_MUTEX
149}; 110};
150 111
151struct nouveau_channel {
152 struct drm_device *dev;
153 struct list_head list;
154 int id;
155
156 /* references to the channel data structure */
157 struct kref ref;
158 /* users of the hardware channel resources, the hardware
159 * context will be kicked off when it reaches zero. */
160 atomic_t users;
161 struct mutex mutex;
162
163 /* owner of this fifo */
164 struct drm_file *file_priv;
165 /* mapping of the fifo itself */
166 struct drm_local_map *map;
167
168 /* mapping of the regs controlling the fifo */
169 void __iomem *user;
170 uint32_t user_get;
171 uint32_t user_get_hi;
172 uint32_t user_put;
173
174 /* DMA push buffer */
175 struct nouveau_gpuobj *pushbuf;
176 struct nouveau_bo *pushbuf_bo;
177 struct nouveau_vma pushbuf_vma;
178 uint64_t pushbuf_base;
179
180 /* Notifier memory */
181 struct nouveau_bo *notifier_bo;
182 struct nouveau_vma notifier_vma;
183 struct drm_mm notifier_heap;
184
185 /* PFIFO context */
186 struct nouveau_gpuobj *engptr;
187 struct nouveau_gpuobj *ramfc;
188
189 /* Execution engine contexts */
190 void *engctx[NVOBJ_ENGINE_NR];
191 void *fence;
192
193 /* NV50 VM */
194 struct nouveau_vm *vm;
195 struct nouveau_gpuobj *vm_pd;
196
197 /* Objects */
198 struct nouveau_gpuobj *ramin; /* Private instmem */
199 struct nouveau_ramht *ramht; /* Hash table */
200
201 /* GPU object info for stuff used in-kernel (mm_enabled) */
202 uint32_t m2mf_ntfy;
203 uint32_t vram_handle;
204 uint32_t gart_handle;
205 bool accel_done;
206
207 /* Push buffer state (only for drm's channel on !mm_enabled) */
208 struct {
209 int max;
210 int free;
211 int cur;
212 int put;
213 /* access via pushbuf_bo */
214
215 int ib_base;
216 int ib_max;
217 int ib_free;
218 int ib_put;
219 } dma;
220
221 struct {
222 bool active;
223 char name[32];
224 struct drm_info_list info;
225 } debugfs;
226};
227
228struct nouveau_exec_engine {
229 void (*destroy)(struct drm_device *, int engine);
230 int (*init)(struct drm_device *, int engine);
231 int (*fini)(struct drm_device *, int engine, bool suspend);
232 int (*context_new)(struct nouveau_channel *, int engine);
233 void (*context_del)(struct nouveau_channel *, int engine);
234 int (*object_new)(struct nouveau_channel *, int engine,
235 u32 handle, u16 class);
236 void (*set_tile_region)(struct drm_device *dev, int i);
237 void (*tlb_flush)(struct drm_device *, int engine);
238};
239
240struct nouveau_display_engine { 112struct nouveau_display_engine {
241 void *priv; 113 void *priv;
242 int (*early_init)(struct drm_device *); 114 int (*early_init)(struct drm_device *);
@@ -434,6 +306,8 @@ enum nouveau_card_type {
434 NV_E0 = 0xe0, 306 NV_E0 = 0xe0,
435}; 307};
436 308
309struct nouveau_channel;
310
437struct drm_nouveau_private { 311struct drm_nouveau_private {
438 struct drm_device *dev; 312 struct drm_device *dev;
439 bool noaccel; 313 bool noaccel;
@@ -447,92 +321,29 @@ struct drm_nouveau_private {
447 int flags; 321 int flags;
448 u32 crystal; 322 u32 crystal;
449 323
450 struct nouveau_exec_engine *eng[NVOBJ_ENGINE_NR];
451
452 struct list_head classes;
453
454 struct nouveau_bo *vga_ram; 324 struct nouveau_bo *vga_ram;
455 325
456 /* interrupt handling */ 326 /* interrupt handling */
457 void (*irq_handler[32])(struct drm_device *); 327 void (*irq_handler[32])(struct drm_device *);
458 bool msi_enabled; 328 bool msi_enabled;
459 329
460 struct {
461 struct drm_global_reference mem_global_ref;
462 struct ttm_bo_global_ref bo_global_ref;
463 struct ttm_bo_device bdev;
464 atomic_t validate_sequence;
465 int (*move)(struct nouveau_channel *,
466 struct ttm_buffer_object *,
467 struct ttm_mem_reg *, struct ttm_mem_reg *);
468 } ttm;
469
470 struct {
471 void *func;
472 spinlock_t lock;
473 struct drm_mm heap;
474 struct nouveau_bo *bo;
475 } fence;
476
477 struct {
478 spinlock_t lock;
479 struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
480 } channels;
481
482 struct nouveau_engine engine; 330 struct nouveau_engine engine;
483 struct nouveau_channel *channel;
484 331
485 /* For PFIFO and PGRAPH. */ 332 /* For PFIFO and PGRAPH. */
486 spinlock_t context_switch_lock; 333 spinlock_t context_switch_lock;
487 334
488 /* VM/PRAMIN flush, legacy PRAMIN aperture */
489 spinlock_t vm_lock;
490
491 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ 335 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
492 struct nouveau_ramht *ramht; 336 struct nouveau_ramht *ramht;
493 337
494 struct {
495 enum {
496 NOUVEAU_GART_NONE = 0,
497 NOUVEAU_GART_AGP, /* AGP */
498 NOUVEAU_GART_PDMA, /* paged dma object */
499 NOUVEAU_GART_HW /* on-chip gart/vm */
500 } type;
501 uint64_t aper_base;
502 uint64_t aper_size;
503 uint64_t aper_free;
504
505 struct ttm_backend_func *func;
506
507 struct nouveau_gpuobj *sg_ctxdma;
508 } gart_info;
509
510 /* nv10-nv40 tiling regions */
511 struct {
512 struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
513 spinlock_t lock;
514 } tile;
515
516 uint64_t fb_available_size; 338 uint64_t fb_available_size;
517 uint64_t fb_mappable_pages; 339 uint64_t fb_mappable_pages;
518 uint64_t fb_aper_free;
519 int fb_mtrr; 340 int fb_mtrr;
520 341
521 /* G8x/G9x virtual address space */
522 struct nouveau_vm *chan_vm;
523
524 struct nvbios vbios; 342 struct nvbios vbios;
525 u8 *mxms; 343 u8 *mxms;
526 struct list_head i2c_ports; 344 struct list_head i2c_ports;
527 345
528 struct backlight_device *backlight; 346 struct backlight_device *backlight;
529
530 struct {
531 struct dentry *channel_root;
532 } debugfs;
533
534 struct nouveau_fbdev *nfbdev;
535 struct apertures_struct *apertures;
536}; 347};
537 348
538static inline struct drm_nouveau_private * 349static inline struct drm_nouveau_private *
@@ -541,12 +352,6 @@ nouveau_private(struct drm_device *dev)
541 return dev->dev_private; 352 return dev->dev_private;
542} 353}
543 354
544static inline struct drm_nouveau_private *
545nouveau_bdev(struct ttm_bo_device *bd)
546{
547 return container_of(bd, struct drm_nouveau_private, ttm.bdev);
548}
549
550/* nouveau_drv.c */ 355/* nouveau_drv.c */
551extern int nouveau_modeset; 356extern int nouveau_modeset;
552extern int nouveau_duallink; 357extern int nouveau_duallink;
@@ -560,7 +365,6 @@ extern int nouveau_tv_disable;
560extern char *nouveau_tv_norm; 365extern char *nouveau_tv_norm;
561extern int nouveau_reg_debug; 366extern int nouveau_reg_debug;
562extern int nouveau_ignorelid; 367extern int nouveau_ignorelid;
563extern int nouveau_nofbaccel;
564extern int nouveau_noaccel; 368extern int nouveau_noaccel;
565extern int nouveau_force_post; 369extern int nouveau_force_post;
566extern int nouveau_override_conntype; 370extern int nouveau_override_conntype;
@@ -574,9 +378,6 @@ extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
574extern int nouveau_pci_resume(struct pci_dev *pdev); 378extern int nouveau_pci_resume(struct pci_dev *pdev);
575 379
576/* nouveau_state.c */ 380/* nouveau_state.c */
577extern int nouveau_open(struct drm_device *, struct drm_file *);
578extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
579extern void nouveau_postclose(struct drm_device *, struct drm_file *);
580extern int nouveau_load(struct drm_device *, unsigned long flags); 381extern int nouveau_load(struct drm_device *, unsigned long flags);
581extern int nouveau_firstopen(struct drm_device *); 382extern int nouveau_firstopen(struct drm_device *);
582extern void nouveau_lastclose(struct drm_device *); 383extern void nouveau_lastclose(struct drm_device *);
@@ -596,76 +397,16 @@ extern int nouveau_mem_timing_calc(struct drm_device *, u32 freq,
596extern void nouveau_mem_timing_read(struct drm_device *, 397extern void nouveau_mem_timing_read(struct drm_device *,
597 struct nouveau_pm_memtiming *); 398 struct nouveau_pm_memtiming *);
598extern int nouveau_mem_vbios_type(struct drm_device *); 399extern int nouveau_mem_vbios_type(struct drm_device *);
599extern const struct ttm_mem_type_manager_func nouveau_vram_manager; 400extern struct nouveau_tile_reg *nv10_mem_set_tiling(
600extern const struct ttm_mem_type_manager_func nouveau_gart_manager; 401 struct drm_device *dev, uint32_t addr, uint32_t size,
601extern const struct ttm_mem_type_manager_func nv04_gart_manager; 402 uint32_t pitch, uint32_t flags);
602 403extern void nv10_mem_put_tile_region(struct drm_device *dev,
603/* nouveau_notifier.c */ 404 struct nouveau_tile_reg *tile,
604extern int nouveau_notifier_init_channel(struct nouveau_channel *); 405 struct nouveau_fence *fence);
605extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); 406
606extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
607 int cout, uint32_t start, uint32_t end,
608 uint32_t *offset);
609
610/* nouveau_channel.c */
611extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
612extern int nouveau_channel_alloc(struct drm_device *dev,
613 struct nouveau_channel **chan,
614 struct drm_file *file_priv,
615 uint32_t fb_ctxdma, uint32_t tt_ctxdma);
616extern struct nouveau_channel *
617nouveau_channel_get_unlocked(struct nouveau_channel *);
618extern struct nouveau_channel *
619nouveau_channel_get(struct drm_file *, int id);
620extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
621extern void nouveau_channel_put(struct nouveau_channel **);
622extern void nouveau_channel_ref(struct nouveau_channel *chan,
623 struct nouveau_channel **pchan);
624extern int nouveau_channel_idle(struct nouveau_channel *chan); 407extern int nouveau_channel_idle(struct nouveau_channel *chan);
625 408
626/* nouveau_gpuobj.c */ 409/* nouveau_gpuobj.c */
627#define NVOBJ_ENGINE_ADD(d, e, p) do { \
628 struct drm_nouveau_private *dev_priv = (d)->dev_private; \
629 dev_priv->eng[NVOBJ_ENGINE_##e] = (p); \
630} while (0)
631
632#define NVOBJ_ENGINE_DEL(d, e) do { \
633 struct drm_nouveau_private *dev_priv = (d)->dev_private; \
634 dev_priv->eng[NVOBJ_ENGINE_##e] = NULL; \
635} while (0)
636
637#define NVOBJ_CLASS(d, c, e) do { \
638 int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \
639 if (ret) \
640 return ret; \
641} while (0)
642
643#define NVOBJ_MTHD(d, c, m, e) do { \
644 int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \
645 if (ret) \
646 return ret; \
647} while (0)
648
649extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
650extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
651 int (*exec)(struct nouveau_channel *,
652 u32 class, u32 mthd, u32 data));
653extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32);
654extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
655extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
656 uint32_t vram_h, uint32_t tt_h);
657extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
658extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
659 uint64_t offset, uint64_t size, int access,
660 int target, struct nouveau_gpuobj **);
661extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class);
662extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
663 u64 size, int target, int access, u32 type,
664 u32 comp, struct nouveau_gpuobj **pobj);
665extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
666 int class, u64 base, u64 size, int target,
667 int access, u32 type, u32 comp);
668
669int nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm, 410int nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm,
670 u32 flags, struct nouveau_vma *vma); 411 u32 flags, struct nouveau_vma *vma);
671void nouveau_gpuobj_unmap(struct nouveau_vma *vma); 412void nouveau_gpuobj_unmap(struct nouveau_vma *vma);
@@ -681,49 +422,6 @@ extern void nouveau_irq_preinstall(struct drm_device *);
681extern int nouveau_irq_postinstall(struct drm_device *); 422extern int nouveau_irq_postinstall(struct drm_device *);
682extern void nouveau_irq_uninstall(struct drm_device *); 423extern void nouveau_irq_uninstall(struct drm_device *);
683 424
684/* nouveau_sgdma.c */
685extern int nouveau_sgdma_init(struct drm_device *);
686extern void nouveau_sgdma_takedown(struct drm_device *);
687extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
688 uint32_t offset);
689extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
690 unsigned long size,
691 uint32_t page_flags,
692 struct page *dummy_read_page);
693
694/* nouveau_debugfs.c */
695#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
696extern int nouveau_debugfs_init(struct drm_minor *);
697extern void nouveau_debugfs_takedown(struct drm_minor *);
698extern int nouveau_debugfs_channel_init(struct nouveau_channel *);
699extern void nouveau_debugfs_channel_fini(struct nouveau_channel *);
700#else
701static inline int
702nouveau_debugfs_init(struct drm_minor *minor)
703{
704 return 0;
705}
706
707static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
708{
709}
710
711static inline int
712nouveau_debugfs_channel_init(struct nouveau_channel *chan)
713{
714 return 0;
715}
716
717static inline void
718nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
719{
720}
721#endif
722
723/* nouveau_dma.c */
724extern void nouveau_dma_init(struct nouveau_channel *);
725extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
726
727/* nouveau_acpi.c */ 425/* nouveau_acpi.c */
728#define ROM_BIOS_PAGE 4096 426#define ROM_BIOS_PAGE 4096
729#if defined(CONFIG_ACPI) 427#if defined(CONFIG_ACPI)
@@ -785,72 +483,8 @@ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
785/* nouveau_hdmi.c */ 483/* nouveau_hdmi.c */
786void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *); 484void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
787 485
788/* nv04_graph.c */
789extern int nv04_graph_create(struct drm_device *);
790extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
791extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
792 u32 class, u32 mthd, u32 data);
793extern struct nouveau_bitfield nv04_graph_nsource[];
794
795/* nv10_graph.c */
796extern int nv10_graph_create(struct drm_device *);
797extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
798extern struct nouveau_bitfield nv10_graph_intr[];
799extern struct nouveau_bitfield nv10_graph_nstatus[];
800
801/* nv20_graph.c */
802extern int nv20_graph_create(struct drm_device *);
803
804/* nv40_graph.c */
805extern int nv40_graph_create(struct drm_device *);
806extern void nv40_grctx_init(struct drm_device *, u32 *size);
807extern void nv40_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
808
809/* nv50_graph.c */
810extern int nv50_graph_create(struct drm_device *);
811extern struct nouveau_enum nv50_data_error_names[];
812extern int nv50_graph_isr_chid(struct drm_device *dev, u64 inst);
813extern int nv50_grctx_init(struct drm_device *, u32 *, u32, u32 *, u32 *);
814extern void nv50_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
815
816/* nvc0_graph.c */
817extern int nvc0_graph_create(struct drm_device *);
818extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
819
820/* nve0_graph.c */
821extern int nve0_graph_create(struct drm_device *);
822
823/* nv84_crypt.c */
824extern int nv84_crypt_create(struct drm_device *);
825
826/* nv98_crypt.c */
827extern int nv98_crypt_create(struct drm_device *dev);
828
829/* nva3_copy.c */
830extern int nva3_copy_create(struct drm_device *dev);
831
832/* nvc0_copy.c */
833extern int nvc0_copy_create(struct drm_device *dev, int engine);
834
835/* nv31_mpeg.c */
836extern int nv31_mpeg_create(struct drm_device *dev);
837
838/* nv50_mpeg.c */
839extern int nv50_mpeg_create(struct drm_device *dev);
840
841/* nv84_bsp.c */
842/* nv98_bsp.c */
843extern int nv84_bsp_create(struct drm_device *dev);
844
845/* nv84_vp.c */
846/* nv98_vp.c */
847extern int nv84_vp_create(struct drm_device *dev);
848
849/* nv98_ppp.c */
850extern int nv98_ppp_create(struct drm_device *dev);
851
852extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd, 486extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
853 unsigned long arg); 487 unsigned long arg);
854 488
855/* nvd0_display.c */ 489/* nvd0_display.c */
856extern int nvd0_display_create(struct drm_device *); 490extern int nvd0_display_create(struct drm_device *);
@@ -895,18 +529,6 @@ int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
895#endif /* def __BIG_ENDIAN else */ 529#endif /* def __BIG_ENDIAN else */
896#endif /* !ioread32_native */ 530#endif /* !ioread32_native */
897 531
898/* channel control reg access */
899static inline u32 nvchan_rd32(struct nouveau_channel *chan, unsigned reg)
900{
901 return ioread32_native(chan->user + reg);
902}
903
904static inline void nvchan_wr32(struct nouveau_channel *chan,
905 unsigned reg, u32 val)
906{
907 iowrite32_native(val, chan->user + reg);
908}
909
910/* register access */ 532/* register access */
911#define nv_rd08 _nv_rd08 533#define nv_rd08 _nv_rd08
912#define nv_wr08 _nv_wr08 534#define nv_wr08 _nv_wr08
@@ -1023,13 +645,6 @@ nv_match_device(struct drm_device *dev, unsigned device,
1023 dev->pdev->subsystem_device == sub_device; 645 dev->pdev->subsystem_device == sub_device;
1024} 646}
1025 647
1026static inline void *
1027nv_engine(struct drm_device *dev, int engine)
1028{
1029 struct drm_nouveau_private *dev_priv = dev->dev_private;
1030 return (void *)dev_priv->eng[engine];
1031}
1032
 /* returns 1 if device is one of the nv4x using the 0x4497 object class,
  * helpful to determine a number of other hardware features
  */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index f3f0b4c362cb..8b8bc8314d92 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -43,19 +43,31 @@
43#include "drm_crtc.h" 43#include "drm_crtc.h"
44#include "drm_crtc_helper.h" 44#include "drm_crtc_helper.h"
45#include "drm_fb_helper.h" 45#include "drm_fb_helper.h"
46#include "nouveau_drv.h" 46
47#include <nouveau_drm.h> 47#include "nouveau_drm.h"
48#include "nouveau_crtc.h" 48#include "nouveau_gem.h"
49#include "nouveau_bo.h"
49#include "nouveau_fb.h" 50#include "nouveau_fb.h"
50#include "nouveau_fbcon.h" 51#include "nouveau_fbcon.h"
51#include "nouveau_dma.h" 52#include "nouveau_chan.h"
53
54#include "nouveau_crtc.h"
55
56#include <core/client.h>
57#include <core/device.h>
58
59#include <subdev/fb.h>
60
61MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
62static int nouveau_nofbaccel = 0;
63module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
52 64
53static void 65static void
54nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 66nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
55{ 67{
56 struct nouveau_fbdev *nfbdev = info->par; 68 struct nouveau_fbdev *fbcon = info->par;
57 struct drm_device *dev = nfbdev->dev; 69 struct nouveau_drm *drm = nouveau_newpriv(fbcon->dev);
58 struct drm_nouveau_private *dev_priv = dev->dev_private; 70 struct nouveau_device *device = nv_device(drm->device);
59 int ret; 71 int ret;
60 72
61 if (info->state != FBINFO_STATE_RUNNING) 73 if (info->state != FBINFO_STATE_RUNNING)
@@ -63,15 +75,15 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
63 75
64 ret = -ENODEV; 76 ret = -ENODEV;
65 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && 77 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
66 mutex_trylock(&dev_priv->channel->mutex)) { 78 mutex_trylock(&drm->client.mutex)) {
67 if (dev_priv->card_type < NV_50) 79 if (device->card_type < NV_50)
68 ret = nv04_fbcon_fillrect(info, rect); 80 ret = nv04_fbcon_fillrect(info, rect);
69 else 81 else
70 if (dev_priv->card_type < NV_C0) 82 if (device->card_type < NV_C0)
71 ret = nv50_fbcon_fillrect(info, rect); 83 ret = nv50_fbcon_fillrect(info, rect);
72 else 84 else
73 ret = nvc0_fbcon_fillrect(info, rect); 85 ret = nvc0_fbcon_fillrect(info, rect);
74 mutex_unlock(&dev_priv->channel->mutex); 86 mutex_unlock(&drm->client.mutex);
75 } 87 }
76 88
77 if (ret == 0) 89 if (ret == 0)
@@ -85,9 +97,9 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
85static void 97static void
86nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) 98nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
87{ 99{
88 struct nouveau_fbdev *nfbdev = info->par; 100 struct nouveau_fbdev *fbcon = info->par;
89 struct drm_device *dev = nfbdev->dev; 101 struct nouveau_drm *drm = nouveau_newpriv(fbcon->dev);
90 struct drm_nouveau_private *dev_priv = dev->dev_private; 102 struct nouveau_device *device = nv_device(drm->device);
91 int ret; 103 int ret;
92 104
93 if (info->state != FBINFO_STATE_RUNNING) 105 if (info->state != FBINFO_STATE_RUNNING)
@@ -95,15 +107,15 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
95 107
96 ret = -ENODEV; 108 ret = -ENODEV;
97 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && 109 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
98 mutex_trylock(&dev_priv->channel->mutex)) { 110 mutex_trylock(&drm->client.mutex)) {
99 if (dev_priv->card_type < NV_50) 111 if (device->card_type < NV_50)
100 ret = nv04_fbcon_copyarea(info, image); 112 ret = nv04_fbcon_copyarea(info, image);
101 else 113 else
102 if (dev_priv->card_type < NV_C0) 114 if (device->card_type < NV_C0)
103 ret = nv50_fbcon_copyarea(info, image); 115 ret = nv50_fbcon_copyarea(info, image);
104 else 116 else
105 ret = nvc0_fbcon_copyarea(info, image); 117 ret = nvc0_fbcon_copyarea(info, image);
106 mutex_unlock(&dev_priv->channel->mutex); 118 mutex_unlock(&drm->client.mutex);
107 } 119 }
108 120
109 if (ret == 0) 121 if (ret == 0)
@@ -117,9 +129,9 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
117static void 129static void
118nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 130nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
119{ 131{
120 struct nouveau_fbdev *nfbdev = info->par; 132 struct nouveau_fbdev *fbcon = info->par;
121 struct drm_device *dev = nfbdev->dev; 133 struct nouveau_drm *drm = nouveau_newpriv(fbcon->dev);
122 struct drm_nouveau_private *dev_priv = dev->dev_private; 134 struct nouveau_device *device = nv_device(drm->device);
123 int ret; 135 int ret;
124 136
125 if (info->state != FBINFO_STATE_RUNNING) 137 if (info->state != FBINFO_STATE_RUNNING)
@@ -127,15 +139,15 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
127 139
128 ret = -ENODEV; 140 ret = -ENODEV;
129 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && 141 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
130 mutex_trylock(&dev_priv->channel->mutex)) { 142 mutex_trylock(&drm->client.mutex)) {
131 if (dev_priv->card_type < NV_50) 143 if (device->card_type < NV_50)
132 ret = nv04_fbcon_imageblit(info, image); 144 ret = nv04_fbcon_imageblit(info, image);
133 else 145 else
134 if (dev_priv->card_type < NV_C0) 146 if (device->card_type < NV_C0)
135 ret = nv50_fbcon_imageblit(info, image); 147 ret = nv50_fbcon_imageblit(info, image);
136 else 148 else
137 ret = nvc0_fbcon_imageblit(info, image); 149 ret = nvc0_fbcon_imageblit(info, image);
138 mutex_unlock(&dev_priv->channel->mutex); 150 mutex_unlock(&drm->client.mutex);
139 } 151 }
140 152
141 if (ret == 0) 153 if (ret == 0)
@@ -149,10 +161,9 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
149static int 161static int
150nouveau_fbcon_sync(struct fb_info *info) 162nouveau_fbcon_sync(struct fb_info *info)
151{ 163{
152 struct nouveau_fbdev *nfbdev = info->par; 164 struct nouveau_fbdev *fbcon = info->par;
153 struct drm_device *dev = nfbdev->dev; 165 struct nouveau_drm *drm = nouveau_newpriv(fbcon->dev);
154 struct drm_nouveau_private *dev_priv = dev->dev_private; 166 struct nouveau_channel *chan = drm->channel;
155 struct nouveau_channel *chan = dev_priv->channel;
156 int ret; 167 int ret;
157 168
158 if (!chan || !chan->accel_done || in_interrupt() || 169 if (!chan || !chan->accel_done || in_interrupt() ||
@@ -160,11 +171,11 @@ nouveau_fbcon_sync(struct fb_info *info)
160 info->flags & FBINFO_HWACCEL_DISABLED) 171 info->flags & FBINFO_HWACCEL_DISABLED)
161 return 0; 172 return 0;
162 173
163 if (!mutex_trylock(&chan->mutex)) 174 if (!mutex_trylock(&drm->client.mutex))
164 return 0; 175 return 0;
165 176
166 ret = nouveau_channel_idle(chan); 177 ret = nouveau_channel_idle(chan);
167 mutex_unlock(&chan->mutex); 178 mutex_unlock(&drm->client.mutex);
168 if (ret) { 179 if (ret) {
169 nouveau_fbcon_gpu_lockup(info); 180 nouveau_fbcon_gpu_lockup(info);
170 return 0; 181 return 0;
@@ -224,9 +235,9 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
224} 235}
225 236
226static void 237static void
227nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev) 238nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
228{ 239{
229 struct fb_info *info = nfbdev->helper.fbdev; 240 struct fb_info *info = fbcon->helper.fbdev;
230 struct fb_fillrect rect; 241 struct fb_fillrect rect;
231 242
232 /* Clear the entire fbcon. The drm will program every connector 243 /* Clear the entire fbcon. The drm will program every connector
@@ -242,11 +253,12 @@ nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
242} 253}
243 254
244static int 255static int
245nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, 256nouveau_fbcon_create(struct nouveau_fbdev *fbcon,
246 struct drm_fb_helper_surface_size *sizes) 257 struct drm_fb_helper_surface_size *sizes)
247{ 258{
248 struct drm_device *dev = nfbdev->dev; 259 struct drm_device *dev = fbcon->dev;
249 struct drm_nouveau_private *dev_priv = dev->dev_private; 260 struct nouveau_drm *drm = nouveau_newpriv(dev);
261 struct nouveau_device *device = nv_device(drm->device);
250 struct fb_info *info; 262 struct fb_info *info;
251 struct drm_framebuffer *fb; 263 struct drm_framebuffer *fb;
252 struct nouveau_framebuffer *nouveau_fb; 264 struct nouveau_framebuffer *nouveau_fb;
@@ -254,7 +266,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
254 struct nouveau_bo *nvbo; 266 struct nouveau_bo *nvbo;
255 struct drm_mode_fb_cmd2 mode_cmd; 267 struct drm_mode_fb_cmd2 mode_cmd;
256 struct pci_dev *pdev = dev->pdev; 268 struct pci_dev *pdev = dev->pdev;
257 struct device *device = &pdev->dev;
258 int size, ret; 269 int size, ret;
259 270
260 mode_cmd.width = sizes->surface_width; 271 mode_cmd.width = sizes->surface_width;
@@ -272,37 +283,38 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
272 ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 283 ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
273 0, 0x0000, &nvbo); 284 0, 0x0000, &nvbo);
274 if (ret) { 285 if (ret) {
275 NV_ERROR(dev, "failed to allocate framebuffer\n"); 286 NV_ERROR(drm, "failed to allocate framebuffer\n");
276 goto out; 287 goto out;
277 } 288 }
278 289
279 ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM); 290 ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
280 if (ret) { 291 if (ret) {
281 NV_ERROR(dev, "failed to pin fb: %d\n", ret); 292 NV_ERROR(drm, "failed to pin fb: %d\n", ret);
282 nouveau_bo_ref(NULL, &nvbo); 293 nouveau_bo_ref(NULL, &nvbo);
283 goto out; 294 goto out;
284 } 295 }
285 296
286 ret = nouveau_bo_map(nvbo); 297 ret = nouveau_bo_map(nvbo);
287 if (ret) { 298 if (ret) {
288 NV_ERROR(dev, "failed to map fb: %d\n", ret); 299 NV_ERROR(drm, "failed to map fb: %d\n", ret);
289 nouveau_bo_unpin(nvbo); 300 nouveau_bo_unpin(nvbo);
290 nouveau_bo_ref(NULL, &nvbo); 301 nouveau_bo_ref(NULL, &nvbo);
291 goto out; 302 goto out;
292 } 303 }
293 304
294 chan = nouveau_nofbaccel ? NULL : dev_priv->channel; 305 chan = nouveau_nofbaccel ? NULL : drm->channel;
295 if (chan && dev_priv->card_type >= NV_50) { 306 if (chan && device->card_type >= NV_50) {
296 ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma); 307 ret = nouveau_bo_vma_add(nvbo, nv_client(chan->cli)->vm,
308 &fbcon->nouveau_fb.vma);
297 if (ret) { 309 if (ret) {
298 NV_ERROR(dev, "failed to map fb into chan: %d\n", ret); 310 NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
299 chan = NULL; 311 chan = NULL;
300 } 312 }
301 } 313 }
302 314
303 mutex_lock(&dev->struct_mutex); 315 mutex_lock(&dev->struct_mutex);
304 316
305 info = framebuffer_alloc(0, device); 317 info = framebuffer_alloc(0, &pdev->dev);
306 if (!info) { 318 if (!info) {
307 ret = -ENOMEM; 319 ret = -ENOMEM;
308 goto out_unref; 320 goto out_unref;
@@ -314,16 +326,16 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
314 goto out_unref; 326 goto out_unref;
315 } 327 }
316 328
317 info->par = nfbdev; 329 info->par = fbcon;
318 330
319 nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo); 331 nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo);
320 332
321 nouveau_fb = &nfbdev->nouveau_fb; 333 nouveau_fb = &fbcon->nouveau_fb;
322 fb = &nouveau_fb->base; 334 fb = &nouveau_fb->base;
323 335
324 /* setup helper */ 336 /* setup helper */
325 nfbdev->helper.fb = fb; 337 fbcon->helper.fb = fb;
326 nfbdev->helper.fbdev = info; 338 fbcon->helper.fbdev = info;
327 339
328 strcpy(info->fix.id, "nouveaufb"); 340 strcpy(info->fix.id, "nouveaufb");
329 if (nouveau_nofbaccel) 341 if (nouveau_nofbaccel)
@@ -342,25 +354,18 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
342 info->screen_size = size; 354 info->screen_size = size;
343 355
344 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 356 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
345 drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height); 357 drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height);
346
347 /* Set aperture base/size for vesafb takeover */
348 info->apertures = dev_priv->apertures;
349 if (!info->apertures) {
350 ret = -ENOMEM;
351 goto out_unref;
352 }
353 358
354 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 359 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
355 360
356 mutex_unlock(&dev->struct_mutex); 361 mutex_unlock(&dev->struct_mutex);
357 362
358 if (dev_priv->channel && !nouveau_nofbaccel) { 363 if (chan) {
359 ret = -ENODEV; 364 ret = -ENODEV;
360 if (dev_priv->card_type < NV_50) 365 if (device->card_type < NV_50)
361 ret = nv04_fbcon_accel_init(info); 366 ret = nv04_fbcon_accel_init(info);
362 else 367 else
363 if (dev_priv->card_type < NV_C0) 368 if (device->card_type < NV_C0)
364 ret = nv50_fbcon_accel_init(info); 369 ret = nv50_fbcon_accel_init(info);
365 else 370 else
366 ret = nvc0_fbcon_accel_init(info); 371 ret = nvc0_fbcon_accel_init(info);
@@ -369,13 +374,12 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
369 info->fbops = &nouveau_fbcon_ops; 374 info->fbops = &nouveau_fbcon_ops;
370 } 375 }
371 376
372 nouveau_fbcon_zfill(dev, nfbdev); 377 nouveau_fbcon_zfill(dev, fbcon);
373 378
374 /* To allow resizeing without swapping buffers */ 379 /* To allow resizeing without swapping buffers */
375 NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n", 380 NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n",
376 nouveau_fb->base.width, 381 nouveau_fb->base.width, nouveau_fb->base.height,
377 nouveau_fb->base.height, 382 nvbo->bo.offset, nvbo);
378 nvbo->bo.offset, nvbo);
379 383
380 vga_switcheroo_client_fb_set(dev->pdev, info); 384 vga_switcheroo_client_fb_set(dev->pdev, info);
381 return 0; 385 return 0;
@@ -390,12 +394,12 @@ static int
390nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper, 394nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
391 struct drm_fb_helper_surface_size *sizes) 395 struct drm_fb_helper_surface_size *sizes)
392{ 396{
393 struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper; 397 struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
394 int new_fb = 0; 398 int new_fb = 0;
395 int ret; 399 int ret;
396 400
397 if (!helper->fb) { 401 if (!helper->fb) {
398 ret = nouveau_fbcon_create(nfbdev, sizes); 402 ret = nouveau_fbcon_create(fbcon, sizes);
399 if (ret) 403 if (ret)
400 return ret; 404 return ret;
401 new_fb = 1; 405 new_fb = 1;
@@ -406,18 +410,18 @@ nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
406void 410void
407nouveau_fbcon_output_poll_changed(struct drm_device *dev) 411nouveau_fbcon_output_poll_changed(struct drm_device *dev)
408{ 412{
409 struct drm_nouveau_private *dev_priv = dev->dev_private; 413 struct nouveau_drm *drm = nouveau_newpriv(dev);
410 drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper); 414 drm_fb_helper_hotplug_event(&drm->fbcon->helper);
411} 415}
412 416
413static int 417static int
414nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev) 418nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
415{ 419{
416 struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb; 420 struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb;
417 struct fb_info *info; 421 struct fb_info *info;
418 422
419 if (nfbdev->helper.fbdev) { 423 if (fbcon->helper.fbdev) {
420 info = nfbdev->helper.fbdev; 424 info = fbcon->helper.fbdev;
421 unregister_framebuffer(info); 425 unregister_framebuffer(info);
422 if (info->cmap.len) 426 if (info->cmap.len)
423 fb_dealloc_cmap(&info->cmap); 427 fb_dealloc_cmap(&info->cmap);
@@ -430,17 +434,17 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
430 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); 434 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
431 nouveau_fb->nvbo = NULL; 435 nouveau_fb->nvbo = NULL;
432 } 436 }
433 drm_fb_helper_fini(&nfbdev->helper); 437 drm_fb_helper_fini(&fbcon->helper);
434 drm_framebuffer_cleanup(&nouveau_fb->base); 438 drm_framebuffer_cleanup(&nouveau_fb->base);
435 return 0; 439 return 0;
436} 440}
437 441
438void nouveau_fbcon_gpu_lockup(struct fb_info *info) 442void nouveau_fbcon_gpu_lockup(struct fb_info *info)
439{ 443{
440 struct nouveau_fbdev *nfbdev = info->par; 444 struct nouveau_fbdev *fbcon = info->par;
441 struct drm_device *dev = nfbdev->dev; 445 struct nouveau_drm *drm = nouveau_newpriv(fbcon->dev);
442 446
443 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 447 NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
444 info->flags |= FBINFO_HWACCEL_DISABLED; 448 info->flags |= FBINFO_HWACCEL_DISABLED;
445} 449}
446 450
@@ -451,74 +455,81 @@ static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
451}; 455};
452 456
453 457
454int nouveau_fbcon_init(struct drm_device *dev) 458int
459nouveau_fbcon_init(struct drm_device *dev)
455{ 460{
456 struct drm_nouveau_private *dev_priv = dev->dev_private; 461 struct nouveau_drm *drm = nouveau_newpriv(dev);
457 struct nouveau_fbdev *nfbdev; 462 struct nouveau_fb *pfb = nouveau_fb(drm->device);
463 struct nouveau_fbdev *fbcon;
458 int preferred_bpp; 464 int preferred_bpp;
459 int ret; 465 int ret;
460 466
461 nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); 467 if (!dev->mode_config.num_crtc)
462 if (!nfbdev) 468 return 0;
469
470 fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
471 if (!fbcon)
463 return -ENOMEM; 472 return -ENOMEM;
464 473
465 nfbdev->dev = dev; 474 fbcon->dev = dev;
466 dev_priv->nfbdev = nfbdev; 475 drm->fbcon = fbcon;
467 nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; 476 fbcon->helper.funcs = &nouveau_fbcon_helper_funcs;
468 477
469 ret = drm_fb_helper_init(dev, &nfbdev->helper, 478 ret = drm_fb_helper_init(dev, &fbcon->helper,
470 dev->mode_config.num_crtc, 4); 479 dev->mode_config.num_crtc, 4);
471 if (ret) { 480 if (ret) {
472 kfree(nfbdev); 481 kfree(fbcon);
473 return ret; 482 return ret;
474 } 483 }
475 484
476 drm_fb_helper_single_add_all_connectors(&nfbdev->helper); 485 drm_fb_helper_single_add_all_connectors(&fbcon->helper);
477 486
478 if (nvfb_vram_size(dev) <= 32 * 1024 * 1024) 487 if (pfb->ram.size <= 32 * 1024 * 1024)
479 preferred_bpp = 8; 488 preferred_bpp = 8;
480 else if (nvfb_vram_size(dev) <= 64 * 1024 * 1024) 489 else
490 if (pfb->ram.size <= 64 * 1024 * 1024)
481 preferred_bpp = 16; 491 preferred_bpp = 16;
482 else 492 else
483 preferred_bpp = 32; 493 preferred_bpp = 32;
484 494
485 drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp); 495 drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
486 return 0; 496 return 0;
487} 497}
488 498
489void nouveau_fbcon_fini(struct drm_device *dev) 499void
500nouveau_fbcon_fini(struct drm_device *dev)
490{ 501{
491 struct drm_nouveau_private *dev_priv = dev->dev_private; 502 struct nouveau_drm *drm = nouveau_newpriv(dev);
492 503
493 if (!dev_priv->nfbdev) 504 if (!drm->fbcon)
494 return; 505 return;
495 506
496 nouveau_fbcon_destroy(dev, dev_priv->nfbdev); 507 nouveau_fbcon_destroy(dev, drm->fbcon);
497 kfree(dev_priv->nfbdev); 508 kfree(drm->fbcon);
498 dev_priv->nfbdev = NULL; 509 drm->fbcon = NULL;
499} 510}
500 511
501void nouveau_fbcon_save_disable_accel(struct drm_device *dev) 512void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
502{ 513{
503 struct drm_nouveau_private *dev_priv = dev->dev_private; 514 struct nouveau_drm *drm = nouveau_newpriv(dev);
504 515
505 dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags; 516 drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
506 dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; 517 drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
507} 518}
508 519
509void nouveau_fbcon_restore_accel(struct drm_device *dev) 520void nouveau_fbcon_restore_accel(struct drm_device *dev)
510{ 521{
511 struct drm_nouveau_private *dev_priv = dev->dev_private; 522 struct nouveau_drm *drm = nouveau_newpriv(dev);
512 dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags; 523 drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
513} 524}
514 525
515void nouveau_fbcon_set_suspend(struct drm_device *dev, int state) 526void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
516{ 527{
517 struct drm_nouveau_private *dev_priv = dev->dev_private; 528 struct nouveau_drm *drm = nouveau_newpriv(dev);
518 console_lock(); 529 console_lock();
519 if (state == 0) 530 if (state == 0)
520 nouveau_fbcon_save_disable_accel(dev); 531 nouveau_fbcon_save_disable_accel(dev);
521 fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state); 532 fb_set_suspend(drm->fbcon->helper.fbdev, state);
522 if (state == 1) 533 if (state == 1)
523 nouveau_fbcon_restore_accel(dev); 534 nouveau_fbcon_restore_accel(dev);
524 console_unlock(); 535 console_unlock();
@@ -526,6 +537,6 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
526 537
527void nouveau_fbcon_zfill_all(struct drm_device *dev) 538void nouveau_fbcon_zfill_all(struct drm_device *dev)
528{ 539{
529 struct drm_nouveau_private *dev_priv = dev->dev_private; 540 struct nouveau_drm *drm = nouveau_newpriv(dev);
530 nouveau_fbcon_zfill(dev, dev_priv->nfbdev); 541 nouveau_fbcon_zfill(dev, drm->fbcon);
531} 542}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index b73c29f87fc3..e6404e39eaf1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -30,6 +30,7 @@
30#include "drm_fb_helper.h" 30#include "drm_fb_helper.h"
31 31
32#include "nouveau_fb.h" 32#include "nouveau_fb.h"
33
33struct nouveau_fbdev { 34struct nouveau_fbdev {
34 struct drm_fb_helper helper; 35 struct drm_fb_helper helper;
35 struct nouveau_framebuffer nouveau_fb; 36 struct nouveau_framebuffer nouveau_fb;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index a1835d710f73..5b5471ba6eda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -30,11 +30,9 @@
 #include <linux/ktime.h>
 #include <linux/hrtimer.h>
 
-#include "nouveau_drv.h"
-#include <core/ramht.h>
-#include "nouveau_fence.h"
-#include "nouveau_software.h"
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
+#include "nouveau_fence.h"
 
 void
 nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
@@ -59,12 +57,10 @@ nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
 	spin_lock_init(&fctx->lock);
 }
 
-void
+static void
 nouveau_fence_update(struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fence_priv *priv = dev_priv->fence.func;
+	struct nouveau_fence_priv *priv = chan->drm->fence;
 	struct nouveau_fence_chan *fctx = chan->fence;
 	struct nouveau_fence *fence, *fnext;
 
@@ -85,9 +81,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
 int
 nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fence_priv *priv = dev_priv->fence.func;
+	struct nouveau_fence_priv *priv = chan->drm->fence;
 	struct nouveau_fence_chan *fctx = chan->fence;
 	int ret;
 
@@ -150,20 +144,17 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 int
 nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fence_priv *priv = dev_priv->fence.func;
+	struct nouveau_fence_priv *priv = chan->drm->fence;
 	struct nouveau_channel *prev;
 	int ret = 0;
 
-	prev = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
+	prev = fence ? fence->channel : NULL;
 	if (prev) {
 		if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
 			ret = priv->sync(fence, prev, chan);
 			if (unlikely(ret))
 				ret = nouveau_fence_wait(fence, true, false);
 		}
-		nouveau_channel_put_unlocked(&prev);
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 8ee65758f24f..bedafd1c9539 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,6 +1,8 @@
 #ifndef __NOUVEAU_FENCE_H__
 #define __NOUVEAU_FENCE_H__
 
+struct nouveau_drm;
+
 struct nouveau_fence {
 	struct list_head head;
 	struct kref kref;
@@ -22,8 +24,6 @@ int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
 bool nouveau_fence_done(struct nouveau_fence *);
 int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
 int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
-void nouveau_fence_idle(struct nouveau_channel *);
-void nouveau_fence_update(struct nouveau_channel *);
 
 struct nouveau_fence_chan {
 	struct list_head pending;
@@ -34,9 +34,9 @@ struct nouveau_fence_chan {
 };
 
 struct nouveau_fence_priv {
-	void (*dtor)(struct drm_device *);
-	bool (*suspend)(struct drm_device *);
-	void (*resume)(struct drm_device *);
+	void (*dtor)(struct nouveau_drm *);
+	bool (*suspend)(struct nouveau_drm *);
+	void (*resume)(struct nouveau_drm *);
 	int (*context_new)(struct nouveau_channel *);
 	void (*context_del)(struct nouveau_channel *);
 	int (*emit)(struct nouveau_fence *);
@@ -45,10 +45,12 @@ struct nouveau_fence_priv {
 	u32 (*read)(struct nouveau_channel *);
 };
 
+#define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
+
 void nouveau_fence_context_new(struct nouveau_fence_chan *);
 void nouveau_fence_context_del(struct nouveau_fence_chan *);
 
-int nv04_fence_create(struct drm_device *dev);
+int nv04_fence_create(struct nouveau_drm *);
 int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
 
 int nv10_fence_emit(struct nouveau_fence *);
@@ -56,12 +58,12 @@ int nv17_fence_sync(struct nouveau_fence *, struct nouveau_channel *,
 		    struct nouveau_channel *);
 u32 nv10_fence_read(struct nouveau_channel *);
 void nv10_fence_context_del(struct nouveau_channel *);
-void nv10_fence_destroy(struct drm_device *);
-int nv10_fence_create(struct drm_device *dev);
+void nv10_fence_destroy(struct nouveau_drm *);
+int nv10_fence_create(struct nouveau_drm *);
 
-int nv50_fence_create(struct drm_device *dev);
-int nv84_fence_create(struct drm_device *dev);
-int nvc0_fence_create(struct drm_device *dev);
+int nv50_fence_create(struct nouveau_drm *);
+int nv84_fence_create(struct nouveau_drm *);
+int nvc0_fence_create(struct nouveau_drm *);
 u64 nvc0_fence_crtc(struct nouveau_channel *, int crtc);
 
 int nouveau_flip_complete(void *chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 96a34bce54ce..ba744daeb50e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -23,16 +23,19 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include <linux/dma-buf.h>
-#include "drmP.h"
-#include "drm.h"
 
-#include "nouveau_drv.h"
+#include <linux/dma-buf.h>
 #include <nouveau_drm.h>
+
+#include <subdev/fb.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
+#include "nouveau_abi16.h"
 
-#define nouveau_gem_pushbuf_sync(chan) 0
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"
 
 int
 nouveau_gem_object_new(struct drm_gem_object *gem)
@@ -67,19 +70,19 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
 int
 nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
-	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct nouveau_vma *vma;
 	int ret;
 
-	if (!fpriv->vm)
+	if (!cli->base.vm)
 		return 0;
 
 	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
 	if (ret)
 		return ret;
 
-	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
 	if (!vma) {
 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 		if (!vma) {
@@ -87,7 +90,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 			goto out;
 		}
 
-		ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
+		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
 		if (ret) {
 			kfree(vma);
 			goto out;
@@ -104,19 +107,19 @@ out:
 void
 nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
-	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct nouveau_vma *vma;
 	int ret;
 
-	if (!fpriv->vm)
+	if (!cli->base.vm)
 		return;
 
 	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
 	if (ret)
 		return;
 
-	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
 	if (vma) {
 		if (--vma->refcount == 0) {
 			nouveau_bo_vma_del(nvbo, vma);
@@ -131,7 +134,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
 		uint32_t tile_mode, uint32_t tile_flags,
 		struct nouveau_bo **pnvbo)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_bo *nvbo;
 	u32 flags = 0;
 	int ret;
@@ -155,7 +158,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
 	 */
 	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
 			      NOUVEAU_GEM_DOMAIN_GART;
-	if (dev_priv->card_type >= NV_50)
+	if (nv_device(drm->device)->card_type >= NV_50)
 		nvbo->valid_domains &= domain;
 
 	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
@@ -173,7 +176,7 @@ static int
 nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 		 struct drm_nouveau_gem_info *rep)
 {
-	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct nouveau_vma *vma;
 
@@ -183,8 +186,8 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
 
 	rep->offset = nvbo->bo.offset;
-	if (fpriv->vm) {
-		vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+	if (cli->base.vm) {
+		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
 		if (!vma)
 			return -EINVAL;
 
@@ -202,15 +205,16 @@ int
 nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
 	struct drm_nouveau_gem_new *req = data;
 	struct nouveau_bo *nvbo = NULL;
 	int ret = 0;
 
-	dev_priv->ttm.bdev.dev_mapping = dev->dev_mapping;
+	drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
 
-	if (!nvfb_flags_valid(dev, req->info.tile_flags)) {
-		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
+	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
+		NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
 		return -EINVAL;
 	}
 
@@ -312,16 +316,16 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
 	      int nr_buffers, struct validate_op *op)
 {
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_device *dev = chan->drm->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint32_t sequence;
 	int trycnt = 0;
 	int ret, i;
 
-	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
+	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
 retry:
 	if (++trycnt > 100000) {
-		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
+		NV_ERROR(drm, "%s failed and gave up.\n", __func__);
 		return -EINVAL;
 	}
 
@@ -332,14 +336,14 @@ retry:
 
 		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
 		if (!gem) {
-			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
+			NV_ERROR(drm, "Unknown handle 0x%08x\n", b->handle);
 			validate_fini(op, NULL);
 			return -ENOENT;
 		}
 		nvbo = gem->driver_private;
 
 		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
-			NV_ERROR(dev, "multiple instances of buffer %d on "
+			NV_ERROR(drm, "multiple instances of buffer %d on "
 				      "validation list\n", b->handle);
 			drm_gem_object_unreference_unlocked(gem);
 			validate_fini(op, NULL);
@@ -354,7 +358,7 @@ retry:
 			drm_gem_object_unreference_unlocked(gem);
 			if (unlikely(ret)) {
 				if (ret != -ERESTARTSYS)
-					NV_ERROR(dev, "fail reserve\n");
+					NV_ERROR(drm, "fail reserve\n");
 				return ret;
 			}
 			goto retry;
@@ -373,7 +377,7 @@ retry:
 		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
 			list_add_tail(&nvbo->entry, &op->gart_list);
 		else {
-			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
+			NV_ERROR(drm, "invalid valid domains: 0x%08x\n",
 				 b->valid_domains);
 			list_add_tail(&nvbo->entry, &op->both_list);
 			validate_fini(op, NULL);
@@ -407,10 +411,9 @@ static int
 validate_list(struct nouveau_channel *chan, struct list_head *list,
 	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nouveau_drm *drm = chan->drm;
 	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
 				(void __force __user *)(uintptr_t)user_pbbo_ptr;
-	struct drm_device *dev = chan->dev;
 	struct nouveau_bo *nvbo;
 	int ret, relocs = 0;
 
@@ -419,7 +422,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 
 		ret = validate_sync(chan, nvbo);
 		if (unlikely(ret)) {
-			NV_ERROR(dev, "fail pre-validate sync\n");
+			NV_ERROR(drm, "fail pre-validate sync\n");
 			return ret;
 		}
 
@@ -427,24 +430,24 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 					     b->write_domains,
 					     b->valid_domains);
 		if (unlikely(ret)) {
-			NV_ERROR(dev, "fail set_domain\n");
+			NV_ERROR(drm, "fail set_domain\n");
 			return ret;
 		}
 
 		ret = nouveau_bo_validate(nvbo, true, false, false);
 		if (unlikely(ret)) {
 			if (ret != -ERESTARTSYS)
-				NV_ERROR(dev, "fail ttm_validate\n");
+				NV_ERROR(drm, "fail ttm_validate\n");
 			return ret;
 		}
 
 		ret = validate_sync(chan, nvbo);
 		if (unlikely(ret)) {
-			NV_ERROR(dev, "fail post-validate sync\n");
+			NV_ERROR(drm, "fail post-validate sync\n");
 			return ret;
 		}
 
-		if (dev_priv->card_type < NV_50) {
+		if (nv_device(drm->device)->card_type < NV_50) {
 			if (nvbo->bo.offset == b->presumed.offset &&
 			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
 			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -476,7 +479,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 			     uint64_t user_buffers, int nr_buffers,
 			     struct validate_op *op, int *apply_relocs)
 {
-	struct drm_device *dev = chan->dev;
+	struct nouveau_drm *drm = chan->drm;
 	int ret, relocs = 0;
 
 	INIT_LIST_HEAD(&op->vram_list);
@@ -489,14 +492,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
 	if (unlikely(ret)) {
 		if (ret != -ERESTARTSYS)
-			NV_ERROR(dev, "validate_init\n");
+			NV_ERROR(drm, "validate_init\n");
 		return ret;
 	}
 
 	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
-			NV_ERROR(dev, "validate vram_list\n");
+			NV_ERROR(drm, "validate vram_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -505,7 +508,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
-			NV_ERROR(dev, "validate gart_list\n");
+			NV_ERROR(drm, "validate gart_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -514,7 +517,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
-			NV_ERROR(dev, "validate both_list\n");
+			NV_ERROR(drm, "validate both_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -547,6 +550,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 				struct drm_nouveau_gem_pushbuf *req,
 				struct drm_nouveau_gem_pushbuf_bo *bo)
 {
553 struct nouveau_drm *drm = nouveau_newpriv(dev);
550 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; 554 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
551 int ret = 0; 555 int ret = 0;
552 unsigned i; 556 unsigned i;
@@ -562,7 +566,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
562 uint32_t data; 566 uint32_t data;
563 567
564 if (unlikely(r->bo_index > req->nr_buffers)) { 568 if (unlikely(r->bo_index > req->nr_buffers)) {
565 NV_ERROR(dev, "reloc bo index invalid\n"); 569 NV_ERROR(drm, "reloc bo index invalid\n");
566 ret = -EINVAL; 570 ret = -EINVAL;
567 break; 571 break;
568 } 572 }
@@ -572,7 +576,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
572 continue; 576 continue;
573 577
574 if (unlikely(r->reloc_bo_index > req->nr_buffers)) { 578 if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
575 NV_ERROR(dev, "reloc container bo index invalid\n"); 579 NV_ERROR(drm, "reloc container bo index invalid\n");
576 ret = -EINVAL; 580 ret = -EINVAL;
577 break; 581 break;
578 } 582 }
@@ -580,7 +584,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
580 584
581 if (unlikely(r->reloc_bo_offset + 4 > 585 if (unlikely(r->reloc_bo_offset + 4 >
582 nvbo->bo.mem.num_pages << PAGE_SHIFT)) { 586 nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
583 NV_ERROR(dev, "reloc outside of bo\n"); 587 NV_ERROR(drm, "reloc outside of bo\n");
584 ret = -EINVAL; 588 ret = -EINVAL;
585 break; 589 break;
586 } 590 }
@@ -589,7 +593,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
589 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, 593 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
590 &nvbo->kmap); 594 &nvbo->kmap);
591 if (ret) { 595 if (ret) {
592 NV_ERROR(dev, "failed kmap for reloc\n"); 596 NV_ERROR(drm, "failed kmap for reloc\n");
593 break; 597 break;
594 } 598 }
595 nvbo->validate_mapped = true; 599 nvbo->validate_mapped = true;
@@ -614,7 +618,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
614 ret = ttm_bo_wait(&nvbo->bo, false, false, false); 618 ret = ttm_bo_wait(&nvbo->bo, false, false, false);
615 spin_unlock(&nvbo->bo.bdev->fence_lock); 619 spin_unlock(&nvbo->bo.bdev->fence_lock);
616 if (ret) { 620 if (ret) {
617 NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret); 621 NV_ERROR(drm, "reloc wait_idle failed: %d\n", ret);
618 break; 622 break;
619 } 623 }
620 624
@@ -629,62 +633,67 @@ int
629nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, 633nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
630 struct drm_file *file_priv) 634 struct drm_file *file_priv)
631{ 635{
632 struct drm_nouveau_private *dev_priv = dev->dev_private; 636 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
637 struct nouveau_abi16_chan *temp;
638 struct nouveau_drm *drm = nouveau_drm(dev);
633 struct drm_nouveau_gem_pushbuf *req = data; 639 struct drm_nouveau_gem_pushbuf *req = data;
634 struct drm_nouveau_gem_pushbuf_push *push; 640 struct drm_nouveau_gem_pushbuf_push *push;
635 struct drm_nouveau_gem_pushbuf_bo *bo; 641 struct drm_nouveau_gem_pushbuf_bo *bo;
636 struct nouveau_channel *chan; 642 struct nouveau_channel *chan = NULL;
637 struct validate_op op; 643 struct validate_op op;
638 struct nouveau_fence *fence = NULL; 644 struct nouveau_fence *fence = NULL;
639 int i, j, ret = 0, do_reloc = 0; 645 int i, j, ret = 0, do_reloc = 0;
640 646
641 chan = nouveau_channel_get(file_priv, req->channel); 647 if (unlikely(!abi16))
642 if (IS_ERR(chan)) 648 return -ENOMEM;
643 return PTR_ERR(chan);
644 649
645 req->vram_available = dev_priv->fb_aper_free; 650 list_for_each_entry(temp, &abi16->channels, head) {
646 req->gart_available = dev_priv->gart_info.aper_free; 651 if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
652 chan = temp->chan;
653 break;
654 }
655 }
656
657 if (!chan)
658 return nouveau_abi16_put(abi16, -ENOENT);
659
660 req->vram_available = drm->gem.vram_available;
661 req->gart_available = drm->gem.gart_available;
647 if (unlikely(req->nr_push == 0)) 662 if (unlikely(req->nr_push == 0))
648 goto out_next; 663 goto out_next;
649 664
650 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { 665 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
651 NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n", 666 NV_ERROR(drm, "pushbuf push count exceeds limit: %d max %d\n",
652 req->nr_push, NOUVEAU_GEM_MAX_PUSH); 667 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
653 nouveau_channel_put(&chan); 668 return nouveau_abi16_put(abi16, -EINVAL);
654 return -EINVAL;
655 } 669 }
656 670
657 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { 671 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
658 NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n", 672 NV_ERROR(drm, "pushbuf bo count exceeds limit: %d max %d\n",
659 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); 673 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
660 nouveau_channel_put(&chan); 674 return nouveau_abi16_put(abi16, -EINVAL);
661 return -EINVAL;
662 } 675 }
663 676
664 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { 677 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
665 NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n", 678 NV_ERROR(drm, "pushbuf reloc count exceeds limit: %d max %d\n",
666 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); 679 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
667 nouveau_channel_put(&chan); 680 return nouveau_abi16_put(abi16, -EINVAL);
668 return -EINVAL;
669 } 681 }
670 682
671 push = u_memcpya(req->push, req->nr_push, sizeof(*push)); 683 push = u_memcpya(req->push, req->nr_push, sizeof(*push));
672 if (IS_ERR(push)) { 684 if (IS_ERR(push))
673 nouveau_channel_put(&chan); 685 return nouveau_abi16_put(abi16, PTR_ERR(push));
674 return PTR_ERR(push);
675 }
676 686
677 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); 687 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
678 if (IS_ERR(bo)) { 688 if (IS_ERR(bo)) {
679 kfree(push); 689 kfree(push);
680 nouveau_channel_put(&chan); 690 return nouveau_abi16_put(abi16, PTR_ERR(bo));
681 return PTR_ERR(bo);
682 } 691 }
683 692
684 /* Ensure all push buffers are on validate list */ 693 /* Ensure all push buffers are on validate list */
685 for (i = 0; i < req->nr_push; i++) { 694 for (i = 0; i < req->nr_push; i++) {
686 if (push[i].bo_index >= req->nr_buffers) { 695 if (push[i].bo_index >= req->nr_buffers) {
687 NV_ERROR(dev, "push %d buffer not in list\n", i); 696 NV_ERROR(drm, "push %d buffer not in list\n", i);
688 ret = -EINVAL; 697 ret = -EINVAL;
689 goto out_prevalid; 698 goto out_prevalid;
690 } 699 }
@@ -695,7 +704,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
695 req->nr_buffers, &op, &do_reloc); 704 req->nr_buffers, &op, &do_reloc);
696 if (ret) { 705 if (ret) {
697 if (ret != -ERESTARTSYS) 706 if (ret != -ERESTARTSYS)
698 NV_ERROR(dev, "validate: %d\n", ret); 707 NV_ERROR(drm, "validate: %d\n", ret);
699 goto out_prevalid; 708 goto out_prevalid;
700 } 709 }
701 710
@@ -703,7 +712,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
703 if (do_reloc) { 712 if (do_reloc) {
704 ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo); 713 ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
705 if (ret) { 714 if (ret) {
706 NV_ERROR(dev, "reloc apply: %d\n", ret); 715 NV_ERROR(drm, "reloc apply: %d\n", ret);
707 goto out; 716 goto out;
708 } 717 }
709 } 718 }
@@ -711,7 +720,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
711 if (chan->dma.ib_max) { 720 if (chan->dma.ib_max) {
712 ret = nouveau_dma_wait(chan, req->nr_push + 1, 16); 721 ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
713 if (ret) { 722 if (ret) {
714 NV_INFO(dev, "nv50cal_space: %d\n", ret); 723 NV_ERROR(drm, "nv50cal_space: %d\n", ret);
715 goto out; 724 goto out;
716 } 725 }
717 726
@@ -723,10 +732,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
723 push[i].length); 732 push[i].length);
724 } 733 }
725 } else 734 } else
726 if (dev_priv->chipset >= 0x25) { 735 if (nv_device(drm->device)->chipset >= 0x25) {
727 ret = RING_SPACE(chan, req->nr_push * 2); 736 ret = RING_SPACE(chan, req->nr_push * 2);
728 if (ret) { 737 if (ret) {
729 NV_ERROR(dev, "cal_space: %d\n", ret); 738 NV_ERROR(drm, "cal_space: %d\n", ret);
730 goto out; 739 goto out;
731 } 740 }
732 741
@@ -740,7 +749,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
740 } else { 749 } else {
741 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS)); 750 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
742 if (ret) { 751 if (ret) {
743 NV_ERROR(dev, "jmp_space: %d\n", ret); 752 NV_ERROR(drm, "jmp_space: %d\n", ret);
744 goto out; 753 goto out;
745 } 754 }
746 755
@@ -749,7 +758,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
749 bo[push[i].bo_index].user_priv; 758 bo[push[i].bo_index].user_priv;
750 uint32_t cmd; 759 uint32_t cmd;
751 760
752 cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2); 761 cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
753 cmd |= 0x20000000; 762 cmd |= 0x20000000;
754 if (unlikely(cmd != req->suffix0)) { 763 if (unlikely(cmd != req->suffix0)) {
755 if (!nvbo->kmap.virtual) { 764 if (!nvbo->kmap.virtual) {
@@ -778,7 +787,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
778 787
779 ret = nouveau_fence_new(chan, &fence); 788 ret = nouveau_fence_new(chan, &fence);
780 if (ret) { 789 if (ret) {
781 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); 790 NV_ERROR(drm, "error fencing pushbuf: %d\n", ret);
782 WIND_RING(chan); 791 WIND_RING(chan);
783 goto out; 792 goto out;
784 } 793 }
@@ -796,17 +805,16 @@ out_next:
796 req->suffix0 = 0x00000000; 805 req->suffix0 = 0x00000000;
797 req->suffix1 = 0x00000000; 806 req->suffix1 = 0x00000000;
798 } else 807 } else
799 if (dev_priv->chipset >= 0x25) { 808 if (nv_device(drm->device)->chipset >= 0x25) {
800 req->suffix0 = 0x00020000; 809 req->suffix0 = 0x00020000;
801 req->suffix1 = 0x00000000; 810 req->suffix1 = 0x00000000;
802 } else { 811 } else {
803 req->suffix0 = 0x20000000 | 812 req->suffix0 = 0x20000000 |
804 (chan->pushbuf_base + ((chan->dma.cur + 2) << 2)); 813 (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
805 req->suffix1 = 0x00000000; 814 req->suffix1 = 0x00000000;
806 } 815 }
807 816
808 nouveau_channel_put(&chan); 817 return nouveau_abi16_put(abi16, ret);
809 return ret;
810} 818}
811 819
812static inline uint32_t 820static inline uint32_t
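The hunk above replaces the old nouveau_channel_get() call with a walk over the ABI16 client state: the ioctl scans abi16->channels and matches each channel's object handle against (NVDRM_CHAN | req->channel), and every exit path now funnels through nouveau_abi16_put(). A minimal, self-contained C sketch of that lookup-by-composed-handle pattern follows; all names and the CHAN_BASE value are hypothetical stand-ins, not the kernel's.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the kernel structures. */
struct chan {
	unsigned handle;
	struct chan *next;
};

#define CHAN_BASE 0xcccc0000u	/* stand-in for NVDRM_CHAN */

/* Walk the per-client channel list and match on the composed handle,
 * mirroring the list_for_each_entry() loop in the ioctl above. */
static struct chan *
lookup_chan(struct chan *head, unsigned id)
{
	struct chan *c;
	for (c = head; c; c = c->next) {
		if (c->handle == (CHAN_BASE | id))
			return c;
	}
	return NULL;	/* the ioctl maps this to -ENOENT */
}

int
main(void)
{
	struct chan c1 = { CHAN_BASE | 1, NULL };
	struct chan c0 = { CHAN_BASE | 0, &c1 };
	printf("chan 1 %sfound\n", lookup_chan(&c0, 1) ? "" : "not ");
	return 0;
}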
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpuobj.c b/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
deleted file mode 100644
index a774b7ad0f21..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
+++ /dev/null
@@ -1,518 +0,0 @@
1/*
2 * Copyright (C) 2006 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28/*
29 * Authors:
30 * Ben Skeggs <darktama@iinet.net.au>
31 */
32
33#include "drmP.h"
34#include "drm.h"
35#include "nouveau_drv.h"
36#include <nouveau_drm.h>
37#include <engine/fifo.h>
38#include <core/ramht.h>
39#include "nouveau_software.h"
40
41struct nouveau_gpuobj_method {
42 struct list_head head;
43 u32 mthd;
44 int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
45};
46
47struct nouveau_gpuobj_class {
48 struct list_head head;
49 struct list_head methods;
50 u32 id;
51 u32 engine;
52};
53
54int
55nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
56{
57 struct drm_nouveau_private *dev_priv = dev->dev_private;
58 struct nouveau_gpuobj_class *oc;
59
60 oc = kzalloc(sizeof(*oc), GFP_KERNEL);
61 if (!oc)
62 return -ENOMEM;
63
64 INIT_LIST_HEAD(&oc->methods);
65 oc->id = class;
66 oc->engine = engine;
67 list_add(&oc->head, &dev_priv->classes);
68 return 0;
69}
70
71int
72nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
73 int (*exec)(struct nouveau_channel *, u32, u32, u32))
74{
75 struct drm_nouveau_private *dev_priv = dev->dev_private;
76 struct nouveau_gpuobj_method *om;
77 struct nouveau_gpuobj_class *oc;
78
79 list_for_each_entry(oc, &dev_priv->classes, head) {
80 if (oc->id == class)
81 goto found;
82 }
83
84 return -EINVAL;
85
86found:
87 om = kzalloc(sizeof(*om), GFP_KERNEL);
88 if (!om)
89 return -ENOMEM;
90
91 om->mthd = mthd;
92 om->exec = exec;
93 list_add(&om->head, &oc->methods);
94 return 0;
95}
96
97int
98nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
99 u32 class, u32 mthd, u32 data)
100{
101 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
102 struct nouveau_gpuobj_method *om;
103 struct nouveau_gpuobj_class *oc;
104
105 list_for_each_entry(oc, &dev_priv->classes, head) {
106 if (oc->id != class)
107 continue;
108
109 list_for_each_entry(om, &oc->methods, head) {
110 if (om->mthd == mthd)
111 return om->exec(chan, class, mthd, data);
112 }
113 }
114
115 return -ENOENT;
116}
117
118int
119nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
120 u32 class, u32 mthd, u32 data)
121{
122 struct drm_nouveau_private *dev_priv = dev->dev_private;
123 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
124 struct nouveau_channel *chan = NULL;
125 unsigned long flags;
126 int ret = -EINVAL;
127
128 spin_lock_irqsave(&dev_priv->channels.lock, flags);
129 if (chid >= 0 && chid < pfifo->channels)
130 chan = dev_priv->channels.ptr[chid];
131 if (chan)
132 ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
133 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
134 return ret;
135}
136
137void
138nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
139 u64 base, u64 size, int target, int access,
140 u32 type, u32 comp)
141{
142 struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
143 u32 flags0;
144
145 flags0 = (comp << 29) | (type << 22) | class;
146 flags0 |= 0x00100000;
147
148 switch (access) {
149 case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
150 case NV_MEM_ACCESS_RW:
151 case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
152 default:
153 break;
154 }
155
156 switch (target) {
157 case NV_MEM_TARGET_VRAM:
158 flags0 |= 0x00010000;
159 break;
160 case NV_MEM_TARGET_PCI:
161 flags0 |= 0x00020000;
162 break;
163 case NV_MEM_TARGET_PCI_NOSNOOP:
164 flags0 |= 0x00030000;
165 break;
166 case NV_MEM_TARGET_GART:
167 base += dev_priv->gart_info.aper_base;
168 default:
169 flags0 &= ~0x00100000;
170 break;
171 }
172
173 /* convert to base + limit */
174 size = (base + size) - 1;
175
176 nv_wo32(obj, offset + 0x00, flags0);
177 nv_wo32(obj, offset + 0x04, lower_32_bits(size));
178 nv_wo32(obj, offset + 0x08, lower_32_bits(base));
179 nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
180 upper_32_bits(base));
181 nv_wo32(obj, offset + 0x10, 0x00000000);
182 nv_wo32(obj, offset + 0x14, 0x00000000);
183
184 nvimem_flush(obj->dev);
185}
186
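nv50_gpuobj_dma_init() above packs a DMA object descriptor: compression in bits 31:29 of the first word, type in bits 28:22, the class in the low bits, and the size converted to an inclusive limit split across the following words. A self-contained sketch of just that packing, under the caveat that the target and access bits are omitted for brevity and all names are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the flags0/base/limit packing above.
 * Target/access bits (e.g. 0x00010000 for VRAM) are left out. */
static void
pack_dma(uint32_t out[6], uint32_t cls, uint32_t type, uint32_t comp,
	 uint64_t base, uint64_t size)
{
	uint64_t limit = base + size - 1;	/* convert to base + limit */
	uint32_t flags0 = (comp << 29) | (type << 22) | cls | 0x00100000;

	out[0] = flags0;
	out[1] = (uint32_t)limit;			/* limit, low bits */
	out[2] = (uint32_t)base;			/* base, low bits  */
	out[3] = (uint32_t)(limit >> 32) << 24 |	/* high bits       */
		 (uint32_t)(base >> 32);
	out[4] = 0;
	out[5] = 0;
}

int
main(void)
{
	uint32_t w[6];
	pack_dma(w, 0x003d, 0, 0, 0, 1ULL << 40);
	printf("%08x %08x %08x %08x\n", w[0], w[1], w[2], w[3]);
	return 0;
}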
187int
188nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
189 int target, int access, u32 type, u32 comp,
190 struct nouveau_gpuobj **pobj)
191{
192 struct drm_device *dev = chan->dev;
193 int ret;
194
195 ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
196 if (ret)
197 return ret;
198
199 nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
200 access, type, comp);
201 return 0;
202}
203
204int
205nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
206 u64 size, int access, int target,
207 struct nouveau_gpuobj **pobj)
208{
209 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
210 struct drm_device *dev = chan->dev;
211 struct nouveau_gpuobj *obj;
212 u32 flags0, flags2;
213 int ret;
214
215 if (dev_priv->card_type >= NV_50) {
216 u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
217 u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;
218
219 return nv50_gpuobj_dma_new(chan, class, base, size,
220 target, access, type, comp, pobj);
221 }
222
223 if (target == NV_MEM_TARGET_GART) {
224 struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
225
226 if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
227 if (base == 0) {
228 nouveau_gpuobj_ref(gart, pobj);
229 return 0;
230 }
231
232 base = nouveau_sgdma_get_physical(dev, base);
233 target = NV_MEM_TARGET_PCI;
234 } else {
235 base += dev_priv->gart_info.aper_base;
236 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
237 target = NV_MEM_TARGET_PCI_NOSNOOP;
238 else
239 target = NV_MEM_TARGET_PCI;
240 }
241 }
242
243 flags0 = class;
244 flags0 |= 0x00003000; /* PT present, PT linear */
245 flags2 = 0;
246
247 switch (target) {
248 case NV_MEM_TARGET_PCI:
249 flags0 |= 0x00020000;
250 break;
251 case NV_MEM_TARGET_PCI_NOSNOOP:
252 flags0 |= 0x00030000;
253 break;
254 default:
255 break;
256 }
257
258 switch (access) {
259 case NV_MEM_ACCESS_RO:
260 flags0 |= 0x00004000;
261 break;
262 case NV_MEM_ACCESS_WO:
263 flags0 |= 0x00008000;
264 default:
265 flags2 |= 0x00000002;
266 break;
267 }
268
269 flags0 |= (base & 0x00000fff) << 20;
270 flags2 |= (base & 0xfffff000);
271
272 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
273 if (ret)
274 return ret;
275
276 nv_wo32(obj, 0x00, flags0);
277 nv_wo32(obj, 0x04, size - 1);
278 nv_wo32(obj, 0x08, flags2);
279 nv_wo32(obj, 0x0c, flags2);
280
281 obj->engine = NVOBJ_ENGINE_SW;
282 obj->class = class;
283 *pobj = obj;
284 return 0;
285}
286
287int
288nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
289{
290 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
291 struct drm_device *dev = chan->dev;
292 struct nouveau_gpuobj_class *oc;
293 int ret;
294
295 NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
296
297 list_for_each_entry(oc, &dev_priv->classes, head) {
298 struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];
299
300 if (oc->id != class)
301 continue;
302
303 if (!chan->engctx[oc->engine]) {
304 ret = eng->context_new(chan, oc->engine);
305 if (ret)
306 return ret;
307 }
308
309 return eng->object_new(chan, oc->engine, handle, class);
310 }
311
312 return -EINVAL;
313}
314
315static int
316nv04_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
317{
318 struct drm_device *dev = chan->dev;
319 int ret;
320
321 ret = nouveau_gpuobj_new(dev, NULL, 0x10000, 0x1000,
322 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
323 if (ret)
324 return ret;
325
326 return 0;
327}
328
329static int
330nv50_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
331{
332 struct drm_device *dev = chan->dev;
333 int ret;
334
335 ret = nouveau_gpuobj_new(dev, NULL, 0x10000, 0x1000,
336 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
337 if (ret)
338 return ret;
339
340 ret = nouveau_gpuobj_new(dev, chan, 0x0200, 0, 0, &chan->ramfc);
341 if (ret)
342 return ret;
343
344 ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, 0, &chan->engptr);
345 if (ret)
346 return ret;
347
348 ret = nouveau_gpuobj_new(dev, chan, 0x4000, 0, 0, &chan->vm_pd);
349 if (ret)
350 return ret;
351
352 return 0;
353}
354
355static int
356nv84_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
357{
358 struct drm_device *dev = chan->dev;
359 int ret;
360
361 ret = nouveau_gpuobj_new(dev, NULL, 0x10000, 0x1000,
362 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
363 if (ret)
364 return ret;
365
366 ret = nouveau_gpuobj_new(dev, chan, 0x0200, 0, 0, &chan->engptr);
367 if (ret)
368 return ret;
369
370 ret = nouveau_gpuobj_new(dev, chan, 0x4000, 0, 0, &chan->vm_pd);
371 if (ret)
372 return ret;
373
374 return 0;
375}
376
377static int
378nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
379{
380 struct drm_device *dev = chan->dev;
381 int ret;
382
383 ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
384 if (ret)
385 return ret;
386
387 ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &chan->vm_pd);
388 if (ret)
389 return ret;
390
391 nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
392
393 nv_wo32(chan->ramin, 0x0200, lower_32_bits(chan->vm_pd->addr));
394 nv_wo32(chan->ramin, 0x0204, upper_32_bits(chan->vm_pd->addr));
395 nv_wo32(chan->ramin, 0x0208, 0xffffffff);
396 nv_wo32(chan->ramin, 0x020c, 0x000000ff);
397
398 return 0;
399}
400
401int
402nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
403 uint32_t vram_h, uint32_t tt_h)
404{
405 struct drm_device *dev = chan->dev;
406 struct drm_nouveau_private *dev_priv = dev->dev_private;
407 struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
408 struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
409 struct nouveau_gpuobj *vram = NULL, *tt = NULL;
410 int ret;
411
412 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
413 if (dev_priv->card_type >= NV_C0)
414 return nvc0_gpuobj_channel_init(chan, vm);
415
416 /* Allocate a chunk of memory for per-channel object storage */
417 if (dev_priv->chipset >= 0x84)
418 ret = nv84_gpuobj_channel_init_pramin(chan);
419 else
420 if (dev_priv->chipset == 0x50)
421 ret = nv50_gpuobj_channel_init_pramin(chan);
422 else
423 ret = nv04_gpuobj_channel_init_pramin(chan);
424 if (ret) {
425 NV_ERROR(dev, "init pramin\n");
426 return ret;
427 }
428
429 /* NV50 VM
430 * - Allocate per-channel page-directory
431 * - Link with shared channel VM
432 */
433 if (vm)
434 nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
435
436 /* RAMHT */
437 if (dev_priv->card_type < NV_50) {
438 nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
439 } else {
440 struct nouveau_gpuobj *ramht = NULL;
441
442 ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
443 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
444 if (ret)
445 return ret;
446
447 ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
448 nouveau_gpuobj_ref(NULL, &ramht);
449 if (ret)
450 return ret;
451 }
452
453 /* VRAM ctxdma */
454 if (dev_priv->card_type >= NV_50) {
455 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
456 0, (1ULL << 40), NV_MEM_ACCESS_RW,
457 NV_MEM_TARGET_VM, &vram);
458 if (ret) {
459 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
460 return ret;
461 }
462 } else {
463 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
464 0, dev_priv->fb_available_size,
465 NV_MEM_ACCESS_RW,
466 NV_MEM_TARGET_VRAM, &vram);
467 if (ret) {
468 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
469 return ret;
470 }
471 }
472
473 ret = nouveau_ramht_insert(chan, vram_h, vram);
474 nouveau_gpuobj_ref(NULL, &vram);
475 if (ret) {
476 NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
477 return ret;
478 }
479
480 /* TT memory ctxdma */
481 if (dev_priv->card_type >= NV_50) {
482 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
483 0, (1ULL << 40), NV_MEM_ACCESS_RW,
484 NV_MEM_TARGET_VM, &tt);
485 } else {
486 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
487 0, dev_priv->gart_info.aper_size,
488 NV_MEM_ACCESS_RW,
489 NV_MEM_TARGET_GART, &tt);
490 }
491
492 if (ret) {
493 NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
494 return ret;
495 }
496
497 ret = nouveau_ramht_insert(chan, tt_h, tt);
498 nouveau_gpuobj_ref(NULL, &tt);
499 if (ret) {
500 NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
501 return ret;
502 }
503
504 return 0;
505}
506
507void
508nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
509{
510 NV_DEBUG(chan->dev, "ch%d\n", chan->id);
511
512 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
513 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
514 nouveau_gpuobj_ref(NULL, &chan->ramfc);
515 nouveau_gpuobj_ref(NULL, &chan->engptr);
516
517 nouveau_gpuobj_ref(NULL, &chan->ramin);
518}
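The core of the deleted file was a two-level registry: a list of classes, each carrying its own list of methods, with nouveau_gpuobj_mthd_call() matching on class id and then method id before invoking the exec callback. A self-contained C sketch of that dispatch shape, using plain next pointers instead of list_head; every name here is hypothetical:

#include <stddef.h>
#include <stdio.h>

struct method {
	unsigned mthd;
	int (*exec)(unsigned cls, unsigned mthd, unsigned data);
	struct method *next;
};

struct cls {
	unsigned id;
	struct method *methods;
	struct cls *next;
};

/* Two-level walk: find the class, then the method, then dispatch. */
static int
mthd_call(struct cls *classes, unsigned cls_id, unsigned mthd, unsigned data)
{
	struct cls *c;
	struct method *m;

	for (c = classes; c; c = c->next) {
		if (c->id != cls_id)
			continue;
		for (m = c->methods; m; m = m->next) {
			if (m->mthd == mthd)
				return m->exec(cls_id, mthd, data);
		}
	}
	return -2;	/* stands in for -ENOENT */
}

static int
dummy_exec(unsigned cls, unsigned mthd, unsigned data)
{
	printf("class %04x mthd %04x data %08x\n", cls, mthd, data);
	return 0;
}

int
main(void)
{
	struct method m = { 0x0500, dummy_exec, NULL };
	struct cls c = { 0x506e, &m, NULL };
	return mthd_call(&c, 0x506e, 0x0500, 0x1234) ? 1 : 0;
}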
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 1a75a96cef26..25e2e63cc53a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -36,7 +36,6 @@
36#include "nouveau_drv.h" 36#include "nouveau_drv.h"
37#include "nouveau_reg.h" 37#include "nouveau_reg.h"
38#include <core/ramht.h> 38#include <core/ramht.h>
39#include "nouveau_util.h"
40 39
41void 40void
42nouveau_irq_preinstall(struct drm_device *dev) 41nouveau_irq_preinstall(struct drm_device *dev)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 73176bcd1b64..9c35d14fe9d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -37,146 +37,6 @@
37 37
38#include "nouveau_drv.h" 38#include "nouveau_drv.h"
39#include "nouveau_pm.h" 39#include "nouveau_pm.h"
40#include <core/mm.h>
41#include <engine/fifo.h>
42#include "nouveau_fence.h"
43
44/*
45 * Cleanup everything
46 */
47void
48nouveau_mem_vram_fini(struct drm_device *dev)
49{
50 struct drm_nouveau_private *dev_priv = dev->dev_private;
51
52 ttm_bo_device_release(&dev_priv->ttm.bdev);
53
54 nouveau_ttm_global_release(dev_priv);
55
56 if (dev_priv->fb_mtrr >= 0) {
57 drm_mtrr_del(dev_priv->fb_mtrr,
58 pci_resource_start(dev->pdev, 1),
59 pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
60 dev_priv->fb_mtrr = -1;
61 }
62}
63
64void
65nouveau_mem_gart_fini(struct drm_device *dev)
66{
67 nouveau_sgdma_takedown(dev);
68}
69
70int
71nouveau_mem_vram_init(struct drm_device *dev)
72{
73 struct drm_nouveau_private *dev_priv = dev->dev_private;
74 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
75 int ret, dma_bits;
76
77 dma_bits = 32;
78 if (dev_priv->card_type >= NV_50) {
79 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
80 dma_bits = 40;
81 } else
82 if (0 && pci_is_pcie(dev->pdev) &&
83 dev_priv->chipset > 0x40 &&
84 dev_priv->chipset != 0x45) {
85 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
86 dma_bits = 39;
87 }
88
89 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
90 if (ret)
91 return ret;
92 ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
93 if (ret) {
94 /* Reset to default value. */
95 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
96 }
97
98
99 ret = nouveau_ttm_global_init(dev_priv);
100 if (ret)
101 return ret;
102
103 ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
104 dev_priv->ttm.bo_global_ref.ref.object,
105 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
106 dma_bits <= 32 ? true : false);
107 if (ret) {
108 NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
109 return ret;
110 }
111
112 dev_priv->fb_available_size = nvfb_vram_size(dev);
113 dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
114 if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
115 dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
116 dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
117
118 dev_priv->fb_available_size -= nvimem_reserved(dev);
119 dev_priv->fb_aper_free = dev_priv->fb_available_size;
120
121 /* mappable vram */
122 ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
123 dev_priv->fb_available_size >> PAGE_SHIFT);
124 if (ret) {
125 NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
126 return ret;
127 }
128
129 if (dev_priv->card_type < NV_50) {
130 ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
131 0, 0, NULL, &dev_priv->vga_ram);
132 if (ret == 0)
133 ret = nouveau_bo_pin(dev_priv->vga_ram,
134 TTM_PL_FLAG_VRAM);
135
136 if (ret) {
137 NV_WARN(dev, "failed to reserve VGA memory\n");
138 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
139 }
140 }
141
142 dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
143 pci_resource_len(dev->pdev, 1),
144 DRM_MTRR_WC);
145 return 0;
146}
147
148int
149nouveau_mem_gart_init(struct drm_device *dev)
150{
151 struct drm_nouveau_private *dev_priv = dev->dev_private;
152 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
153 int ret;
154
155 if (!nvdrm_gart_init(dev, &dev_priv->gart_info.aper_base,
156 &dev_priv->gart_info.aper_size))
157 dev_priv->gart_info.type = NOUVEAU_GART_AGP;
158
159 if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
160 ret = nouveau_sgdma_init(dev);
161 if (ret) {
162 NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
163 return ret;
164 }
165 }
166
167 NV_INFO(dev, "%d MiB GART (aperture)\n",
168 (int)(dev_priv->gart_info.aper_size >> 20));
169 dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;
170
171 ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
172 dev_priv->gart_info.aper_size >> PAGE_SHIFT);
173 if (ret) {
174 NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
175 return ret;
176 }
177
178 return 0;
179}
180 40
181static int 41static int
182nv40_mem_timing_calc(struct drm_device *dev, u32 freq, 42nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
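The removed nouveau_mem_vram_init() negotiated the DMA mask before touching TTM: default to 32 address bits, widen to 40 on NV50+ when the device supports it, and fall back to a 32-bit consistent mask if the wider one fails. Note the 39-bit PCIe branch was compiled out with a leading `0 &&`. A small sketch of the width probe only, with a hypothetical dma_supported() standing in for pci_dma_supported() and 0x50 standing in for the NV_50 card type:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical platform capability check; say the platform only
 * handles up to 36 address bits. */
static bool
dma_supported(int bits)
{
	return bits <= 36;
}

/* Mirror the probe order above: prefer a wide mask when the card
 * generation supports it, otherwise stay at 32 bits. */
static int
pick_dma_bits(int card_type)
{
	int bits = 32;
	if (card_type >= 0x50 && dma_supported(40))
		bits = 40;
	return bits;
}

int
main(void)
{
	printf("nv50: %d bits\n", pick_dma_bits(0x50));
	printf("nv40: %d bits\n", pick_dma_bits(0x40));
	return 0;
}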
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
deleted file mode 100644
index 2cc4779b4299..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ /dev/null
@@ -1,151 +0,0 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "nouveau_drv.h"
31#include <core/ramht.h>
32
33int
34nouveau_notifier_init_channel(struct nouveau_channel *chan)
35{
36 struct drm_device *dev = chan->dev;
37 struct drm_nouveau_private *dev_priv = dev->dev_private;
38 struct nouveau_bo *ntfy = NULL;
39 uint32_t flags, ttmpl;
40 int ret;
41
42 if (nouveau_vram_notify) {
43 flags = NOUVEAU_GEM_DOMAIN_VRAM;
44 ttmpl = TTM_PL_FLAG_VRAM;
45 } else {
46 flags = NOUVEAU_GEM_DOMAIN_GART;
47 ttmpl = TTM_PL_FLAG_TT;
48 }
49
50 ret = nouveau_gem_new(dev, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
51 if (ret)
52 return ret;
53
54 ret = nouveau_bo_pin(ntfy, ttmpl);
55 if (ret)
56 goto out_err;
57
58 ret = nouveau_bo_map(ntfy);
59 if (ret)
60 goto out_err;
61
62 if (dev_priv->card_type >= NV_50) {
63 ret = nouveau_bo_vma_add(ntfy, chan->vm, &chan->notifier_vma);
64 if (ret)
65 goto out_err;
66 }
67
68 ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
69 if (ret)
70 goto out_err;
71
72 chan->notifier_bo = ntfy;
73out_err:
74 if (ret) {
75 nouveau_bo_vma_del(ntfy, &chan->notifier_vma);
76 drm_gem_object_unreference_unlocked(ntfy->gem);
77 }
78
79 return ret;
80}
81
82void
83nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
84{
85 struct drm_device *dev = chan->dev;
86
87 if (!chan->notifier_bo)
88 return;
89
90 nouveau_bo_vma_del(chan->notifier_bo, &chan->notifier_vma);
91 nouveau_bo_unmap(chan->notifier_bo);
92 mutex_lock(&dev->struct_mutex);
93 nouveau_bo_unpin(chan->notifier_bo);
94 mutex_unlock(&dev->struct_mutex);
95 drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
96 drm_mm_takedown(&chan->notifier_heap);
97}
98
99int
100nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
101 int size, uint32_t start, uint32_t end,
102 uint32_t *b_offset)
103{
104 struct drm_device *dev = chan->dev;
105 struct drm_nouveau_private *dev_priv = dev->dev_private;
106 struct nouveau_gpuobj *nobj = NULL;
107 struct drm_mm_node *mem;
108 uint64_t offset;
109 int target, ret;
110
111 mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
112 start, end, 0);
113 if (mem)
114 mem = drm_mm_get_block_range(mem, size, 0, start, end);
115 if (!mem) {
116 NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
117 return -ENOMEM;
118 }
119
120 if (dev_priv->card_type < NV_50) {
121 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
122 target = NV_MEM_TARGET_VRAM;
123 else
124 target = NV_MEM_TARGET_GART;
125 offset = chan->notifier_bo->bo.offset;
126 } else {
127 target = NV_MEM_TARGET_VM;
128 offset = chan->notifier_vma.offset;
129 }
130 offset += mem->start;
131
132 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
133 mem->size, NV_MEM_ACCESS_RW, target,
134 &nobj);
135 if (ret) {
136 drm_mm_put_block(mem);
137 NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
138 return ret;
139 }
140
141 ret = nouveau_ramht_insert(chan, handle, nobj);
142 nouveau_gpuobj_ref(NULL, &nobj);
143 if (ret) {
144 drm_mm_put_block(mem);
145 NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret);
146 return ret;
147 }
148
149 *b_offset = mem->start;
150 return 0;
151}
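The deleted notifier code suballocated blocks out of a single pinned buffer with drm_mm, then built a ctxdma at base + block_start, choosing the target from where the buffer actually sits: VRAM or GART before NV50, the per-channel VM afterwards. A condensed, hypothetical sketch of the address and target selection:

#include <stdint.h>
#include <stdio.h>

enum target { TGT_VRAM, TGT_GART, TGT_VM };

/* Hypothetical condensation of the deleted nouveau_notifier_alloc():
 * pre-NV50 the ctxdma targets the physical placement of the notifier
 * buffer; NV50+ goes through the per-channel VM instead. */
static uint64_t
notifier_addr(int card_type, int bo_in_vram, uint64_t bo_offset,
	      uint64_t vma_offset, uint32_t block_start, enum target *tgt)
{
	uint64_t base;

	if (card_type < 0x50) {
		*tgt = bo_in_vram ? TGT_VRAM : TGT_GART;
		base = bo_offset;
	} else {
		*tgt = TGT_VM;
		base = vma_offset;
	}
	return base + block_start;	/* suballocated block in the BO */
}

int
main(void)
{
	enum target t;
	uint64_t a = notifier_addr(0x50, 1, 0, 0x10000000, 0x20, &t);
	printf("addr %#llx target %d\n", (unsigned long long)a, (int)t);
	return 0;
}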
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index cce47fa7cb52..de0b81fbdcc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -27,7 +27,6 @@
27 27
28#include "nouveau_drv.h" 28#include "nouveau_drv.h"
29#include <nouveau_drm.h> 29#include <nouveau_drm.h>
30#include "nouveau_dma.h"
31 30
32#include <linux/dma-buf.h> 31#include <linux/dma-buf.h>
33 32
diff --git a/drivers/gpu/drm/nouveau/nouveau_revcompat.c b/drivers/gpu/drm/nouveau/nouveau_revcompat.c
new file mode 100644
index 000000000000..d5c3390503db
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_revcompat.c
@@ -0,0 +1,22 @@
1#include "nouveau_revcompat.h"
2#include "nouveau_drv.h"
3#include "nv50_display.h"
4
5struct nouveau_drm *
6nouveau_newpriv(struct drm_device *dev)
7{
8 struct drm_nouveau_private *dev_priv = dev->dev_private;
9 return dev_priv->newpriv;
10}
11
12struct nouveau_bo *
13nv50sema(struct drm_device *dev, int crtc)
14{
15 return nv50_display(dev)->crtc[crtc].sem.bo;
16}
17
18struct nouveau_bo *
19nvd0sema(struct drm_device *dev, int crtc)
20{
21 return nvd0_display_crtc_sema(dev, crtc);
22}
diff --git a/drivers/gpu/drm/nouveau/nouveau_revcompat.h b/drivers/gpu/drm/nouveau/nouveau_revcompat.h
new file mode 100644
index 000000000000..41cf61f1415f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_revcompat.h
@@ -0,0 +1,12 @@
1#ifndef __NOUVEAU_REVCOMPAT_H__
2#define __NOUVEAU_REVCOMPAT_H__
3
4#include "drmP.h"
5
6struct nouveau_drm *
7nouveau_newpriv(struct drm_device *);
8
9struct nouveau_bo *nv50sema(struct drm_device *dev, int crtc);
10struct nouveau_bo *nvd0sema(struct drm_device *dev, int crtc);
11
12#endif
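The two new revcompat files are a deliberate shim: the legacy drm_nouveau_private keeps a pointer to the new nouveau_drm, and nouveau_newpriv() hides that indirection so unported code only needs a drm_device. A toy version of the pattern, with every type reduced to a hypothetical minimal struct:

#include <stdio.h>

/* Hypothetical miniature of the revcompat shim: the legacy private
 * carries a pointer to the new one, and a single accessor hides the
 * indirection from code that only sees the legacy type. */
struct new_priv { int dummy; };
struct old_priv { struct new_priv *newpriv; };
struct device  { struct old_priv *dev_private; };

static struct new_priv *
newpriv(struct device *dev)
{
	return dev->dev_private->newpriv;
}

int
main(void)
{
	struct new_priv n = { 42 };
	struct old_priv o = { &n };
	struct device d = { &o };
	printf("%d\n", newpriv(&d)->dummy);
	return 0;
}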
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 464beda94c58..ca5492ac2da5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,11 +1,10 @@
1#include "drmP.h"
2#include "nouveau_drv.h"
3#include <linux/pagemap.h> 1#include <linux/pagemap.h>
4#include <linux/slab.h> 2#include <linux/slab.h>
5 3
6#define NV_CTXDMA_PAGE_SHIFT 12 4#include <subdev/fb.h>
7#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT) 5
8#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1) 6#include "nouveau_drm.h"
7#include "nouveau_ttm.h"
9 8
10struct nouveau_sgdma_be { 9struct nouveau_sgdma_be {
11 /* this has to be the first field so populate/unpopulated in 10 /* this has to be the first field so populate/unpopulated in
@@ -22,7 +21,6 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm)
22 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; 21 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
23 22
24 if (ttm) { 23 if (ttm) {
25 NV_DEBUG(nvbe->dev, "\n");
26 ttm_dma_tt_fini(&nvbe->ttm); 24 ttm_dma_tt_fini(&nvbe->ttm);
27 kfree(nvbe); 25 kfree(nvbe);
28 } 26 }
@@ -93,16 +91,18 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
93 unsigned long size, uint32_t page_flags, 91 unsigned long size, uint32_t page_flags,
94 struct page *dummy_read_page) 92 struct page *dummy_read_page)
95{ 93{
96 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 94 struct nouveau_drm *drm = nouveau_bdev(bdev);
97 struct drm_device *dev = dev_priv->dev;
98 struct nouveau_sgdma_be *nvbe; 95 struct nouveau_sgdma_be *nvbe;
99 96
100 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); 97 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
101 if (!nvbe) 98 if (!nvbe)
102 return NULL; 99 return NULL;
103 100
104 nvbe->dev = dev; 101 nvbe->dev = drm->dev;
105 nvbe->ttm.ttm.func = dev_priv->gart_info.func; 102 if (nv_device(drm->device)->card_type < NV_50)
103 nvbe->ttm.ttm.func = &nv04_sgdma_backend;
104 else
105 nvbe->ttm.ttm.func = &nv50_sgdma_backend;
106 106
107 if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) { 107 if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
108 kfree(nvbe); 108 kfree(nvbe);
@@ -110,51 +110,3 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
110 } 110 }
111 return &nvbe->ttm.ttm; 111 return &nvbe->ttm.ttm;
112} 112}
113
114int
115nouveau_sgdma_init(struct drm_device *dev)
116{
117 struct drm_nouveau_private *dev_priv = dev->dev_private;
118 u32 aper_size;
119
120 if (dev_priv->card_type >= NV_50)
121 aper_size = 512 * 1024 * 1024;
122 else
123 aper_size = 128 * 1024 * 1024;
124
125 if (dev_priv->card_type >= NV_50) {
126 dev_priv->gart_info.aper_base = 0;
127 dev_priv->gart_info.aper_size = aper_size;
128 dev_priv->gart_info.type = NOUVEAU_GART_HW;
129 dev_priv->gart_info.func = &nv50_sgdma_backend;
130 } else {
131 dev_priv->gart_info.aper_base = 0;
132 dev_priv->gart_info.aper_size = aper_size;
133 dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
134 dev_priv->gart_info.func = &nv04_sgdma_backend;
135 dev_priv->gart_info.sg_ctxdma = nv04vm_refdma(dev);
136 }
137
138 return 0;
139}
140
141void
142nouveau_sgdma_takedown(struct drm_device *dev)
143{
144 struct drm_nouveau_private *dev_priv = dev->dev_private;
145
146 nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
147}
148
149uint32_t
150nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
151{
152 struct drm_nouveau_private *dev_priv = dev->dev_private;
153 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
154 int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
155
156 BUG_ON(dev_priv->card_type >= NV_50);
157
158 return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
159 (offset & NV_CTXDMA_PAGE_MASK);
160}
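With the gart_info bookkeeping gone, nouveau_sgdma_create_ttm() now picks the TTM backend function table per object from the card generation, rather than reading a pointer cached at init time. A sketch of that selection, where strings stand in for the real ttm_backend_func tables and the 0x50 cutoff mirrors the NV_50 check in the hunk above:

#include <stdio.h>

/* Hypothetical sketch of the backend selection now done inline in
 * nouveau_sgdma_create_ttm(): one function table per GPU generation,
 * chosen at object-creation time instead of being cached at init. */
struct backend { const char *name; };

static const struct backend nv04_be = { "nv04_sgdma_backend" };
static const struct backend nv50_be = { "nv50_sgdma_backend" };

static const struct backend *
pick_backend(int card_type)
{
	return card_type < 0x50 ? &nv04_be : &nv50_be;
}

int
main(void)
{
	printf("%s\n", pick_backend(0x40)->name);
	printf("%s\n", pick_backend(0xc0)->name);
	return 0;
}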
diff --git a/drivers/gpu/drm/nouveau/nouveau_software.h b/drivers/gpu/drm/nouveau/nouveau_software.h
deleted file mode 100644
index 2105a9eef52c..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_software.h
+++ /dev/null
@@ -1,60 +0,0 @@
1#ifndef __NOUVEAU_SOFTWARE_H__
2#define __NOUVEAU_SOFTWARE_H__
3
4#include "nouveau_fence.h"
5
6struct nouveau_software_priv {
7 struct nouveau_exec_engine base;
8 struct list_head vblank;
9 spinlock_t peephole_lock;
10};
11
12struct nouveau_software_chan {
13 int (*flip)(void *data);
14 void *flip_data;
15
16 struct {
17 struct list_head list;
18 u32 channel;
19 u32 ctxdma;
20 u32 offset;
21 u32 value;
22 u32 head;
23 } vblank;
24};
25
26static inline void
27nouveau_software_context_new(struct nouveau_channel *chan,
28 struct nouveau_software_chan *pch)
29{
30 pch->flip = nouveau_flip_complete;
31 pch->flip_data = chan;
32}
33
34static inline void
35nouveau_software_create(struct nouveau_software_priv *psw)
36{
37 INIT_LIST_HEAD(&psw->vblank);
38 spin_lock_init(&psw->peephole_lock);
39}
40
41static inline u16
42nouveau_software_class(struct drm_device *dev)
43{
44 struct drm_nouveau_private *dev_priv = dev->dev_private;
45 if (dev_priv->card_type <= NV_04)
46 return 0x006e;
47 if (dev_priv->card_type <= NV_40)
48 return 0x016e;
49 if (dev_priv->card_type <= NV_50)
50 return 0x506e;
51 if (dev_priv->card_type <= NV_E0)
52 return 0x906e;
53 return 0x0000;
54}
55
56int nv04_software_create(struct drm_device *);
57int nv50_software_create(struct drm_device *);
58int nvc0_software_create(struct drm_device *);
59
60#endif
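nouveau_software_class() in the deleted header chose the software object class by walking generation thresholds in ascending order. A direct, self-contained transcription of that ladder, with raw hex values standing in for the NV_xx card-type enum:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical copy of the ladder above; 0x04/0x40/0x50/0xe0 stand
 * in for the NV_04/NV_40/NV_50/NV_E0 enum values. */
static uint16_t
software_class(int card_type)
{
	if (card_type <= 0x04)
		return 0x006e;
	if (card_type <= 0x40)
		return 0x016e;
	if (card_type <= 0x50)
		return 0x506e;
	if (card_type <= 0xe0)
		return 0x906e;
	return 0x0000;
}

int
main(void)
{
	printf("nv50 sw class: %04x\n", software_class(0x50));
	return 0;
}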
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 5c2836fbf01a..4349b337cfdd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -35,13 +35,9 @@
35#include "nouveau_drv.h" 35#include "nouveau_drv.h"
36#include <nouveau_drm.h> 36#include <nouveau_drm.h>
37#include "nouveau_fbcon.h" 37#include "nouveau_fbcon.h"
38#include <core/ramht.h>
39#include "nouveau_pm.h" 38#include "nouveau_pm.h"
40#include "nv04_display.h" 39#include "nv04_display.h"
41#include "nv50_display.h" 40#include "nv50_display.h"
42#include <engine/fifo.h>
43#include "nouveau_fence.h"
44#include "nouveau_software.h"
45 41
46static void nouveau_stub_takedown(struct drm_device *dev) {} 42static void nouveau_stub_takedown(struct drm_device *dev) {}
47static int nouveau_stub_init(struct drm_device *dev) { return 0; } 43static int nouveau_stub_init(struct drm_device *dev) { return 0; }
@@ -266,38 +262,6 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
266 return can_switch; 262 return can_switch;
267} 263}
268 264
269static void
270nouveau_card_channel_fini(struct drm_device *dev)
271{
272 struct drm_nouveau_private *dev_priv = dev->dev_private;
273
274 if (dev_priv->channel) {
275 nouveau_channel_put_unlocked(&dev_priv->channel);
276 nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
277 }
278}
279
280static int
281nouveau_card_channel_init(struct drm_device *dev)
282{
283 struct drm_nouveau_private *dev_priv = dev->dev_private;
284 struct nouveau_channel *chan;
285 int ret;
286
287 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x1000, &dev_priv->chan_vm);
288 if (ret)
289 return ret;
290
291 ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
292 dev_priv->channel = chan;
293 if (ret)
294 return ret;
295 mutex_unlock(&dev_priv->channel->mutex);
296
297 nouveau_bo_move_init(chan);
298 return 0;
299}
300
301static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = { 265static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = {
302 .set_gpu_state = nouveau_switcheroo_set_state, 266 .set_gpu_state = nouveau_switcheroo_set_state,
303 .reprobe = nouveau_switcheroo_reprobe, 267 .reprobe = nouveau_switcheroo_reprobe,
@@ -309,7 +273,7 @@ nouveau_card_init(struct drm_device *dev)
309{ 273{
310 struct drm_nouveau_private *dev_priv = dev->dev_private; 274 struct drm_nouveau_private *dev_priv = dev->dev_private;
311 struct nouveau_engine *engine; 275 struct nouveau_engine *engine;
312 int ret, e = 0; 276 int ret;
313 277
314 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); 278 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
315 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops); 279 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
@@ -319,11 +283,7 @@ nouveau_card_init(struct drm_device *dev)
319 if (ret) 283 if (ret)
320 goto out; 284 goto out;
321 engine = &dev_priv->engine; 285 engine = &dev_priv->engine;
322 spin_lock_init(&dev_priv->channels.lock);
323 spin_lock_init(&dev_priv->tile.lock);
324 spin_lock_init(&dev_priv->context_switch_lock); 286 spin_lock_init(&dev_priv->context_switch_lock);
325 spin_lock_init(&dev_priv->vm_lock);
326 INIT_LIST_HEAD(&dev_priv->classes);
327 287
328 /* Make the CRTCs and I2C buses accessible */ 288 /* Make the CRTCs and I2C buses accessible */
329 ret = engine->display.early_init(dev); 289 ret = engine->display.early_init(dev);
@@ -343,187 +303,9 @@ nouveau_card_init(struct drm_device *dev)
343 nv_mask(dev, 0x00088080, 0x00000800, 0x00000000); 303 nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
344 } 304 }
345 305
346 ret = nouveau_mem_vram_init(dev);
347 if (ret)
348 goto out_bios;
349
350 ret = nouveau_mem_gart_init(dev);
351 if (ret)
352 goto out_ttmvram;
353
354 if (!dev_priv->noaccel) {
355 switch (dev_priv->card_type) {
356 case NV_04:
357 nv04_fifo_create(dev);
358 break;
359 case NV_10:
360 case NV_20:
361 case NV_30:
362 if (dev_priv->chipset < 0x17)
363 nv10_fifo_create(dev);
364 else
365 nv17_fifo_create(dev);
366 break;
367 case NV_40:
368 nv40_fifo_create(dev);
369 break;
370 case NV_50:
371 if (dev_priv->chipset == 0x50)
372 nv50_fifo_create(dev);
373 else
374 nv84_fifo_create(dev);
375 break;
376 case NV_C0:
377 case NV_D0:
378 nvc0_fifo_create(dev);
379 break;
380 case NV_E0:
381 nve0_fifo_create(dev);
382 break;
383 default:
384 break;
385 }
386
387 switch (dev_priv->card_type) {
388 case NV_04:
389 nv04_fence_create(dev);
390 break;
391 case NV_10:
392 case NV_20:
393 case NV_30:
394 case NV_40:
395 case NV_50:
396 if (dev_priv->chipset < 0x84)
397 nv50_fence_create(dev);
398 else
399 nv84_fence_create(dev);
400 break;
401 case NV_C0:
402 case NV_D0:
403 case NV_E0:
404 nvc0_fence_create(dev);
405 break;
406 default:
407 break;
408 }
409
410 switch (dev_priv->card_type) {
411 case NV_04:
412 case NV_10:
413 case NV_20:
414 case NV_30:
415 case NV_40:
416 nv04_software_create(dev);
417 break;
418 case NV_50:
419 nv50_software_create(dev);
420 break;
421 case NV_C0:
422 case NV_D0:
423 case NV_E0:
424 nvc0_software_create(dev);
425 break;
426 default:
427 break;
428 }
429
430 switch (dev_priv->card_type) {
431 case NV_04:
432 nv04_graph_create(dev);
433 break;
434 case NV_10:
435 nv10_graph_create(dev);
436 break;
437 case NV_20:
438 case NV_30:
439 nv20_graph_create(dev);
440 break;
441 case NV_40:
442 nv40_graph_create(dev);
443 break;
444 case NV_50:
445 nv50_graph_create(dev);
446 break;
447 case NV_C0:
448 case NV_D0:
449 nvc0_graph_create(dev);
450 break;
451 case NV_E0:
452 nve0_graph_create(dev);
453 break;
454 default:
455 break;
456 }
457
458 switch (dev_priv->chipset) {
459 case 0x84:
460 case 0x86:
461 case 0x92:
462 case 0x94:
463 case 0x96:
464 case 0xa0:
465 nv84_crypt_create(dev);
466 break;
467 case 0x98:
468 case 0xaa:
469 case 0xac:
470 nv98_crypt_create(dev);
471 break;
472 }
473
474 switch (dev_priv->card_type) {
475 case NV_50:
476 switch (dev_priv->chipset) {
477 case 0xa3:
478 case 0xa5:
479 case 0xa8:
480 nva3_copy_create(dev);
481 break;
482 }
483 break;
484 case NV_C0:
485 if (!(nv_rd32(dev, 0x022500) & 0x00000200))
486 nvc0_copy_create(dev, 1);
487 case NV_D0:
488 if (!(nv_rd32(dev, 0x022500) & 0x00000100))
489 nvc0_copy_create(dev, 0);
490 break;
491 default:
492 break;
493 }
494
495 if (dev_priv->chipset >= 0xa3 || dev_priv->chipset == 0x98) {
496 nv84_bsp_create(dev);
497 nv84_vp_create(dev);
498 nv98_ppp_create(dev);
499 } else
500 if (dev_priv->chipset >= 0x84) {
501 nv50_mpeg_create(dev);
502 nv84_bsp_create(dev);
503 nv84_vp_create(dev);
504 } else
505 if (dev_priv->chipset >= 0x50) {
506 nv50_mpeg_create(dev);
507 } else
508 if (dev_priv->card_type == NV_40 ||
509 dev_priv->chipset == 0x31 ||
510 dev_priv->chipset == 0x34 ||
511 dev_priv->chipset == 0x36) {
512 nv31_mpeg_create(dev);
513 }
514
515 for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
516 if (dev_priv->eng[e]) {
517 ret = dev_priv->eng[e]->init(dev, e);
518 if (ret)
519 goto out_engine;
520 }
521 }
522 }
523
524 ret = nouveau_irq_init(dev); 306 ret = nouveau_irq_init(dev);
525 if (ret) 307 if (ret)
526 goto out_engine; 308 goto out_bios;
527 309
528 ret = nouveau_display_create(dev); 310 ret = nouveau_display_create(dev);
529 if (ret) 311 if (ret)
@@ -532,42 +314,20 @@ nouveau_card_init(struct drm_device *dev)
532 nouveau_backlight_init(dev); 314 nouveau_backlight_init(dev);
533 nouveau_pm_init(dev); 315 nouveau_pm_init(dev);
534 316
535 if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
536 ret = nouveau_card_channel_init(dev);
537 if (ret)
538 goto out_pm;
539 }
540
541 if (dev->mode_config.num_crtc) { 317 if (dev->mode_config.num_crtc) {
542 ret = nouveau_display_init(dev); 318 ret = nouveau_display_init(dev);
543 if (ret) 319 if (ret)
544 goto out_chan; 320 goto out_pm;
545
546 nouveau_fbcon_init(dev);
547 } 321 }
548 322
549 return 0; 323 return 0;
550 324
551out_chan:
552 nouveau_card_channel_fini(dev);
553out_pm: 325out_pm:
554 nouveau_pm_fini(dev); 326 nouveau_pm_fini(dev);
555 nouveau_backlight_exit(dev); 327 nouveau_backlight_exit(dev);
556 nouveau_display_destroy(dev); 328 nouveau_display_destroy(dev);
557out_irq: 329out_irq:
558 nouveau_irq_fini(dev); 330 nouveau_irq_fini(dev);
559out_engine:
560 if (!dev_priv->noaccel) {
561 for (e = e - 1; e >= 0; e--) {
562 if (!dev_priv->eng[e])
563 continue;
564 dev_priv->eng[e]->fini(dev, e, false);
565 dev_priv->eng[e]->destroy(dev,e );
566 }
567 }
568 nouveau_mem_gart_fini(dev);
569out_ttmvram:
570 nouveau_mem_vram_fini(dev);
571out_bios: 331out_bios:
572 nouveau_bios_takedown(dev); 332 nouveau_bios_takedown(dev);
573out_display_early: 333out_display_early:
@@ -582,39 +342,19 @@ static void nouveau_card_takedown(struct drm_device *dev)
582{ 342{
583 struct drm_nouveau_private *dev_priv = dev->dev_private; 343 struct drm_nouveau_private *dev_priv = dev->dev_private;
584 struct nouveau_engine *engine = &dev_priv->engine; 344 struct nouveau_engine *engine = &dev_priv->engine;
585 int e;
586 345
587 if (dev->mode_config.num_crtc) { 346 if (dev->mode_config.num_crtc)
588 nouveau_fbcon_fini(dev);
589 nouveau_display_fini(dev); 347 nouveau_display_fini(dev);
590 }
591 348
592 nouveau_card_channel_fini(dev);
593 nouveau_pm_fini(dev); 349 nouveau_pm_fini(dev);
594 nouveau_backlight_exit(dev); 350 nouveau_backlight_exit(dev);
595 nouveau_display_destroy(dev); 351 nouveau_display_destroy(dev);
596 352
597 if (!dev_priv->noaccel) {
598 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
599 if (dev_priv->eng[e]) {
600 dev_priv->eng[e]->fini(dev, e, false);
601 dev_priv->eng[e]->destroy(dev,e );
602 }
603 }
604 }
605
606 if (dev_priv->vga_ram) { 353 if (dev_priv->vga_ram) {
607 nouveau_bo_unpin(dev_priv->vga_ram); 354 nouveau_bo_unpin(dev_priv->vga_ram);
608 nouveau_bo_ref(NULL, &dev_priv->vga_ram); 355 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
609 } 356 }
610 357
611 mutex_lock(&dev->struct_mutex);
612 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
613 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
614 mutex_unlock(&dev->struct_mutex);
615 nouveau_mem_gart_fini(dev);
616 nouveau_mem_vram_fini(dev);
617
618 nouveau_bios_takedown(dev); 358 nouveau_bios_takedown(dev);
619 engine->display.late_takedown(dev); 359 engine->display.late_takedown(dev);
620 360
@@ -624,56 +364,6 @@ static void nouveau_card_takedown(struct drm_device *dev)
624 vga_client_register(dev->pdev, NULL, NULL, NULL); 364 vga_client_register(dev->pdev, NULL, NULL, NULL);
625} 365}
626 366
627int
628nouveau_open(struct drm_device *dev, struct drm_file *file_priv)
629{
630 struct drm_nouveau_private *dev_priv = dev->dev_private;
631 struct nouveau_fpriv *fpriv;
632 int ret;
633
634 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
635 if (unlikely(!fpriv))
636 return -ENOMEM;
637
638 spin_lock_init(&fpriv->lock);
639 INIT_LIST_HEAD(&fpriv->channels);
640
641 if (dev_priv->card_type == NV_50) {
642 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
643 &fpriv->vm);
644 if (ret) {
645 kfree(fpriv);
646 return ret;
647 }
648 } else
649 if (dev_priv->card_type >= NV_C0) {
650 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
651 &fpriv->vm);
652 if (ret) {
653 kfree(fpriv);
654 return ret;
655 }
656 }
657
658 file_priv->driver_priv = fpriv;
659 return 0;
660}
661
662/* here a client dies, release the stuff that was allocated for its
663 * file_priv */
664void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
665{
666 nouveau_channel_cleanup(dev, file_priv);
667}
668
669void
670nouveau_postclose(struct drm_device *dev, struct drm_file *file_priv)
671{
672 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
673 nouveau_vm_ref(NULL, &fpriv->vm, NULL);
674 kfree(fpriv);
675}
676
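The removed nouveau_open() follows the usual per-client setup shape: allocate the file private, attach a per-client address space on NV50 and newer, and unwind the allocation if VM creation fails. A hypothetical skeleton of that shape; the real code sizes the VM block differently for the NV50 and NVC0 families, a detail collapsed here:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical minimal stand-ins; not the kernel's types. */
struct vm { int dummy; };
struct fpriv { struct vm *vm; };

static int
vm_new(unsigned long long size, struct vm **vm)
{
	(void)size;		/* 1ULL << 40 in the removed code */
	*vm = calloc(1, sizeof(**vm));
	return *vm ? 0 : -1;	/* -ENOMEM in the kernel */
}

/* Allocate the client private, then the VM, unwinding on failure. */
static int
client_open(int card_type, struct fpriv **out)
{
	struct fpriv *f = calloc(1, sizeof(*f));
	int ret;

	if (!f)
		return -1;

	if (card_type >= 0x50) {	/* NV50 and newer families */
		ret = vm_new(1ULL << 40, &f->vm);
		if (ret) {
			free(f);	/* mirrors the kfree(fpriv) path */
			return ret;
		}
	}

	*out = f;
	return 0;
}

int
main(void)
{
	struct fpriv *f;
	if (client_open(0x50, &f) == 0)
		printf("private %p vm %p\n", (void *)f, (void *)f->vm);
	return 0;
}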
677/* first module load, setup the mmio/fb mapping */ 367/* first module load, setup the mmio/fb mapping */
678/* KMS: we need mmio at load time, not when the first drm client opens. */ 368/* KMS: we need mmio at load time, not when the first drm client opens. */
679int nouveau_firstopen(struct drm_device *dev) 369int nouveau_firstopen(struct drm_device *dev)
@@ -704,55 +394,6 @@ static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
704#endif 394#endif
705} 395}
706 396
707static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev)
708{
709 struct pci_dev *pdev = dev->pdev;
710 struct apertures_struct *aper = alloc_apertures(3);
711 if (!aper)
712 return NULL;
713
714 aper->ranges[0].base = pci_resource_start(pdev, 1);
715 aper->ranges[0].size = pci_resource_len(pdev, 1);
716 aper->count = 1;
717
718 if (pci_resource_len(pdev, 2)) {
719 aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
720 aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
721 aper->count++;
722 }
723
724 if (pci_resource_len(pdev, 3)) {
725 aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
726 aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
727 aper->count++;
728 }
729
730 return aper;
731}
732
733static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
734{
735 struct drm_nouveau_private *dev_priv = dev->dev_private;
736 bool primary = false;
737 dev_priv->apertures = nouveau_get_apertures(dev);
738 if (!dev_priv->apertures)
739 return -ENOMEM;
740
741#ifdef CONFIG_X86
742 primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
743#endif
744
745 remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
746 return 0;
747}
748
749void *
750nouveau_newpriv(struct drm_device *dev)
751{
752 struct drm_nouveau_private *dev_priv = dev->dev_private;
753 return dev_priv->newpriv;
754}
755
756int nouveau_load(struct drm_device *dev, unsigned long flags) 397int nouveau_load(struct drm_device *dev, unsigned long flags)
757{ 398{
758 struct drm_nouveau_private *dev_priv; 399 struct drm_nouveau_private *dev_priv;
@@ -840,30 +481,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
840 481
841 NV_DEBUG(dev, "crystal freq: %dKHz\n", dev_priv->crystal); 482 NV_DEBUG(dev, "crystal freq: %dKHz\n", dev_priv->crystal);
842 483
843 /* Determine whether we'll attempt acceleration or not, some
844 * cards are disabled by default here due to them being known
845 * non-functional, or never been tested due to lack of hw.
846 */
847 dev_priv->noaccel = !!nouveau_noaccel;
848 if (nouveau_noaccel == -1) {
849 switch (dev_priv->chipset) {
850 case 0xd9: /* known broken */
851 case 0xe4: /* needs binary driver firmware */
852 case 0xe7: /* needs binary driver firmware */
853 NV_INFO(dev, "acceleration disabled by default, pass "
854 "noaccel=0 to force enable\n");
855 dev_priv->noaccel = true;
856 break;
857 default:
858 dev_priv->noaccel = false;
859 break;
860 }
861 }
862
863 ret = nouveau_remove_conflicting_drivers(dev);
864 if (ret)
865 goto err_priv;
866
867 nouveau_OF_copy_vbios_to_ramin(dev); 484 nouveau_OF_copy_vbios_to_ramin(dev);
868 485
869 /* Special flags */ 486 /* Special flags */
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index e729535e9b26..560e816138eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -24,9 +24,13 @@
24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 */ 25 */
26 26
27#include "drmP.h" 27#include <subdev/fb.h>
28#include <subdev/vm.h>
29#include <subdev/instmem.h>
28 30
29#include "nouveau_drv.h" 31#include "nouveau_drm.h"
32#include "nouveau_ttm.h"
33#include "nouveau_gem.h"
30 34
31static int 35static int
32nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) 36nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
@@ -60,11 +64,10 @@ static void
60nouveau_vram_manager_del(struct ttm_mem_type_manager *man, 64nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
61 struct ttm_mem_reg *mem) 65 struct ttm_mem_reg *mem)
62{ 66{
63 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); 67 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
64 struct drm_device *dev = dev_priv->dev; 68 struct nouveau_fb *pfb = nouveau_fb(drm->device);
65
66 nouveau_mem_node_cleanup(mem->mm_node); 69 nouveau_mem_node_cleanup(mem->mm_node);
67 nvfb_vram_put(dev, (struct nouveau_mem **)&mem->mm_node); 70 pfb->ram.put(pfb, (struct nouveau_mem **)&mem->mm_node);
68} 71}
69 72
70static int 73static int
@@ -73,8 +76,8 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
73 struct ttm_placement *placement, 76 struct ttm_placement *placement,
74 struct ttm_mem_reg *mem) 77 struct ttm_mem_reg *mem)
75{ 78{
76 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); 79 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
77 struct drm_device *dev = dev_priv->dev; 80 struct nouveau_fb *pfb = nouveau_fb(drm->device);
78 struct nouveau_bo *nvbo = nouveau_bo(bo); 81 struct nouveau_bo *nvbo = nouveau_bo(bo);
79 struct nouveau_mem *node; 82 struct nouveau_mem *node;
80 u32 size_nc = 0; 83 u32 size_nc = 0;
@@ -83,9 +86,9 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
83 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) 86 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
84 size_nc = 1 << nvbo->page_shift; 87 size_nc = 1 << nvbo->page_shift;
85 88
86 ret = nvfb_vram_get(dev, mem->num_pages << PAGE_SHIFT, 89 ret = pfb->ram.get(pfb, mem->num_pages << PAGE_SHIFT,
87 mem->page_alignment << PAGE_SHIFT, size_nc, 90 mem->page_alignment << PAGE_SHIFT, size_nc,
88 (nvbo->tile_flags >> 8) & 0x3ff, &node); 91 (nvbo->tile_flags >> 8) & 0x3ff, &node);
89 if (ret) { 92 if (ret) {
90 mem->mm_node = NULL; 93 mem->mm_node = NULL;
91 return (ret == -ENOSPC) ? 0 : ret; 94 return (ret == -ENOSPC) ? 0 : ret;
@@ -158,11 +161,9 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
158 struct ttm_placement *placement, 161 struct ttm_placement *placement,
159 struct ttm_mem_reg *mem) 162 struct ttm_mem_reg *mem)
160{ 163{
161 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
162 struct nouveau_mem *node; 164 struct nouveau_mem *node;
163 165
164 if (unlikely((mem->num_pages << PAGE_SHIFT) >= 166 if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024))
165 dev_priv->gart_info.aper_size))
166 return -ENOMEM; 167 return -ENOMEM;
167 168
168 node = kzalloc(sizeof(*node), GFP_KERNEL); 169 node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -188,13 +189,17 @@ const struct ttm_mem_type_manager_func nouveau_gart_manager = {
188 nouveau_gart_manager_debug 189 nouveau_gart_manager_debug
189}; 190};
190 191
192#include <core/subdev/vm/nv04.h>
191static int 193static int
192nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) 194nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
193{ 195{
194 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); 196 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
195 struct drm_device *dev = dev_priv->dev; 197 struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
196 man->priv = nv04vm_ref(dev); 198 struct nv04_vmmgr_priv *priv = (void *)vmm;
197 return (man->priv != NULL) ? 0 : -ENODEV; 199 struct nouveau_vm *vm = NULL;
200 nouveau_vm_ref(priv->vm, &vm, NULL);
201 man->priv = vm;
202 return 0;
198} 203}
199 204
200static int 205static int
@@ -260,13 +265,12 @@ int
260nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) 265nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
261{ 266{
262 struct drm_file *file_priv = filp->private_data; 267 struct drm_file *file_priv = filp->private_data;
263 struct drm_nouveau_private *dev_priv = 268 struct nouveau_drm *drm = nouveau_newpriv(file_priv->minor->dev);
264 file_priv->minor->dev->dev_private;
265 269
266 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) 270 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
267 return drm_mmap(filp, vma); 271 return drm_mmap(filp, vma);
268 272
269 return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev); 273 return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
270} 274}
271 275
272static int 276static int
@@ -282,12 +286,12 @@ nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
282} 286}
283 287
284int 288int
285nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv) 289nouveau_ttm_global_init(struct nouveau_drm *drm)
286{ 290{
287 struct drm_global_reference *global_ref; 291 struct drm_global_reference *global_ref;
288 int ret; 292 int ret;
289 293
290 global_ref = &dev_priv->ttm.mem_global_ref; 294 global_ref = &drm->ttm.mem_global_ref;
291 global_ref->global_type = DRM_GLOBAL_TTM_MEM; 295 global_ref->global_type = DRM_GLOBAL_TTM_MEM;
292 global_ref->size = sizeof(struct ttm_mem_global); 296 global_ref->size = sizeof(struct ttm_mem_global);
293 global_ref->init = &nouveau_ttm_mem_global_init; 297 global_ref->init = &nouveau_ttm_mem_global_init;
@@ -296,12 +300,12 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
296 ret = drm_global_item_ref(global_ref); 300 ret = drm_global_item_ref(global_ref);
297 if (unlikely(ret != 0)) { 301 if (unlikely(ret != 0)) {
298 DRM_ERROR("Failed setting up TTM memory accounting\n"); 302 DRM_ERROR("Failed setting up TTM memory accounting\n");
299 dev_priv->ttm.mem_global_ref.release = NULL; 303 drm->ttm.mem_global_ref.release = NULL;
300 return ret; 304 return ret;
301 } 305 }
302 306
303 dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object; 307 drm->ttm.bo_global_ref.mem_glob = global_ref->object;
304 global_ref = &dev_priv->ttm.bo_global_ref.ref; 308 global_ref = &drm->ttm.bo_global_ref.ref;
305 global_ref->global_type = DRM_GLOBAL_TTM_BO; 309 global_ref->global_type = DRM_GLOBAL_TTM_BO;
306 global_ref->size = sizeof(struct ttm_bo_global); 310 global_ref->size = sizeof(struct ttm_bo_global);
307 global_ref->init = &ttm_bo_global_init; 311 global_ref->init = &ttm_bo_global_init;
@@ -310,8 +314,8 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
310 ret = drm_global_item_ref(global_ref); 314 ret = drm_global_item_ref(global_ref);
311 if (unlikely(ret != 0)) { 315 if (unlikely(ret != 0)) {
312 DRM_ERROR("Failed setting up TTM BO subsystem\n"); 316 DRM_ERROR("Failed setting up TTM BO subsystem\n");
313 drm_global_item_unref(&dev_priv->ttm.mem_global_ref); 317 drm_global_item_unref(&drm->ttm.mem_global_ref);
314 dev_priv->ttm.mem_global_ref.release = NULL; 318 drm->ttm.mem_global_ref.release = NULL;
315 return ret; 319 return ret;
316 } 320 }
317 321
@@ -319,12 +323,105 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
319} 323}
320 324
321void 325void
322nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv) 326nouveau_ttm_global_release(struct nouveau_drm *drm)
323{ 327{
324 if (dev_priv->ttm.mem_global_ref.release == NULL) 328 if (drm->ttm.mem_global_ref.release == NULL)
325 return; 329 return;
326 330
327 drm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref); 331 drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
328 drm_global_item_unref(&dev_priv->ttm.mem_global_ref); 332 drm_global_item_unref(&drm->ttm.mem_global_ref);
329 dev_priv->ttm.mem_global_ref.release = NULL; 333 drm->ttm.mem_global_ref.release = NULL;
334}
335
336int
337nouveau_ttm_init(struct nouveau_drm *drm)
338{
339 struct drm_device *dev = drm->dev;
340 u32 bits;
341 int ret;
342
343 if (nv_device(drm->device)->card_type >= NV_50) {
344 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
345 bits = 40;
346 else
347 bits = 32;
348 } else {
349 bits = 32;
350 }
351
352 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
353 if (ret)
354 return ret;
355
356 ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
357 if (ret)
358 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
359
360 ret = nouveau_ttm_global_init(drm);
361 if (ret)
362 return ret;
363
364 ret = ttm_bo_device_init(&drm->ttm.bdev,
365 drm->ttm.bo_global_ref.ref.object,
366 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
367 bits <= 32 ? true : false);
368 if (ret) {
369 NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
370 return ret;
371 }
372
373 /* VRAM init */
374 drm->gem.vram_available = nouveau_fb(drm->device)->ram.size;
375 drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;
376
377 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
378 drm->gem.vram_available >> PAGE_SHIFT);
379 if (ret) {
380 NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
381 return ret;
382 }
383
384 drm->ttm.mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
385 pci_resource_len(dev->pdev, 1),
386 DRM_MTRR_WC);
387
388 /* GART init */
389 if (drm->agp.stat != ENABLED) {
390 drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
391 if (drm->gem.gart_available > 512 * 1024 * 1024)
392 drm->gem.gart_available = 512 * 1024 * 1024;
393 } else {
394 drm->gem.gart_available = drm->agp.size;
395 }
396
397 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
398 drm->gem.gart_available >> PAGE_SHIFT);
399 if (ret) {
400 NV_ERROR(drm, "GART mm init failed, %d\n", ret);
401 return ret;
402 }
403
404 NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
405 NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
406 return 0;
407}
408
409void
410nouveau_ttm_fini(struct nouveau_drm *drm)
411{
412 mutex_lock(&drm->dev->struct_mutex);
413 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
414 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
415 mutex_unlock(&drm->dev->struct_mutex);
416
417 ttm_bo_device_release(&drm->ttm.bdev);
418
419 nouveau_ttm_global_release(drm);
420
421 if (drm->ttm.mtrr >= 0) {
422 drm_mtrr_del(drm->ttm.mtrr,
423 pci_resource_start(drm->dev->pdev, 1),
424 pci_resource_len(drm->dev->pdev, 1), DRM_MTRR_WC);
425 drm->ttm.mtrr = -1;
426 }
330} 427}
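
The new nouveau_ttm_init() above folds DMA-mask selection, TTM global/device setup, and VRAM/GART region sizing into a single entry point. The mask logic in isolation looks like this; a minimal sketch with a name of our own choosing, using the pre-DMA-API-consolidation PCI helpers this era of kernel provides:

static int example_set_dma_masks(struct pci_dev *pdev, bool nv50_or_later)
{
	/* 40-bit addressing is only attempted on NV50 and newer boards */
	u32 bits = 32;
	int ret;

	if (nv50_or_later && pci_dma_supported(pdev, DMA_BIT_MASK(40)))
		bits = 40;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(bits));
	if (ret)
		return ret;

	/* a rejected coherent mask is non-fatal: retry at 32 bits */
	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(bits)))
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));

	return 0;
}
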
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h
new file mode 100644
index 000000000000..9f4d2715584b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -0,0 +1,21 @@
1#ifndef __NOUVEAU_TTM_H__
2#define __NOUVEAU_TTM_H__
3
4static inline struct nouveau_drm *
5nouveau_bdev(struct ttm_bo_device *bd)
6{
7 return container_of(bd, struct nouveau_drm, ttm.bdev);
8}
9
10extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
11extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
12extern const struct ttm_mem_type_manager_func nv04_gart_manager;
13
14struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *,
15 unsigned long size, u32 page_flags,
16 struct page *dummy_read_page);
17
18int nouveau_ttm_init(struct nouveau_drm *drm);
19void nouveau_ttm_fini(struct nouveau_drm *drm);
20
21#endif
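
The nouveau_bdev() helper above is what lets the TTM manager callbacks in nouveau_ttm.c recover driver state from the bare ttm_bo_device they receive: the device is embedded in struct nouveau_drm, so container_of() walks back to the enclosing object. A minimal usage sketch (function name is ours):

static bool example_is_nv50(struct ttm_mem_type_manager *man)
{
	/* man->bdev points into drm->ttm.bdev; recover the driver */
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);

	return nv_device(drm->device)->card_type >= NV_50;
}
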
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c
deleted file mode 100644
index 6bff634c95fe..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_util.c
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * Copyright (C) 2010 Nouveau Project
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#include <linux/ratelimit.h>
29#include "nouveau_util.h"
30
31#include <core/enum.h>
32
33static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
34
35int
36nouveau_ratelimit(void)
37{
38 return __ratelimit(&nouveau_ratelimit_state);
39}
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h
deleted file mode 100644
index 114293758f8c..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_util.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * Copyright (C) 2010 Nouveau Project
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#ifndef __NOUVEAU_UTIL_H__
29#define __NOUVEAU_UTIL_H__
30
31#include <core/enum.h>
32
33int nouveau_ratelimit(void);
34
35#endif
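
The deleted nouveau_ratelimit() was a one-line wrapper around a module-wide DEFINE_RATELIMIT_STATE with a 3-second window and a 20-message burst. Any caller still wanting that behaviour can open-code the same throttle; a sketch with identical parameters (names are ours):

#include <linux/ratelimit.h>

static DEFINE_RATELIMIT_STATE(example_rs, 3 * HZ, 20);

static bool example_ratelimit(void)
{
	/* true while under the 20-messages-per-3-seconds budget */
	return __ratelimit(&example_rs);
}
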
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 81947ea0f82d..6ab936376c40 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -32,9 +32,6 @@
32#include "nouveau_encoder.h" 32#include "nouveau_encoder.h"
33#include "nouveau_connector.h" 33#include "nouveau_connector.h"
34 34
35static void nv04_vblank_crtc0_isr(struct drm_device *);
36static void nv04_vblank_crtc1_isr(struct drm_device *);
37
38int 35int
39nv04_display_early_init(struct drm_device *dev) 36nv04_display_early_init(struct drm_device *dev)
40{ 37{
@@ -126,8 +123,6 @@ nv04_display_create(struct drm_device *dev)
126 func->save(encoder); 123 func->save(encoder);
127 } 124 }
128 125
129 nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr);
130 nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr);
131 return 0; 126 return 0;
132} 127}
133 128
@@ -141,9 +136,6 @@ nv04_display_destroy(struct drm_device *dev)
141 136
142 NV_DEBUG_KMS(dev, "\n"); 137 NV_DEBUG_KMS(dev, "\n");
143 138
144 nouveau_irq_unregister(dev, 24);
145 nouveau_irq_unregister(dev, 25);
146
147 /* Turn every CRTC off. */ 139 /* Turn every CRTC off. */
148 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 140 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
149 struct drm_mode_set modeset = { 141 struct drm_mode_set modeset = {
@@ -203,17 +195,3 @@ nv04_display_fini(struct drm_device *dev)
203 if (nv_two_heads(dev)) 195 if (nv_two_heads(dev))
204 NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0); 196 NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
205} 197}
206
207static void
208nv04_vblank_crtc0_isr(struct drm_device *dev)
209{
210 nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
211 drm_handle_vblank(dev, 0);
212}
213
214static void
215nv04_vblank_crtc1_isr(struct drm_device *dev)
216{
217 nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
218 drm_handle_vblank(dev, 1);
219}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 06fb68acf813..35480b6776f8 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -22,19 +22,18 @@
22 * DEALINGS IN THE SOFTWARE. 22 * DEALINGS IN THE SOFTWARE.
23 */ 23 */
24 24
25#include "drmP.h" 25#include <core/object.h>
26#include "nouveau_drv.h" 26
27#include "nouveau_drm.h"
27#include "nouveau_dma.h" 28#include "nouveau_dma.h"
28#include <core/ramht.h>
29#include "nouveau_fbcon.h" 29#include "nouveau_fbcon.h"
30 30
31int 31int
32nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 32nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
33{ 33{
34 struct nouveau_fbdev *nfbdev = info->par; 34 struct nouveau_fbdev *nfbdev = info->par;
35 struct drm_device *dev = nfbdev->dev; 35 struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
36 struct drm_nouveau_private *dev_priv = dev->dev_private; 36 struct nouveau_channel *chan = drm->channel;
37 struct nouveau_channel *chan = dev_priv->channel;
38 int ret; 37 int ret;
39 38
40 ret = RING_SPACE(chan, 4); 39 ret = RING_SPACE(chan, 4);
@@ -53,9 +52,8 @@ int
53nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 52nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
54{ 53{
55 struct nouveau_fbdev *nfbdev = info->par; 54 struct nouveau_fbdev *nfbdev = info->par;
56 struct drm_device *dev = nfbdev->dev; 55 struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
57 struct drm_nouveau_private *dev_priv = dev->dev_private; 56 struct nouveau_channel *chan = drm->channel;
58 struct nouveau_channel *chan = dev_priv->channel;
59 int ret; 57 int ret;
60 58
61 ret = RING_SPACE(chan, 7); 59 ret = RING_SPACE(chan, 7);
@@ -81,9 +79,8 @@ int
81nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 79nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
82{ 80{
83 struct nouveau_fbdev *nfbdev = info->par; 81 struct nouveau_fbdev *nfbdev = info->par;
84 struct drm_device *dev = nfbdev->dev; 82 struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
85 struct drm_nouveau_private *dev_priv = dev->dev_private; 83 struct nouveau_channel *chan = drm->channel;
86 struct nouveau_channel *chan = dev_priv->channel;
87 uint32_t fg; 84 uint32_t fg;
88 uint32_t bg; 85 uint32_t bg;
89 uint32_t dsize; 86 uint32_t dsize;
@@ -142,9 +139,10 @@ nv04_fbcon_accel_init(struct fb_info *info)
142{ 139{
143 struct nouveau_fbdev *nfbdev = info->par; 140 struct nouveau_fbdev *nfbdev = info->par;
144 struct drm_device *dev = nfbdev->dev; 141 struct drm_device *dev = nfbdev->dev;
145 struct drm_nouveau_private *dev_priv = dev->dev_private; 142 struct nouveau_drm *drm = nouveau_newpriv(dev);
146 struct nouveau_channel *chan = dev_priv->channel; 143 struct nouveau_channel *chan = drm->channel;
147 const int sub = NvSubCtxSurf2D; 144 struct nouveau_device *device = nv_device(drm->device);
145 struct nouveau_object *object;
148 int surface_fmt, pattern_fmt, rect_fmt; 146 int surface_fmt, pattern_fmt, rect_fmt;
149 int ret; 147 int ret;
150 148
@@ -176,31 +174,35 @@ nv04_fbcon_accel_init(struct fb_info *info)
176 return -EINVAL; 174 return -EINVAL;
177 } 175 }
178 176
179 ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D, 177 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvCtxSurf2D,
180 dev_priv->card_type >= NV_10 ? 178 device->card_type >= NV_10 ? 0x0062 : 0x0042,
181 0x0062 : 0x0042); 179 NULL, 0, &object);
182 if (ret) 180 if (ret)
183 return ret; 181 return ret;
184 182
185 ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019); 183 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvClipRect,
184 0x0019, NULL, 0, &object);
186 if (ret) 185 if (ret)
187 return ret; 186 return ret;
188 187
189 ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043); 188 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvRop,
189 0x0043, NULL, 0, &object);
190 if (ret) 190 if (ret)
191 return ret; 191 return ret;
192 192
193 ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044); 193 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImagePatt,
194 0x0044, NULL, 0, &object);
194 if (ret) 195 if (ret)
195 return ret; 196 return ret;
196 197
197 ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a); 198 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvGdiRect,
199 0x004a, NULL, 0, &object);
198 if (ret) 200 if (ret)
199 return ret; 201 return ret;
200 202
201 ret = nouveau_gpuobj_gr_new(chan, NvImageBlit, 203 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImageBlit,
202 dev_priv->chipset >= 0x11 ? 204 device->chipset >= 0x11 ? 0x009f : 0x005f,
203 0x009f : 0x005f); 205 NULL, 0, &object);
204 if (ret) 206 if (ret)
205 return ret; 207 return ret;
206 208
@@ -209,25 +211,25 @@ nv04_fbcon_accel_init(struct fb_info *info)
209 return 0; 211 return 0;
210 } 212 }
211 213
212 BEGIN_NV04(chan, sub, 0x0000, 1); 214 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
213 OUT_RING(chan, NvCtxSurf2D); 215 OUT_RING(chan, NvCtxSurf2D);
214 BEGIN_NV04(chan, sub, 0x0184, 2); 216 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0184, 2);
215 OUT_RING(chan, NvDmaFB); 217 OUT_RING(chan, NvDmaFB);
216 OUT_RING(chan, NvDmaFB); 218 OUT_RING(chan, NvDmaFB);
217 BEGIN_NV04(chan, sub, 0x0300, 4); 219 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 4);
218 OUT_RING(chan, surface_fmt); 220 OUT_RING(chan, surface_fmt);
219 OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); 221 OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
220 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); 222 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
221 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); 223 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
222 224
223 BEGIN_NV04(chan, sub, 0x0000, 1); 225 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
224 OUT_RING(chan, NvRop); 226 OUT_RING(chan, NvRop);
225 BEGIN_NV04(chan, sub, 0x0300, 1); 227 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 1);
226 OUT_RING(chan, 0x55); 228 OUT_RING(chan, 0x55);
227 229
228 BEGIN_NV04(chan, sub, 0x0000, 1); 230 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
229 OUT_RING(chan, NvImagePatt); 231 OUT_RING(chan, NvImagePatt);
230 BEGIN_NV04(chan, sub, 0x0300, 8); 232 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 8);
231 OUT_RING(chan, pattern_fmt); 233 OUT_RING(chan, pattern_fmt);
232#ifdef __BIG_ENDIAN 234#ifdef __BIG_ENDIAN
233 OUT_RING(chan, 2); 235 OUT_RING(chan, 2);
@@ -241,9 +243,9 @@ nv04_fbcon_accel_init(struct fb_info *info)
241 OUT_RING(chan, ~0); 243 OUT_RING(chan, ~0);
242 OUT_RING(chan, ~0); 244 OUT_RING(chan, ~0);
243 245
244 BEGIN_NV04(chan, sub, 0x0000, 1); 246 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
245 OUT_RING(chan, NvClipRect); 247 OUT_RING(chan, NvClipRect);
246 BEGIN_NV04(chan, sub, 0x0300, 2); 248 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 2);
247 OUT_RING(chan, 0); 249 OUT_RING(chan, 0);
248 OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); 250 OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
249 251
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 1b45a4f8c0a5..a220b94ba9f2 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -22,15 +22,14 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include <engine/fifo.h>
26#include "nouveau_drv.h" 26
27#include "nouveau_drm.h"
27#include "nouveau_dma.h" 28#include "nouveau_dma.h"
28#include <core/ramht.h>
29#include "nouveau_fence.h" 29#include "nouveau_fence.h"
30 30
31struct nv04_fence_chan { 31struct nv04_fence_chan {
32 struct nouveau_fence_chan base; 32 struct nouveau_fence_chan base;
33 atomic_t sequence;
34}; 33};
35 34
36struct nv04_fence_priv { 35struct nv04_fence_priv {
@@ -57,19 +56,11 @@ nv04_fence_sync(struct nouveau_fence *fence,
57 return -ENODEV; 56 return -ENODEV;
58} 57}
59 58
60int
61nv04_fence_mthd(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
62{
63 struct nv04_fence_chan *fctx = chan->fence;
64 atomic_set(&fctx->sequence, data);
65 return 0;
66}
67
68static u32 59static u32
69nv04_fence_read(struct nouveau_channel *chan) 60nv04_fence_read(struct nouveau_channel *chan)
70{ 61{
71 struct nv04_fence_chan *fctx = chan->fence; 62 struct nouveau_fifo_chan *fifo = (void *)chan->object;
72 return atomic_read(&fctx->sequence); 63 return atomic_read(&fifo->refcnt);
73} 64}
74 65
75static void 66static void
@@ -87,7 +78,6 @@ nv04_fence_context_new(struct nouveau_channel *chan)
87 struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL); 78 struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
88 if (fctx) { 79 if (fctx) {
89 nouveau_fence_context_new(&fctx->base); 80 nouveau_fence_context_new(&fctx->base);
90 atomic_set(&fctx->sequence, 0);
91 chan->fence = fctx; 81 chan->fence = fctx;
92 return 0; 82 return 0;
93 } 83 }
@@ -95,23 +85,19 @@ nv04_fence_context_new(struct nouveau_channel *chan)
95} 85}
96 86
97static void 87static void
98nv04_fence_destroy(struct drm_device *dev) 88nv04_fence_destroy(struct nouveau_drm *drm)
99{ 89{
100 struct drm_nouveau_private *dev_priv = dev->dev_private; 90 struct nv04_fence_priv *priv = drm->fence;
101 struct nv04_fence_priv *priv = dev_priv->fence.func; 91 drm->fence = NULL;
102
103 dev_priv->fence.func = NULL;
104 kfree(priv); 92 kfree(priv);
105} 93}
106 94
107int 95int
108nv04_fence_create(struct drm_device *dev) 96nv04_fence_create(struct nouveau_drm *drm)
109{ 97{
110 struct drm_nouveau_private *dev_priv = dev->dev_private;
111 struct nv04_fence_priv *priv; 98 struct nv04_fence_priv *priv;
112 int ret;
113 99
114 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 100 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
115 if (!priv) 101 if (!priv)
116 return -ENOMEM; 102 return -ENOMEM;
117 103
@@ -121,6 +107,5 @@ nv04_fence_create(struct drm_device *dev)
121 priv->base.emit = nv04_fence_emit; 107 priv->base.emit = nv04_fence_emit;
122 priv->base.sync = nv04_fence_sync; 108 priv->base.sync = nv04_fence_sync;
123 priv->base.read = nv04_fence_read; 109 priv->base.read = nv04_fence_read;
124 dev_priv->fence.func = &priv->base; 110 return 0;
125 return ret;
126} 111}
diff --git a/drivers/gpu/drm/nouveau/nv04_software.c b/drivers/gpu/drm/nouveau/nv04_software.c
deleted file mode 100644
index ceeb868c7c29..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_software.c
+++ /dev/null
@@ -1,139 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include <core/ramht.h>
29#include "nouveau_fence.h"
30#include "nouveau_software.h"
31#include "nouveau_hw.h"
32
33struct nv04_software_priv {
34 struct nouveau_software_priv base;
35};
36
37struct nv04_software_chan {
38 struct nouveau_software_chan base;
39};
40
41static int
42mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
43{
44 struct nv04_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
45 return pch->base.flip(pch->base.flip_data);
46}
47
48static int
49nv04_software_context_new(struct nouveau_channel *chan, int engine)
50{
51 struct nv04_software_chan *pch;
52
53 pch = kzalloc(sizeof(*pch), GFP_KERNEL);
54 if (!pch)
55 return -ENOMEM;
56
57 nouveau_software_context_new(chan, &pch->base);
58 chan->engctx[engine] = pch;
59 return 0;
60}
61
62static void
63nv04_software_context_del(struct nouveau_channel *chan, int engine)
64{
65 struct nv04_software_chan *pch = chan->engctx[engine];
66 chan->engctx[engine] = NULL;
67 kfree(pch);
68}
69
70static int
71nv04_software_object_new(struct nouveau_channel *chan, int engine,
72 u32 handle, u16 class)
73{
74 struct drm_device *dev = chan->dev;
75 struct nouveau_gpuobj *obj = NULL;
76 int ret;
77
78 ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
79 if (ret)
80 return ret;
81 obj->engine = 0;
82 obj->class = class;
83
84 ret = nouveau_ramht_insert(chan, handle, obj);
85 nouveau_gpuobj_ref(NULL, &obj);
86 return ret;
87}
88
89static int
90nv04_software_init(struct drm_device *dev, int engine)
91{
92 return 0;
93}
94
95static int
96nv04_software_fini(struct drm_device *dev, int engine, bool suspend)
97{
98 return 0;
99}
100
101static void
102nv04_software_destroy(struct drm_device *dev, int engine)
103{
104 struct nv04_software_priv *psw = nv_engine(dev, engine);
105
106 NVOBJ_ENGINE_DEL(dev, SW);
107 kfree(psw);
108}
109
110int
111nv04_software_create(struct drm_device *dev)
112{
113 struct drm_nouveau_private *dev_priv = dev->dev_private;
114 struct nv04_software_priv *psw;
115
116 psw = kzalloc(sizeof(*psw), GFP_KERNEL);
117 if (!psw)
118 return -ENOMEM;
119
120 psw->base.base.destroy = nv04_software_destroy;
121 psw->base.base.init = nv04_software_init;
122 psw->base.base.fini = nv04_software_fini;
123 psw->base.base.context_new = nv04_software_context_new;
124 psw->base.base.context_del = nv04_software_context_del;
125 psw->base.base.object_new = nv04_software_object_new;
126 nouveau_software_create(&psw->base);
127
128 NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
129 if (dev_priv->card_type <= NV_04) {
130 NVOBJ_CLASS(dev, 0x006e, SW);
131 NVOBJ_MTHD (dev, 0x006e, 0x0150, nv04_fence_mthd);
132 NVOBJ_MTHD (dev, 0x006e, 0x0500, mthd_flip);
133 } else {
134 NVOBJ_CLASS(dev, 0x016e, SW);
135 NVOBJ_MTHD (dev, 0x016e, 0x0500, mthd_flip);
136 }
137
138 return 0;
139}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index dd7f17dd9903..ce752bf5cc4e 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -22,10 +22,11 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24 24
25#include "drmP.h" 25#include <core/object.h>
26#include "nouveau_drv.h" 26#include <core/class.h>
27
28#include "nouveau_drm.h"
27#include "nouveau_dma.h" 29#include "nouveau_dma.h"
28#include <core/ramht.h>
29#include "nouveau_fence.h" 30#include "nouveau_fence.h"
30 31
31struct nv10_fence_chan { 32struct nv10_fence_chan {
@@ -64,12 +65,11 @@ int
64nv17_fence_sync(struct nouveau_fence *fence, 65nv17_fence_sync(struct nouveau_fence *fence,
65 struct nouveau_channel *prev, struct nouveau_channel *chan) 66 struct nouveau_channel *prev, struct nouveau_channel *chan)
66{ 67{
67 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 68 struct nv10_fence_priv *priv = chan->drm->fence;
68 struct nv10_fence_priv *priv = dev_priv->fence.func;
69 u32 value; 69 u32 value;
70 int ret; 70 int ret;
71 71
72 if (!mutex_trylock(&prev->mutex)) 72 if (!mutex_trylock(&prev->cli->mutex))
73 return -EBUSY; 73 return -EBUSY;
74 74
75 spin_lock(&priv->lock); 75 spin_lock(&priv->lock);
@@ -96,14 +96,14 @@ nv17_fence_sync(struct nouveau_fence *fence,
96 FIRE_RING (chan); 96 FIRE_RING (chan);
97 } 97 }
98 98
99 mutex_unlock(&prev->mutex); 99 mutex_unlock(&prev->cli->mutex);
100 return 0; 100 return 0;
101} 101}
102 102
103u32 103u32
104nv10_fence_read(struct nouveau_channel *chan) 104nv10_fence_read(struct nouveau_channel *chan)
105{ 105{
106 return nvchan_rd32(chan, 0x0048); 106 return nv_ro32(chan->object, 0x0048);
107} 107}
108 108
109void 109void
@@ -118,10 +118,8 @@ nv10_fence_context_del(struct nouveau_channel *chan)
118static int 118static int
119nv10_fence_context_new(struct nouveau_channel *chan) 119nv10_fence_context_new(struct nouveau_channel *chan)
120{ 120{
121 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 121 struct nv10_fence_priv *priv = chan->drm->fence;
122 struct nv10_fence_priv *priv = dev_priv->fence.func;
123 struct nv10_fence_chan *fctx; 122 struct nv10_fence_chan *fctx;
124 struct nouveau_gpuobj *obj;
125 int ret = 0; 123 int ret = 0;
126 124
127 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); 125 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -132,15 +130,19 @@ nv10_fence_context_new(struct nouveau_channel *chan)
132 130
133 if (priv->bo) { 131 if (priv->bo) {
134 struct ttm_mem_reg *mem = &priv->bo->bo.mem; 132 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
135 133 struct nouveau_object *object;
136 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY, 134 u32 start = mem->start * PAGE_SIZE;
137 mem->start * PAGE_SIZE, mem->size, 135 u32 limit = start + mem->size - 1;
138 NV_MEM_ACCESS_RW, 136
139 NV_MEM_TARGET_VRAM, &obj); 137 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
140 if (!ret) { 138 NvSema, 0x0002,
141 ret = nouveau_ramht_insert(chan, NvSema, obj); 139 &(struct nv_dma_class) {
142 nouveau_gpuobj_ref(NULL, &obj); 140 .flags = NV_DMA_TARGET_VRAM |
143 } 141 NV_DMA_ACCESS_RDWR,
142 .start = start,
143 .limit = limit,
144 }, sizeof(struct nv_dma_class),
145 &object);
144 } 146 }
145 147
146 if (ret) 148 if (ret)
@@ -149,24 +151,22 @@ nv10_fence_context_new(struct nouveau_channel *chan)
149} 151}
150 152
151void 153void
152nv10_fence_destroy(struct drm_device *dev) 154nv10_fence_destroy(struct nouveau_drm *drm)
153{ 155{
154 struct drm_nouveau_private *dev_priv = dev->dev_private; 156 struct nv10_fence_priv *priv = drm->fence;
155 struct nv10_fence_priv *priv = dev_priv->fence.func; 157 nouveau_bo_unmap(priv->bo);
156
157 nouveau_bo_ref(NULL, &priv->bo); 158 nouveau_bo_ref(NULL, &priv->bo);
158 dev_priv->fence.func = NULL; 159 drm->fence = NULL;
159 kfree(priv); 160 kfree(priv);
160} 161}
161 162
162int 163int
163nv10_fence_create(struct drm_device *dev) 164nv10_fence_create(struct nouveau_drm *drm)
164{ 165{
165 struct drm_nouveau_private *dev_priv = dev->dev_private;
166 struct nv10_fence_priv *priv; 166 struct nv10_fence_priv *priv;
167 int ret = 0; 167 int ret = 0;
168 168
169 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 169 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
170 if (!priv) 170 if (!priv)
171 return -ENOMEM; 171 return -ENOMEM;
172 172
@@ -176,11 +176,10 @@ nv10_fence_create(struct drm_device *dev)
176 priv->base.emit = nv10_fence_emit; 176 priv->base.emit = nv10_fence_emit;
177 priv->base.read = nv10_fence_read; 177 priv->base.read = nv10_fence_read;
178 priv->base.sync = nv10_fence_sync; 178 priv->base.sync = nv10_fence_sync;
179 dev_priv->fence.func = &priv->base;
180 spin_lock_init(&priv->lock); 179 spin_lock_init(&priv->lock);
181 180
182 if (dev_priv->chipset >= 0x17) { 181 if (nv_device(drm->device)->chipset >= 0x17) {
183 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, 182 ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
184 0, 0x0000, NULL, &priv->bo); 183 0, 0x0000, NULL, &priv->bo);
185 if (!ret) { 184 if (!ret) {
186 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); 185 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
@@ -197,6 +196,6 @@ nv10_fence_create(struct drm_device *dev)
197 } 196 }
198 197
199 if (ret) 198 if (ret)
200 nv10_fence_destroy(dev); 199 nv10_fence_destroy(drm);
201 return ret; 200 return ret;
202} 201}
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index d857525666ee..93f536de3779 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -27,7 +27,6 @@
27#include <nouveau_bios.h> 27#include <nouveau_bios.h>
28#include "nouveau_pm.h" 28#include "nouveau_pm.h"
29#include "nouveau_hw.h" 29#include "nouveau_hw.h"
30#include <engine/fifo.h>
31 30
32#define min2(a,b) ((a) < (b) ? (a) : (b)) 31#define min2(a,b) ((a) < (b) ? (a) : (b))
33 32
@@ -259,7 +258,7 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
259 if (!nv_wait(dev, 0x003220, 0x00000010, 0x00000000)) 258 if (!nv_wait(dev, 0x003220, 0x00000010, 0x00000000))
260 goto resume; 259 goto resume;
261 nv_mask(dev, 0x003200, 0x00000001, 0x00000000); 260 nv_mask(dev, 0x003200, 0x00000001, 0x00000000);
262 nv04_fifo_cache_pull(dev, false); 261 //XXX: nv04_fifo_cache_pull(dev, false);
263 262
264 if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev)) 263 if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev))
265 goto resume; 264 goto resume;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 83419a2daa0b..ae72f7656106 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -31,7 +31,6 @@
31#include "nouveau_connector.h" 31#include "nouveau_connector.h"
32#include "nouveau_fb.h" 32#include "nouveau_fb.h"
33#include "nouveau_fbcon.h" 33#include "nouveau_fbcon.h"
34#include <core/ramht.h>
35#include "drm_crtc_helper.h" 34#include "drm_crtc_helper.h"
36#include "nouveau_fence.h" 35#include "nouveau_fence.h"
37 36
@@ -102,17 +101,17 @@ nv50_display_sync(struct drm_device *dev)
102 BEGIN_NV04(evo, 0, 0x0084, 1); 101 BEGIN_NV04(evo, 0, 0x0084, 1);
103 OUT_RING (evo, 0x00000000); 102 OUT_RING (evo, 0x00000000);
104 103
105 nv_wo32(disp->ntfy, 0x000, 0x00000000); 104 nv_wo32(disp->ramin, 0x2000, 0x00000000);
106 FIRE_RING (evo); 105 FIRE_RING (evo);
107 106
108 start = nv_timer_read(dev); 107 start = nv_timer_read(dev);
109 do { 108 do {
110 if (nv_ro32(disp->ntfy, 0x000)) 109 if (nv_ro32(disp->ramin, 0x2000))
111 return 0; 110 return 0;
112 } while (nv_timer_read(dev) - start < 2000000000ULL); 111 } while (nv_timer_read(dev) - start < 2000000000ULL);
113 } 112 }
114 113
115 return -EBUSY; 114 return 0;
116} 115}
117 116
118int 117int
@@ -217,7 +216,7 @@ nv50_display_init(struct drm_device *dev)
217 return ret; 216 return ret;
218 evo = nv50_display(dev)->master; 217 evo = nv50_display(dev)->master;
219 218
220 nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->addr >> 8) | 9); 219 nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (nv50_display(dev)->ramin->addr >> 8) | 9);
221 220
222 ret = RING_SPACE(evo, 3); 221 ret = RING_SPACE(evo, 3);
223 if (ret) 222 if (ret)
@@ -444,7 +443,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
444 if (dev_priv->chipset < 0x84) 443 if (dev_priv->chipset < 0x84)
445 OUT_RING (chan, NvSema); 444 OUT_RING (chan, NvSema);
446 else 445 else
447 OUT_RING (chan, chan->vram_handle); 446 OUT_RING (chan, chan->vram);
448 } else { 447 } else {
449 u64 offset = nvc0_fence_crtc(chan, nv_crtc->index); 448 u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
450 offset += dispc->sem.offset; 449 offset += dispc->sem.offset;
@@ -598,48 +597,6 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_output *dcb,
598} 597}
599 598
600static void 599static void
601nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
602{
603 struct drm_nouveau_private *dev_priv = dev->dev_private;
604 struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
605 struct nouveau_software_chan *pch, *tmp;
606
607 list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) {
608 if (pch->vblank.head != crtc)
609 continue;
610
611 spin_lock(&psw->peephole_lock);
612 nv_wr32(dev, 0x001704, pch->vblank.channel);
613 nv_wr32(dev, 0x001710, 0x80000000 | pch->vblank.ctxdma);
614 if (dev_priv->chipset == 0x50) {
615 nv_wr32(dev, 0x001570, pch->vblank.offset);
616 nv_wr32(dev, 0x001574, pch->vblank.value);
617 } else {
618 nv_wr32(dev, 0x060010, pch->vblank.offset);
619 nv_wr32(dev, 0x060014, pch->vblank.value);
620 }
621 spin_unlock(&psw->peephole_lock);
622
623 list_del(&pch->vblank.list);
624 drm_vblank_put(dev, crtc);
625 }
626
627 drm_handle_vblank(dev, crtc);
628}
629
630static void
631nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
632{
633 if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
634 nv50_display_vblank_crtc_handler(dev, 0);
635
636 if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
637 nv50_display_vblank_crtc_handler(dev, 1);
638
639 nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC);
640}
641
642static void
643nv50_display_unk10_handler(struct drm_device *dev) 600nv50_display_unk10_handler(struct drm_device *dev)
644{ 601{
645 struct drm_nouveau_private *dev_priv = dev->dev_private; 602 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -978,8 +935,8 @@ nv50_display_isr(struct drm_device *dev)
978 } 935 }
979 936
980 if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) { 937 if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
981 nv50_display_vblank_handler(dev, intr1);
982 intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC; 938 intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
939 delayed |= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
983 } 940 }
984 941
985 clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 | 942 clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 25c301391724..ef12a7afac9c 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -33,7 +33,6 @@
33#include "nouveau_dma.h" 33#include "nouveau_dma.h"
34#include "nouveau_reg.h" 34#include "nouveau_reg.h"
35#include "nouveau_crtc.h" 35#include "nouveau_crtc.h"
36#include "nouveau_software.h"
37#include "nv50_evo.h" 36#include "nv50_evo.h"
38 37
39struct nv50_display_crtc { 38struct nv50_display_crtc {
@@ -47,7 +46,10 @@ struct nv50_display_crtc {
47 46
48struct nv50_display { 47struct nv50_display {
49 struct nouveau_channel *master; 48 struct nouveau_channel *master;
50 struct nouveau_gpuobj *ntfy; 49
50 struct nouveau_gpuobj *ramin;
51 u32 dmao;
52 u32 hash;
51 53
52 struct nv50_display_crtc crtc[2]; 54 struct nv50_display_crtc crtc[2];
53 55
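
struct nv50_display above trades the dedicated ntfy object for a single shared ramin block plus two bump cursors: hash entries grow from the bottom, DMA objects from 0x1000, and the 4KiB sync area sits at 0x2000 (see nv50_evo_create() further down). The implied layout, with names of our own:

/* Offsets inside the 32KiB disp->ramin block (annotation only) */
#define EXAMPLE_EVO_HASH 0x0000	/* hash entries, disp->hash cursor, +0x08 each */
#define EXAMPLE_EVO_DMAO 0x1000	/* DMA objects, disp->dmao cursor, +0x20 each */
#define EXAMPLE_EVO_NTFY 0x2000	/* 4KiB sync area, replaces disp->ntfy */
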
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 7e9a6d6d673b..d7d8080c6a14 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -26,9 +26,22 @@
26 26
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_dma.h" 28#include "nouveau_dma.h"
29#include <core/ramht.h>
30#include "nv50_display.h" 29#include "nv50_display.h"
31 30
31static u32
32nv50_evo_rd32(struct nouveau_object *object, u32 addr)
33{
34 void __iomem *iomem = object->oclass->ofuncs->rd08;
35 return ioread32_native(iomem + addr);
36}
37
38static void
39nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
40{
41 void __iomem *iomem = object->oclass->ofuncs->rd08;
42 iowrite32_native(data, iomem + addr);
43}
44
32static void 45static void
33nv50_evo_channel_del(struct nouveau_channel **pevo) 46nv50_evo_channel_del(struct nouveau_channel **pevo)
34{ 47{
@@ -38,21 +51,24 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
38 return; 51 return;
39 *pevo = NULL; 52 *pevo = NULL;
40 53
41 nouveau_ramht_ref(NULL, &evo->ramht, evo); 54 nouveau_bo_unmap(evo->push.buffer);
42 nouveau_gpuobj_channel_takedown(evo); 55 nouveau_bo_ref(NULL, &evo->push.buffer);
43 nouveau_bo_unmap(evo->pushbuf_bo);
44 nouveau_bo_ref(NULL, &evo->pushbuf_bo);
45 56
46 if (evo->user) 57 if (evo->object)
47 iounmap(evo->user); 58 iounmap(evo->object->oclass->ofuncs->rd08);
48 59
49 kfree(evo); 60 kfree(evo);
50} 61}
51 62
52void 63int
53nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size) 64nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
65 u64 base, u64 size, struct nouveau_gpuobj **pobj)
54{ 66{
55 struct drm_nouveau_private *dev_priv = obj->dev->dev_private; 67 struct drm_device *dev = evo->fence;
68 struct drm_nouveau_private *dev_priv = dev->dev_private;
69 struct nv50_display *disp = nv50_display(dev);
70 u32 dmao = disp->dmao;
71 u32 hash = disp->hash;
56 u32 flags5; 72 u32 flags5;
57 73
58 if (dev_priv->chipset < 0xc0) { 74 if (dev_priv->chipset < 0xc0) {
@@ -67,36 +83,21 @@ nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size
67 flags5 = 0x00020000; 83 flags5 = 0x00020000;
68 } 84 }
69 85
70 nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM, 86 nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
71 NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0); 87 nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
72 nv_wo32(obj, 0x14, flags5); 88 nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
73 nvimem_flush(obj->dev); 89 nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
74} 90 upper_32_bits(base));
91 nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
92 nv_wo32(disp->ramin, dmao + 0x14, flags5);
75 93
76int 94 nv_wo32(disp->ramin, hash + 0x00, handle);
77nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype, 95 nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
78 u64 base, u64 size, struct nouveau_gpuobj **pobj) 96 evo->handle);
79{
80 struct nv50_display *disp = nv50_display(evo->dev);
81 struct nouveau_gpuobj *obj = NULL;
82 int ret;
83 97
84 ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj); 98 disp->dmao += 0x20;
85 if (ret) 99 disp->hash += 0x08;
86 return ret; 100 return 0;
87 obj->engine = NVOBJ_ENGINE_DISPLAY;
88
89 nv50_evo_dmaobj_init(obj, memtype, base, size);
90
91 ret = nouveau_ramht_insert(evo, handle, obj);
92 if (ret)
93 goto out;
94
95 if (pobj)
96 nouveau_gpuobj_ref(obj, pobj);
97out:
98 nouveau_gpuobj_ref(NULL, &obj);
99 return ret;
100} 101}
101 102
102static int 103static int
@@ -112,49 +113,52 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
112 return -ENOMEM; 113 return -ENOMEM;
113 *pevo = evo; 114 *pevo = evo;
114 115
115 evo->id = chid; 116 evo->handle = chid;
116 evo->dev = dev; 117 evo->fence = dev;
117 evo->user_get = 4; 118 evo->user_get = 4;
118 evo->user_put = 0; 119 evo->user_put = 0;
119 120
120 ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL, 121 ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
121 &evo->pushbuf_bo); 122 &evo->push.buffer);
122 if (ret == 0) 123 if (ret == 0)
123 ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM); 124 ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
124 if (ret) { 125 if (ret) {
125 NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret); 126 NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
126 nv50_evo_channel_del(pevo); 127 nv50_evo_channel_del(pevo);
127 return ret; 128 return ret;
128 } 129 }
129 130
130 ret = nouveau_bo_map(evo->pushbuf_bo); 131 ret = nouveau_bo_map(evo->push.buffer);
131 if (ret) { 132 if (ret) {
132 NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret); 133 NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
133 nv50_evo_channel_del(pevo); 134 nv50_evo_channel_del(pevo);
134 return ret; 135 return ret;
135 } 136 }
136 137
137 evo->user = ioremap(pci_resource_start(dev->pdev, 0) + 138 evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
138 NV50_PDISPLAY_USER(evo->id), PAGE_SIZE); 139#ifdef NOUVEAU_OBJECT_MAGIC
139 if (!evo->user) { 140 evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
140 NV_ERROR(dev, "Error mapping EVO control regs.\n"); 141#endif
141 nv50_evo_channel_del(pevo); 142 evo->object->parent = nv_object(disp->ramin)->parent;
142 return -ENOMEM; 143 evo->object->engine = nv_object(disp->ramin)->engine;
143 } 144 evo->object->oclass =
144 145 kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
145 /* bind primary evo channel's ramht to the channel */ 146 evo->object->oclass->ofuncs =
146 if (disp->master && evo != disp->master) 147 kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
147 nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL); 148 evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
148 149 evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
150 evo->object->oclass->ofuncs->rd08 =
151 ioremap(pci_resource_start(dev->pdev, 0) +
152 NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
149 return 0; 153 return 0;
150} 154}
151 155
152static int 156static int
153nv50_evo_channel_init(struct nouveau_channel *evo) 157nv50_evo_channel_init(struct nouveau_channel *evo)
154{ 158{
155 struct drm_device *dev = evo->dev; 159 struct drm_device *dev = evo->fence;
156 int id = evo->id, ret, i; 160 int id = evo->handle, ret, i;
157 u64 pushbuf = evo->pushbuf_bo->bo.offset; 161 u64 pushbuf = evo->push.buffer->bo.offset;
158 u32 tmp; 162 u32 tmp;
159 163
160 tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); 164 tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
@@ -205,8 +209,8 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
205static void 209static void
206nv50_evo_channel_fini(struct nouveau_channel *evo) 210nv50_evo_channel_fini(struct nouveau_channel *evo)
207{ 211{
208 struct drm_device *dev = evo->dev; 212 struct drm_device *dev = evo->fence;
209 int id = evo->id; 213 int id = evo->handle;
210 214
211 nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000); 215 nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
212 nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000); 216 nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
@@ -231,8 +235,8 @@ nv50_evo_destroy(struct drm_device *dev)
231 } 235 }
232 nv50_evo_channel_del(&disp->crtc[i].sync); 236 nv50_evo_channel_del(&disp->crtc[i].sync);
233 } 237 }
234 nouveau_gpuobj_ref(NULL, &disp->ntfy);
235 nv50_evo_channel_del(&disp->master); 238 nv50_evo_channel_del(&disp->master);
239 nouveau_gpuobj_ref(NULL, &disp->ramin);
236} 240}
237 241
238int 242int
@@ -240,55 +244,33 @@ nv50_evo_create(struct drm_device *dev)
240{ 244{
241 struct drm_nouveau_private *dev_priv = dev->dev_private; 245 struct drm_nouveau_private *dev_priv = dev->dev_private;
242 struct nv50_display *disp = nv50_display(dev); 246 struct nv50_display *disp = nv50_display(dev);
243 struct nouveau_gpuobj *ramht = NULL;
244 struct nouveau_channel *evo; 247 struct nouveau_channel *evo;
245 int ret, i, j; 248 int ret, i, j;
246 249
247 /* create primary evo channel, the one we use for modesetting
248 * purposes
249 */
250 ret = nv50_evo_channel_new(dev, 0, &disp->master);
251 if (ret)
252 return ret;
253 evo = disp->master;
254
255 /* setup object management on it, any other evo channel will 250 /* setup object management on it, any other evo channel will
256 * use this also as there's no per-channel support on the 251 * use this also as there's no per-channel support on the
257 * hardware 252 * hardware
258 */ 253 */
259 ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536, 254 ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
260 NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin); 255 NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
261 if (ret) { 256 if (ret) {
262 NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); 257 NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
263 goto err; 258 goto err;
264 } 259 }
265 260
266 ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht); 261 disp->hash = 0x0000;
267 if (ret) { 262 disp->dmao = 0x1000;
268 NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
269 goto err;
270 }
271
272 ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
273 nouveau_gpuobj_ref(NULL, &ramht);
274 if (ret)
275 goto err;
276 263
277 /* not sure exactly what this is.. 264 /* create primary evo channel, the one we use for modesetting
278 * 265 * purposes
279 * the first dword of the structure is used by nvidia to wait on
280 * full completion of an EVO "update" command.
281 *
282 * method 0x8c on the master evo channel will fill a lot more of
283 * this structure with some undefined info
284 */ 266 */
285 ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0, 267 ret = nv50_evo_channel_new(dev, 0, &disp->master);
286 NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy);
287 if (ret) 268 if (ret)
288 goto err; 269 return ret;
270 evo = disp->master;
289 271
290 ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000, 272 ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
291 disp->ntfy->addr, disp->ntfy->size, NULL); 273 disp->ramin->addr + 0x2000, 0x1000, NULL);
292 if (ret) 274 if (ret)
293 goto err; 275 goto err;
294 276
@@ -304,13 +286,13 @@ nv50_evo_create(struct drm_device *dev)
304 goto err; 286 goto err;
305 287
306 ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 | 288 ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
307 (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00), 289 (dev_priv->chipset < 0xc0 ? 0x7a : 0xfe),
308 0, nvfb_vram_size(dev), NULL); 290 0, nvfb_vram_size(dev), NULL);
309 if (ret) 291 if (ret)
310 goto err; 292 goto err;
311 293
312 ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 | 294 ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
313 (dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00), 295 (dev_priv->chipset < 0xc0 ? 0x70 : 0xfe),
314 0, nvfb_vram_size(dev), NULL); 296 0, nvfb_vram_size(dev), NULL);
315 if (ret) 297 if (ret)
316 goto err; 298 goto err;
@@ -352,14 +334,14 @@ nv50_evo_create(struct drm_device *dev)
352 334
353 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 | 335 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
354 (dev_priv->chipset < 0xc0 ? 336 (dev_priv->chipset < 0xc0 ?
355 0x7a00 : 0xfe00), 337 0x7a : 0xfe),
356 0, nvfb_vram_size(dev), NULL); 338 0, nvfb_vram_size(dev), NULL);
357 if (ret) 339 if (ret)
358 goto err; 340 goto err;
359 341
360 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 | 342 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
361 (dev_priv->chipset < 0xc0 ? 343 (dev_priv->chipset < 0xc0 ?
362 0x7000 : 0xfe00), 344 0x70 : 0xfe),
363 0, nvfb_vram_size(dev), NULL); 345 0, nvfb_vram_size(dev), NULL);
364 if (ret) 346 if (ret)
365 goto err; 347 goto err;
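
The nv50_evo.c hunks above replace the per-channel RAMHT allocation with a
single disp->ramin buffer that nv50_evo_create() carves into fixed regions:
the object hash table at 0x0000 (disp->hash), DMA-object space at 0x1000
(disp->dmao), and a 0x1000-byte sync window at 0x2000 that takes over the
role of the old disp->ntfy notifier. A minimal sketch of that layout, with
a hypothetical struct used purely for illustration:

/* Sketch only: the fixed carving of disp->ramin implied by the hunks
 * above; the offsets come from the patch, the type is not in the driver. */
struct evo_ramin_layout {
	u32 hash;	/* 0x0000: hash table for object lookup */
	u32 dmao;	/* 0x1000: DMA object storage */
	u32 sync;	/* 0x2000: NvEvoSync semaphore/notifier window */
};

static const struct evo_ramin_layout evo_layout = {
	.hash = 0x0000,
	.dmao = 0x1000,
	.sync = 0x2000,
};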
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 1593e2402fd2..2028a4447124 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -22,20 +22,16 @@
  * Authors: Ben Skeggs
  */
 
-#include "drmP.h"
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include <core/ramht.h>
 #include "nouveau_fbcon.h"
-#include <core/mm.h>
 
 int
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	int ret;
 
 	ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
@@ -69,9 +65,8 @@ int
 nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	int ret;
 
 	ret = RING_SPACE(chan, 12);
@@ -98,9 +93,8 @@ int
 nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	uint32_t width, dwords, *data = (uint32_t *)image->data;
 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
 	uint32_t *palette = info->pseudo_palette;
@@ -156,10 +150,11 @@ int
 nv50_fbcon_accel_init(struct fb_info *info)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
 	struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
+	struct drm_device *dev = nfbdev->dev;
+	struct nouveau_drm *drm = nouveau_newpriv(dev);
+	struct nouveau_channel *chan = drm->channel;
+	struct nouveau_object *object;
 	int ret, format;
 
 	switch (info->var.bits_per_pixel) {
@@ -189,7 +184,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
 		return -EINVAL;
 	}
 
-	ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
+				 0x502d, NULL, 0, &object);
 	if (ret)
 		return ret;
 
@@ -202,9 +198,9 @@ nv50_fbcon_accel_init(struct fb_info *info)
 	BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
 	OUT_RING(chan, Nv2D);
 	BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
-	OUT_RING(chan, chan->vram_handle);
-	OUT_RING(chan, chan->vram_handle);
-	OUT_RING(chan, chan->vram_handle);
+	OUT_RING(chan, NvDmaFB);
+	OUT_RING(chan, NvDmaFB);
+	OUT_RING(chan, NvDmaFB);
 	BEGIN_NV04(chan, NvSub2D, 0x0290, 1);
 	OUT_RING(chan, 0);
 	BEGIN_NV04(chan, NvSub2D, 0x0888, 1);
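
Worth noting from the accel_init hunk above: the old per-channel helper
nouveau_gpuobj_gr_new() is gone, and graphics classes are now allocated
through the generic core object interface. A condensed sketch of the new
pattern as this patch uses it (error handling trimmed; the wrapper function
is hypothetical):

/* Sketch: allocate the 0x502d 2D class on the DRM channel via the new
 * object interface, mirroring the hunk above. */
static int fbcon_alloc_2d_sketch(struct nouveau_channel *chan)
{
	struct nouveau_object *object;

	return nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
				  0x502d, NULL, 0, &object);
}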
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 10aa04f26b83..e717aaaf62c6 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -22,12 +22,12 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 
-#include "drmP.h"
-#include "nouveau_drv.h"
+#include <core/object.h>
+#include <core/class.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include <core/ramht.h>
 #include "nouveau_fence.h"
-#include "nv50_display.h"
 
 struct nv50_fence_chan {
 	struct nouveau_fence_chan base;
@@ -43,12 +43,11 @@ struct nv50_fence_priv {
 static int
 nv50_fence_context_new(struct nouveau_channel *chan)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nv50_fence_priv *priv = dev_priv->fence.func;
+	struct nv50_fence_priv *priv = chan->drm->fence;
 	struct nv50_fence_chan *fctx;
 	struct ttm_mem_reg *mem = &priv->bo->bo.mem;
-	struct nouveau_gpuobj *obj;
-	int ret = 0, i;
+	struct nouveau_object *object;
+	int ret, i;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
 	if (!fctx)
@@ -56,30 +55,29 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 
 	nouveau_fence_context_new(&fctx->base);
 
-	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
-				     mem->start * PAGE_SIZE, mem->size,
-				     NV_MEM_ACCESS_RW,
-				     NV_MEM_TARGET_VRAM, &obj);
-	if (!ret) {
-		ret = nouveau_ramht_insert(chan, NvSema, obj);
-		nouveau_gpuobj_ref(NULL, &obj);
-	}
+	ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+				 NvSema, 0x0002,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = mem->start * PAGE_SIZE,
+					.limit = mem->size - 1,
+				 }, sizeof(struct nv_dma_class),
+				 &object);
 
 	/* dma objects for display sync channel semaphore blocks */
-	for (i = 0; i < chan->dev->mode_config.num_crtc; i++) {
-		struct nv50_display *pdisp = nv50_display(chan->dev);
-		struct nv50_display_crtc *dispc = &pdisp->crtc[i];
-		struct nouveau_gpuobj *obj = NULL;
-
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     dispc->sem.bo->bo.offset, 0x1000,
-					     NV_MEM_ACCESS_RW,
-					     NV_MEM_TARGET_VRAM, &obj);
-		if (ret)
-			break;
-
-		ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, obj);
-		nouveau_gpuobj_ref(NULL, &obj);
+	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo = nv50sema(chan->drm->dev, i);
+
+		ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+					 NvEvoSema0 + i, 0x003d,
+					 &(struct nv_dma_class) {
+						.flags = NV_DMA_TARGET_VRAM |
+							 NV_DMA_ACCESS_RDWR,
+						.start = bo->bo.offset,
+						.limit = bo->bo.offset + 0xfff,
+					 }, sizeof(struct nv_dma_class),
+					 &object);
 	}
 
 	if (ret)
@@ -88,13 +86,12 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 }
 
 int
-nv50_fence_create(struct drm_device *dev)
+nv50_fence_create(struct nouveau_drm *drm)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_fence_priv *priv;
 	int ret = 0;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
@@ -104,10 +101,9 @@ nv50_fence_create(struct drm_device *dev)
 	priv->base.emit = nv10_fence_emit;
 	priv->base.read = nv10_fence_read;
 	priv->base.sync = nv17_fence_sync;
-	dev_priv->fence.func = &priv->base;
 	spin_lock_init(&priv->lock);
 
-	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
 			     0, 0x0000, NULL, &priv->bo);
 	if (!ret) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
@@ -117,9 +113,12 @@ nv50_fence_create(struct drm_device *dev)
 		nouveau_bo_ref(NULL, &priv->bo);
 	}
 
-	if (ret == 0)
+	if (ret == 0) {
 		nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
-	else
-		nv10_fence_destroy(dev);
+		priv->base.sync = nv17_fence_sync;
+	}
+
+	if (ret)
+		nv10_fence_destroy(drm);
 	return ret;
 }
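
The context_new conversion above is the pattern repeated throughout this
commit: the old nouveau_gpuobj_dma_new() + nouveau_ramht_insert() pair
becomes one nouveau_object_new() call that passes an nv_dma_class
descriptor. Pulled out on its own as a sketch (the wrapper name is
hypothetical; the flags and class come from the hunk above):

/* Sketch: create a read/write VRAM DMA object spanning [start, limit]
 * under the given handle, as the NvSema allocation above does. */
static int fence_dma_new_sketch(struct nouveau_channel *chan, u32 handle,
				u64 start, u64 limit)
{
	struct nouveau_object *object;

	return nouveau_object_new(nv_object(chan->cli), chan->handle,
				  handle, 0x0002,
				  &(struct nv_dma_class) {
					.flags = NV_DMA_TARGET_VRAM |
						 NV_DMA_ACCESS_RDWR,
					.start = start,
					.limit = limit,
				  }, sizeof(struct nv_dma_class),
				  &object);
}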
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 142cd4e83767..ac0208438ace 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -28,7 +28,6 @@
28#include "nouveau_hw.h" 28#include "nouveau_hw.h"
29#include "nouveau_pm.h" 29#include "nouveau_pm.h"
30#include "nouveau_hwsq.h" 30#include "nouveau_hwsq.h"
31#include "nv50_display.h"
32 31
33enum clk_src { 32enum clk_src {
34 clk_src_crystal, 33 clk_src_crystal,
diff --git a/drivers/gpu/drm/nouveau/nv50_software.c b/drivers/gpu/drm/nouveau/nv50_software.c
deleted file mode 100644
index 7c9dbe862c44..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_software.c
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include <core/ramht.h>
-#include "nouveau_software.h"
-
-#include "nv50_display.h"
-
-struct nv50_software_priv {
-	struct nouveau_software_priv base;
-};
-
-struct nv50_software_chan {
-	struct nouveau_software_chan base;
-};
-
-static int
-mthd_dma_vblsem(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
-	struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
-	struct nouveau_gpuobj *gpuobj;
-
-	gpuobj = nouveau_ramht_find(chan, data);
-	if (!gpuobj)
-		return -ENOENT;
-
-	pch->base.vblank.ctxdma = gpuobj->node->offset >> 4;
-	return 0;
-}
-
-static int
-mthd_vblsem_offset(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
-	struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
-	pch->base.vblank.offset = data;
-	return 0;
-}
-
-static int
-mthd_vblsem_value(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
-	struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
-	pch->base.vblank.value = data;
-	return 0;
-}
-
-static int
-mthd_vblsem_release(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
-	struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
-	struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
-	struct drm_device *dev = chan->dev;
-
-	if (data > 1)
-		return -EINVAL;
-
-	drm_vblank_get(dev, data);
-
-	pch->base.vblank.head = data;
-	list_add(&pch->base.vblank.list, &psw->base.vblank);
-	return 0;
-}
-
-static int
-mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
-	struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
-	return pch->base.flip(pch->base.flip_data);
-}
-
-static int
-nv50_software_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct nv50_software_chan *pch;
-
-	pch = kzalloc(sizeof(*pch), GFP_KERNEL);
-	if (!pch)
-		return -ENOMEM;
-
-	nouveau_software_context_new(chan, &pch->base);
-	pch->base.vblank.channel = chan->ramin->addr >> 12;
-	chan->engctx[engine] = pch;
-	return 0;
-}
-
-static void
-nv50_software_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nv50_software_chan *pch = chan->engctx[engine];
-	chan->engctx[engine] = NULL;
-	kfree(pch);
-}
-
-static int
-nv50_software_object_new(struct nouveau_channel *chan, int engine,
-			 u32 handle, u16 class)
-{
-	struct drm_device *dev = chan->dev;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
-	if (ret)
-		return ret;
-	obj->engine = 0;
-	obj->class = class;
-
-	ret = nouveau_ramht_insert(chan, handle, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
-}
-
-static int
-nv50_software_init(struct drm_device *dev, int engine)
-{
-	return 0;
-}
-
-static int
-nv50_software_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	return 0;
-}
-
-static void
-nv50_software_destroy(struct drm_device *dev, int engine)
-{
-	struct nv50_software_priv *psw = nv_engine(dev, engine);
-
-	NVOBJ_ENGINE_DEL(dev, SW);
-	kfree(psw);
-}
-
-int
-nv50_software_create(struct drm_device *dev)
-{
-	struct nv50_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
-	if (!psw)
-		return -ENOMEM;
-
-	psw->base.base.destroy = nv50_software_destroy;
-	psw->base.base.init = nv50_software_init;
-	psw->base.base.fini = nv50_software_fini;
-	psw->base.base.context_new = nv50_software_context_new;
-	psw->base.base.context_del = nv50_software_context_del;
-	psw->base.base.object_new = nv50_software_object_new;
-	nouveau_software_create(&psw->base);
-
-	NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
-	NVOBJ_CLASS(dev, 0x506e, SW);
-	NVOBJ_MTHD (dev, 0x506e, 0x018c, mthd_dma_vblsem);
-	NVOBJ_MTHD (dev, 0x506e, 0x0400, mthd_vblsem_offset);
-	NVOBJ_MTHD (dev, 0x506e, 0x0404, mthd_vblsem_value);
-	NVOBJ_MTHD (dev, 0x506e, 0x0408, mthd_vblsem_release);
-	NVOBJ_MTHD (dev, 0x506e, 0x0500, mthd_flip);
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 5ef87edb878d..b0d147a675c4 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -22,13 +22,14 @@
  * Authors: Ben Skeggs
  */
 
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_dma.h"
+#include <core/object.h>
+#include <core/class.h>
+
 #include <engine/fifo.h>
-#include <core/ramht.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
 #include "nouveau_fence.h"
-#include "nv50_display.h"
 
 struct nv84_fence_chan {
 	struct nouveau_fence_chan base;
@@ -43,13 +44,14 @@ static int
 nv84_fence_emit(struct nouveau_fence *fence)
 {
 	struct nouveau_channel *chan = fence->channel;
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
 	int ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
 		OUT_RING (chan, NvSema);
 		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING (chan, upper_32_bits(chan->id * 16));
-		OUT_RING (chan, lower_32_bits(chan->id * 16));
+		OUT_RING (chan, upper_32_bits(fifo->chid * 16));
+		OUT_RING (chan, lower_32_bits(fifo->chid * 16));
 		OUT_RING (chan, fence->sequence);
 		OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
 		FIRE_RING (chan);
@@ -62,13 +64,14 @@ static int
 nv84_fence_sync(struct nouveau_fence *fence,
 		struct nouveau_channel *prev, struct nouveau_channel *chan)
 {
+	struct nouveau_fifo_chan *fifo = (void *)prev->object;
 	int ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
 		OUT_RING (chan, NvSema);
 		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING (chan, upper_32_bits(prev->id * 16));
-		OUT_RING (chan, lower_32_bits(prev->id * 16));
+		OUT_RING (chan, upper_32_bits(fifo->chid * 16));
+		OUT_RING (chan, lower_32_bits(fifo->chid * 16));
 		OUT_RING (chan, fence->sequence);
 		OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
 		FIRE_RING (chan);
@@ -79,9 +82,9 @@ nv84_fence_sync(struct nouveau_fence *fence,
 static u32
 nv84_fence_read(struct nouveau_channel *chan)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nv84_fence_priv *priv = dev_priv->fence.func;
-	return nv_ro32(priv->mem, chan->id * 16);
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	struct nv84_fence_priv *priv = chan->drm->fence;
+	return nv_ro32(priv->mem, fifo->chid * 16);
 }
 
 static void
@@ -96,10 +99,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
 static int
 nv84_fence_context_new(struct nouveau_channel *chan)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nv84_fence_priv *priv = dev_priv->fence.func;
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	struct nv84_fence_priv *priv = chan->drm->fence;
 	struct nv84_fence_chan *fctx;
-	struct nouveau_gpuobj *obj;
+	struct nouveau_object *object;
 	int ret, i;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -108,58 +111,56 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 
 	nouveau_fence_context_new(&fctx->base);
 
-	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
-				     priv->mem->addr, priv->mem->size,
-				     NV_MEM_ACCESS_RW,
-				     NV_MEM_TARGET_VRAM, &obj);
-	if (ret == 0) {
-		ret = nouveau_ramht_insert(chan, NvSema, obj);
-		nouveau_gpuobj_ref(NULL, &obj);
-		nv_wo32(priv->mem, chan->id * 16, 0x00000000);
-	}
+	ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+				 NvSema, 0x0002,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = priv->mem->addr,
+					.limit = priv->mem->addr +
+						 priv->mem->size - 1,
+				 }, sizeof(struct nv_dma_class),
+				 &object);
 
 	/* dma objects for display sync channel semaphore blocks */
-	for (i = 0; i < chan->dev->mode_config.num_crtc; i++) {
-		struct nv50_display *pdisp = nv50_display(chan->dev);
-		struct nv50_display_crtc *dispc = &pdisp->crtc[i];
-		struct nouveau_gpuobj *obj = NULL;
-
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     dispc->sem.bo->bo.offset, 0x1000,
-					     NV_MEM_ACCESS_RW,
-					     NV_MEM_TARGET_VRAM, &obj);
-		if (ret)
-			break;
-
-		ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, obj);
-		nouveau_gpuobj_ref(NULL, &obj);
+	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo = nv50sema(chan->drm->dev, i);
+
+		ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+					 NvEvoSema0 + i, 0x003d,
+					 &(struct nv_dma_class) {
+						.flags = NV_DMA_TARGET_VRAM |
+							 NV_DMA_ACCESS_RDWR,
+						.start = bo->bo.offset,
+						.limit = bo->bo.offset + 0xfff,
+					 }, sizeof(struct nv_dma_class),
+					 &object);
 	}
 
 	if (ret)
 		nv84_fence_context_del(chan);
+	nv_wo32(priv->mem, fifo->chid * 16, 0x00000000);
 	return ret;
 }
 
 static void
-nv84_fence_destroy(struct drm_device *dev)
+nv84_fence_destroy(struct nouveau_drm *drm)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv84_fence_priv *priv = dev_priv->fence.func;
-
+	struct nv84_fence_priv *priv = drm->fence;
 	nouveau_gpuobj_ref(NULL, &priv->mem);
-	dev_priv->fence.func = NULL;
+	drm->fence = NULL;
 	kfree(priv);
 }
 
 int
-nv84_fence_create(struct drm_device *dev)
+nv84_fence_create(struct nouveau_drm *drm)
 {
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
 	struct nv84_fence_priv *priv;
+	u32 chan = pfifo->max + 1;
 	int ret;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
@@ -169,15 +170,10 @@ nv84_fence_create(struct drm_device *dev)
 	priv->base.emit = nv84_fence_emit;
 	priv->base.sync = nv84_fence_sync;
 	priv->base.read = nv84_fence_read;
-	dev_priv->fence.func = priv;
-
-	ret = nouveau_gpuobj_new(dev, NULL, 16 * pfifo->channels,
-				 0x1000, 0, &priv->mem);
-	if (ret)
-		goto out;
 
-out:
+	ret = nouveau_gpuobj_new(drm->device, NULL, chan * 16, 0x1000, 0,
				 &priv->mem);
 	if (ret)
-		nv84_fence_destroy(dev);
+		nv84_fence_destroy(drm);
 	return ret;
 }
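
A detail that changes in every fence backend above: per-channel fence state
is now indexed by the FIFO channel id (fifo->chid) instead of the old
chan->id, and the backing object is sized for pfifo->max + 1 channels so
the highest valid id still owns a slot. Each channel gets one 16-byte cell;
emit writes its own cell with SEMAPHORE_TRIGGER_WRITE_LONG, and sync does
an ACQUIRE_GEQUAL on the emitting channel's cell. The slot arithmetic, as a
trivial sketch (the helper name is illustrative):

/* Sketch: byte offset of a channel's fence cell in the shared buffer. */
static inline u64 fence_slot_sketch(u32 chid)
{
	return (u64)chid * 16;	/* 16 bytes per channel, chid-indexed */
}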
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index db26e050c73c..cc88f3649909 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -22,20 +22,16 @@
  * Authors: Ben Skeggs
  */
 
-#include "drmP.h"
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include <core/ramht.h>
 #include "nouveau_fbcon.h"
-#include <core/mm.h>
 
 int
 nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	int ret;
 
 	ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
@@ -69,9 +65,8 @@ int
 nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	int ret;
 
 	ret = RING_SPACE(chan, 12);
@@ -98,9 +93,8 @@ int
 nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	uint32_t width, dwords, *data = (uint32_t *)image->data;
 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
 	uint32_t *palette = info->pseudo_palette;
@@ -157,12 +151,14 @@ nvc0_fbcon_accel_init(struct fb_info *info)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
 	struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
+	struct nouveau_drm *drm = nouveau_newpriv(dev);
+	struct nouveau_channel *chan = drm->channel;
+	struct nouveau_object *object;
 	int ret, format;
 
-	ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
+				 0x902d, NULL, 0, &object);
 	if (ret)
 		return ret;
 
@@ -202,9 +198,6 @@ nvc0_fbcon_accel_init(struct fb_info *info)
 
 	BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
 	OUT_RING (chan, 0x0000902d);
-	BEGIN_NVC0(chan, NvSub2D, 0x0104, 2);
-	OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset));
-	OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset));
 	BEGIN_NVC0(chan, NvSub2D, 0x0290, 1);
 	OUT_RING (chan, 0);
 	BEGIN_NVC0(chan, NvSub2D, 0x0888, 1);
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 779c5ff4ed70..ce612ad398ad 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -22,13 +22,15 @@
  * Authors: Ben Skeggs
  */
 
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_dma.h"
+#include <core/object.h>
+#include <core/client.h>
+#include <core/class.h>
+
 #include <engine/fifo.h>
-#include <core/ramht.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
 #include "nouveau_fence.h"
-#include "nv50_display.h"
 
 struct nvc0_fence_priv {
 	struct nouveau_fence_priv base;
@@ -54,7 +56,8 @@ nvc0_fence_emit(struct nouveau_fence *fence)
 {
 	struct nouveau_channel *chan = fence->channel;
 	struct nvc0_fence_chan *fctx = chan->fence;
-	u64 addr = fctx->vma.offset + chan->id * 16;
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	u64 addr = fctx->vma.offset + fifo->chid * 16;
 	int ret;
 
 	ret = RING_SPACE(chan, 5);
@@ -75,7 +78,8 @@ nvc0_fence_sync(struct nouveau_fence *fence,
 		struct nouveau_channel *prev, struct nouveau_channel *chan)
 {
 	struct nvc0_fence_chan *fctx = chan->fence;
-	u64 addr = fctx->vma.offset + prev->id * 16;
+	struct nouveau_fifo_chan *fifo = (void *)prev->object;
+	u64 addr = fctx->vma.offset + fifo->chid * 16;
 	int ret;
 
 	ret = RING_SPACE(chan, 5);
@@ -95,31 +99,29 @@ nvc0_fence_sync(struct nouveau_fence *fence,
 static u32
 nvc0_fence_read(struct nouveau_channel *chan)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nvc0_fence_priv *priv = dev_priv->fence.func;
-	return nouveau_bo_rd32(priv->bo, chan->id * 16/4);
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	struct nvc0_fence_priv *priv = chan->drm->fence;
+	return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
 }
 
 static void
 nvc0_fence_context_del(struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_fence_priv *priv = dev_priv->fence.func;
+	struct drm_device *dev = chan->drm->dev;
+	struct nvc0_fence_priv *priv = chan->drm->fence;
 	struct nvc0_fence_chan *fctx = chan->fence;
 	int i;
 
-	if (dev_priv->card_type >= NV_D0) {
+	if (nv_device(chan->drm->device)->card_type >= NV_D0) {
 		for (i = 0; i < dev->mode_config.num_crtc; i++) {
-			struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
+			struct nouveau_bo *bo = nvd0sema(dev, i);
 			nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
 		}
 	} else
-	if (dev_priv->card_type >= NV_50) {
-		struct nv50_display *disp = nv50_display(dev);
+	if (nv_device(chan->drm->device)->card_type >= NV_50) {
 		for (i = 0; i < dev->mode_config.num_crtc; i++) {
-			struct nv50_display_crtc *dispc = &disp->crtc[i];
-			nouveau_bo_vma_del(dispc->sem.bo, &fctx->dispc_vma[i]);
+			struct nouveau_bo *bo = nv50sema(dev, i);
+			nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
 		}
 	}
 
@@ -132,9 +134,9 @@ nvc0_fence_context_del(struct nouveau_channel *chan)
 static int
 nvc0_fence_context_new(struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_fence_priv *priv = dev_priv->fence.func;
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	struct nouveau_client *client = nouveau_client(fifo);
+	struct nvc0_fence_priv *priv = chan->drm->fence;
 	struct nvc0_fence_chan *fctx;
 	int ret, i;
 
@@ -144,36 +146,35 @@ nvc0_fence_context_new(struct nouveau_channel *chan)
 
 	nouveau_fence_context_new(&fctx->base);
 
-	ret = nouveau_bo_vma_add(priv->bo, chan->vm, &fctx->vma);
+	ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
 	if (ret)
 		nvc0_fence_context_del(chan);
 
 	/* map display semaphore buffers into channel's vm */
-	for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
+	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
 		struct nouveau_bo *bo;
-		if (dev_priv->card_type >= NV_D0)
-			bo = nvd0_display_crtc_sema(dev, i);
+		if (nv_device(chan->drm->device)->card_type >= NV_D0)
+			bo = nvd0sema(chan->drm->dev, i);
 		else
-			bo = nv50_display(dev)->crtc[i].sem.bo;
+			bo = nv50sema(chan->drm->dev, i);
 
-		ret = nouveau_bo_vma_add(bo, chan->vm, &fctx->dispc_vma[i]);
+		ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
 	}
 
-	nouveau_bo_wr32(priv->bo, chan->id * 16/4, 0x00000000);
+	nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
 	return ret;
 }
 
 static bool
-nvc0_fence_suspend(struct drm_device *dev)
+nvc0_fence_suspend(struct nouveau_drm *drm)
 {
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_fence_priv *priv = dev_priv->fence.func;
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+	struct nvc0_fence_priv *priv = drm->fence;
 	int i;
 
-	priv->suspend = vmalloc(pfifo->channels * sizeof(u32));
+	priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
 	if (priv->suspend) {
-		for (i = 0; i < pfifo->channels; i++)
+		for (i = 0; i <= pfifo->max; i++)
 			priv->suspend[i] = nouveau_bo_rd32(priv->bo, i);
 	}
 
@@ -181,15 +182,14 @@ nvc0_fence_suspend(struct drm_device *dev)
 }
 
 static void
-nvc0_fence_resume(struct drm_device *dev)
+nvc0_fence_resume(struct nouveau_drm *drm)
 {
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_fence_priv *priv = dev_priv->fence.func;
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+	struct nvc0_fence_priv *priv = drm->fence;
 	int i;
 
 	if (priv->suspend) {
-		for (i = 0; i < pfifo->channels; i++)
+		for (i = 0; i <= pfifo->max; i++)
 			nouveau_bo_wr32(priv->bo, i, priv->suspend[i]);
 		vfree(priv->suspend);
 		priv->suspend = NULL;
@@ -197,26 +197,23 @@ nvc0_fence_resume(struct drm_device *dev)
 }
 
 static void
-nvc0_fence_destroy(struct drm_device *dev)
+nvc0_fence_destroy(struct nouveau_drm *drm)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_fence_priv *priv = dev_priv->fence.func;
-
+	struct nvc0_fence_priv *priv = drm->fence;
 	nouveau_bo_unmap(priv->bo);
 	nouveau_bo_ref(NULL, &priv->bo);
-	dev_priv->fence.func = NULL;
+	drm->fence = NULL;
 	kfree(priv);
 }
 
 int
-nvc0_fence_create(struct drm_device *dev)
+nvc0_fence_create(struct nouveau_drm *drm)
 {
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
	struct nvc0_fence_priv *priv;
 	int ret;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
@@ -228,10 +225,9 @@ nvc0_fence_create(struct drm_device *dev)
 	priv->base.emit = nvc0_fence_emit;
 	priv->base.sync = nvc0_fence_sync;
 	priv->base.read = nvc0_fence_read;
-	dev_priv->fence.func = priv;
 
-	ret = nouveau_bo_new(dev, 16 * pfifo->channels, 0, TTM_PL_FLAG_VRAM,
-			     0, 0, NULL, &priv->bo);
+	ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+			     TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
 	if (ret == 0) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
 		if (ret == 0)
@@ -241,6 +237,6 @@ nvc0_fence_create(struct drm_device *dev)
 	}
 
 	if (ret)
-		nvc0_fence_destroy(dev);
+		nvc0_fence_destroy(drm);
 	return ret;
 }
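
The suspend/resume pair above snapshots one 32-bit fence value per possible
channel id into a vmalloc'd array and replays it after resume. A sketch of
the save half under the same assumptions as the patch (names mirror the
hunk; this is not a drop-in copy of the driver code):

/* Sketch: snapshot fence state for channel ids 0..max_chid inclusive,
 * as nvc0_fence_suspend() above does with pfifo->max. */
static bool fence_snapshot_sketch(struct nvc0_fence_priv *priv, u32 max_chid)
{
	u32 i;

	priv->suspend = vmalloc((max_chid + 1) * sizeof(u32));
	if (priv->suspend) {
		for (i = 0; i <= max_chid; i++)
			priv->suspend[i] = nouveau_bo_rd32(priv->bo, i);
	}
	return priv->suspend != NULL;
}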
diff --git a/drivers/gpu/drm/nouveau/nvc0_software.c b/drivers/gpu/drm/nouveau/nvc0_software.c
deleted file mode 100644
index eaaa5768f4f7..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_software.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include <core/ramht.h>
-#include "nouveau_software.h"
-
-#include "nv50_display.h"
-
-struct nvc0_software_priv {
-	struct nouveau_software_priv base;
-};
-
-struct nvc0_software_chan {
-	struct nouveau_software_chan base;
-};
-
-static int
-nvc0_software_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct nvc0_software_chan *pch;
-
-	pch = kzalloc(sizeof(*pch), GFP_KERNEL);
-	if (!pch)
-		return -ENOMEM;
-
-	nouveau_software_context_new(chan, &pch->base);
-	chan->engctx[engine] = pch;
-	return 0;
-}
-
-static void
-nvc0_software_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nvc0_software_chan *pch = chan->engctx[engine];
-	chan->engctx[engine] = NULL;
-	kfree(pch);
-}
-
-static int
-nvc0_software_object_new(struct nouveau_channel *chan, int engine,
-			 u32 handle, u16 class)
-{
-	return 0;
-}
-
-static int
-nvc0_software_init(struct drm_device *dev, int engine)
-{
-	return 0;
-}
-
-static int
-nvc0_software_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	return 0;
-}
-
-static void
-nvc0_software_destroy(struct drm_device *dev, int engine)
-{
-	struct nvc0_software_priv *psw = nv_engine(dev, engine);
-
-	NVOBJ_ENGINE_DEL(dev, SW);
-	kfree(psw);
-}
-
-int
-nvc0_software_create(struct drm_device *dev)
-{
-	struct nvc0_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
-	if (!psw)
-		return -ENOMEM;
-
-	psw->base.base.destroy = nvc0_software_destroy;
-	psw->base.base.init = nvc0_software_init;
-	psw->base.base.fini = nvc0_software_fini;
-	psw->base.base.context_new = nvc0_software_context_new;
-	psw->base.base.context_del = nvc0_software_context_del;
-	psw->base.base.object_new = nvc0_software_object_new;
-	nouveau_software_create(&psw->base);
-
-	NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
-	NVOBJ_CLASS(dev, 0x906e, SW);
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 2da4927b5e06..37b3f3f071d9 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -31,7 +31,6 @@
31#include "nouveau_connector.h" 31#include "nouveau_connector.h"
32#include "nouveau_encoder.h" 32#include "nouveau_encoder.h"
33#include "nouveau_crtc.h" 33#include "nouveau_crtc.h"
34#include "nouveau_dma.h"
35#include "nouveau_fb.h" 34#include "nouveau_fb.h"
36#include "nouveau_fence.h" 35#include "nouveau_fence.h"
37#include "nv50_display.h" 36#include "nv50_display.h"
@@ -1830,15 +1829,7 @@ nvd0_display_intr(struct drm_device *dev)
1830 intr &= ~0x00100000; 1829 intr &= ~0x00100000;
1831 } 1830 }
1832 1831
1833 for (i = 0; i < dev->mode_config.num_crtc; i++) { 1832 intr &= ~0x0f000000; /* vblank, handled in core */
1834 u32 mask = 0x01000000 << i;
1835 if (intr & mask) {
1836 u32 stat = nv_rd32(dev, 0x6100bc + (i * 0x800));
1837 nv_wr32(dev, 0x6100bc + (i * 0x800), stat);
1838 intr &= ~mask;
1839 }
1840 }
1841
1842 if (intr) 1833 if (intr)
1843 NV_INFO(dev, "PDISP: unknown intr 0x%08x\n", intr); 1834 NV_INFO(dev, "PDISP: unknown intr 0x%08x\n", intr);
1844} 1835}
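
The final hunk drops the per-head vblank loop from nvd0_display_intr():
bits 24..27 of the PDISP interrupt word (0x01000000 << head) are now
handled by the core engine code, so the display driver simply strips them
before complaining about unknown interrupts. The single mask is exactly
what the old loop accumulated across four heads, as this small standalone
check shows (illustrative arithmetic, not driver code):

/* Sketch: the four per-head vblank bits OR together to 0x0f000000. */
static u32 vblank_mask_sketch(void)
{
	u32 bits = 0;
	int head;

	for (head = 0; head < 4; head++)
		bits |= 0x01000000 << head;
	return bits;	/* == 0x0f000000, the mask the new code clears */
}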