author    Ben Skeggs <bskeggs@redhat.com>  2011-03-31 19:50:18 -0400
committer Ben Skeggs <bskeggs@redhat.com>  2011-05-15 20:48:06 -0400
commit    2703c21a82301f5c31ba5679e2d56422bd4cd404 (patch)
tree      5c47247ff2ea9b103ecc7492bce19e2577b2e489 /drivers/gpu/drm/nouveau/nv50_graph.c
parent    6dfdd7a61e8fc25552d9de1cb25272324dfc4c13 (diff)
drm/nv50/gr: move to exec engine interfaces
This needs a massive cleanup, but that will be done later, so that bugs
from the interface change can be caught separately from bugs introduced
by the engine code cleanup.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
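For reference, the "exec engine" interface that PGRAPH is moved onto here is a
per-engine vtable. The sketch below is reconstructed from the callbacks that
nv50_graph_create() assigns further down in this patch; it is not quoted from
nouveau_drv.h, so field order and details may differ from the real declaration:

struct nouveau_exec_engine {
        /* signatures inferred from the nv50_graph_* functions in this diff */
        void (*destroy)(struct drm_device *, int engine);
        int  (*init)(struct drm_device *, int engine);
        int  (*fini)(struct drm_device *, int engine);
        int  (*context_new)(struct nouveau_channel *, int engine);
        void (*context_del)(struct nouveau_channel *, int engine);
        int  (*object_new)(struct nouveau_channel *, int engine,
                           u32 handle, u16 class);
        void (*tlb_flush)(struct drm_device *, int engine);
};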
Diffstat (limited to 'drivers/gpu/drm/nouveau/nv50_graph.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c  426
1 file changed, 224 insertions(+), 202 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index eb83179ed74f..bffa486ec7ce 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -34,8 +34,92 @@
 #include "nouveau_ramht.h"
 #include "nv50_evo.h"
 
-static int nv50_graph_register(struct drm_device *);
-static void nv50_graph_isr(struct drm_device *);
+struct nv50_graph_engine {
+        struct nouveau_exec_engine base;
+        u32 ctxprog[512];
+        u32 ctxprog_size;
+        u32 grctx_size;
+};
+
+static void
+nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
+{
+        const uint32_t mask = 0x00010001;
+
+        if (enabled)
+                nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
+        else
+                nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
+}
+
+static struct nouveau_channel *
+nv50_graph_channel(struct drm_device *dev)
+{
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        uint32_t inst;
+        int i;
+
+        /* Be sure we're not in the middle of a context switch or bad things
+         * will happen, such as unloading the wrong pgraph context.
+         */
+        if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
+                NV_ERROR(dev, "Ctxprog is still running\n");
+
+        inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
+        if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
+                return NULL;
+        inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
+
+        for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+                struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+
+                if (chan && chan->ramin && chan->ramin->vinst == inst)
+                        return chan;
+        }
+
+        return NULL;
+}
+
+static int
+nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
+{
+        uint32_t fifo = nv_rd32(dev, 0x400500);
+
+        nv_wr32(dev, 0x400500, fifo & ~1);
+        nv_wr32(dev, 0x400784, inst);
+        nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
+        nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
+        nv_wr32(dev, 0x400040, 0xffffffff);
+        (void)nv_rd32(dev, 0x400040);
+        nv_wr32(dev, 0x400040, 0x00000000);
+        nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
+
+        if (nouveau_wait_for_idle(dev))
+                nv_wr32(dev, 0x40032c, inst | (1<<31));
+        nv_wr32(dev, 0x400500, fifo);
+
+        return 0;
+}
+
+static int
+nv50_graph_unload_context(struct drm_device *dev)
+{
+        uint32_t inst;
+
+        inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
+        if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
+                return 0;
+        inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
+
+        nouveau_wait_for_idle(dev);
+        nv_wr32(dev, 0x400784, inst);
+        nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
+        nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
+        nouveau_wait_for_idle(dev);
+
+        nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
+        return 0;
+}
 
 static void
 nv50_graph_init_reset(struct drm_device *dev)
@@ -53,7 +137,6 @@ nv50_graph_init_intr(struct drm_device *dev)
 {
         NV_DEBUG(dev, "\n");
 
-        nouveau_irq_register(dev, 12, nv50_graph_isr);
         nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
         nv_wr32(dev, 0x400138, 0xffffffff);
         nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
@@ -136,34 +219,14 @@ nv50_graph_init_zcull(struct drm_device *dev)
 static int
 nv50_graph_init_ctxctl(struct drm_device *dev)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_grctx ctx = {};
-        uint32_t *cp;
+        struct nv50_graph_engine *pgraph = nv_engine(dev, NVOBJ_ENGINE_GR);
         int i;
 
         NV_DEBUG(dev, "\n");
 
-        cp = kmalloc(512 * 4, GFP_KERNEL);
-        if (!cp) {
-                NV_ERROR(dev, "failed to allocate ctxprog\n");
-                dev_priv->engine.graph.accel_blocked = true;
-                return 0;
-        }
-
-        ctx.dev = dev;
-        ctx.mode = NOUVEAU_GRCTX_PROG;
-        ctx.data = cp;
-        ctx.ctxprog_max = 512;
-        if (!nv50_grctx_init(&ctx)) {
-                dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
-
-                nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
-                for (i = 0; i < ctx.ctxprog_len; i++)
-                        nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
-        } else {
-                dev_priv->engine.graph.accel_blocked = true;
-        }
-        kfree(cp);
+        nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+        for (i = 0; i < pgraph->ctxprog_size; i++)
+                nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, pgraph->ctxprog[i]);
 
         nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
         nv_wr32(dev, 0x400320, 4);
@@ -172,8 +235,8 @@ nv50_graph_init_ctxctl(struct drm_device *dev)
         return 0;
 }
 
-int
-nv50_graph_init(struct drm_device *dev)
+static int
+nv50_graph_init(struct drm_device *dev, int engine)
 {
         int ret;
 
@@ -187,105 +250,66 @@ nv50_graph_init(struct drm_device *dev)
         if (ret)
                 return ret;
 
-        ret = nv50_graph_register(dev);
-        if (ret)
-                return ret;
         nv50_graph_init_intr(dev);
         return 0;
 }
 
-void
-nv50_graph_takedown(struct drm_device *dev)
+static int
+nv50_graph_fini(struct drm_device *dev, int engine)
 {
         NV_DEBUG(dev, "\n");
+        nv50_graph_unload_context(dev);
         nv_wr32(dev, 0x40013c, 0x00000000);
-        nouveau_irq_unregister(dev, 12);
-}
-
-void
-nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
-{
-        const uint32_t mask = 0x00010001;
-
-        if (enabled)
-                nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
-        else
-                nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
-}
-
-struct nouveau_channel *
-nv50_graph_channel(struct drm_device *dev)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        uint32_t inst;
-        int i;
-
-        /* Be sure we're not in the middle of a context switch or bad things
-         * will happen, such as unloading the wrong pgraph context.
-         */
-        if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
-                NV_ERROR(dev, "Ctxprog is still running\n");
-
-        inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
-        if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
-                return NULL;
-        inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
-
-        for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-                struct nouveau_channel *chan = dev_priv->channels.ptr[i];
-
-                if (chan && chan->ramin && chan->ramin->vinst == inst)
-                        return chan;
-        }
-
-        return NULL;
+        return 0;
 }
 
-int
-nv50_graph_create_context(struct nouveau_channel *chan)
+static int
+nv50_graph_context_new(struct nouveau_channel *chan, int engine)
 {
         struct drm_device *dev = chan->dev;
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct nouveau_gpuobj *ramin = chan->ramin;
-        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+        struct nouveau_gpuobj *grctx = NULL;
+        struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
         struct nouveau_grctx ctx = {};
         int hdr, ret;
 
         NV_DEBUG(dev, "ch%d\n", chan->id);
 
-        ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0,
+        ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
                                  NVOBJ_FLAG_ZERO_ALLOC |
-                                 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
+                                 NVOBJ_FLAG_ZERO_FREE, &grctx);
         if (ret)
                 return ret;
 
         hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
         nv_wo32(ramin, hdr + 0x00, 0x00190002);
-        nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->vinst +
-                                   pgraph->grctx_size - 1);
-        nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->vinst);
+        nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);
+        nv_wo32(ramin, hdr + 0x08, grctx->vinst);
         nv_wo32(ramin, hdr + 0x0c, 0);
         nv_wo32(ramin, hdr + 0x10, 0);
         nv_wo32(ramin, hdr + 0x14, 0x00010000);
 
         ctx.dev = chan->dev;
         ctx.mode = NOUVEAU_GRCTX_VALS;
-        ctx.data = chan->ramin_grctx;
+        ctx.data = grctx;
         nv50_grctx_init(&ctx);
 
-        nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);
+        nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);
 
         dev_priv->engine.instmem.flush(dev);
-        atomic_inc(&chan->vm->pgraph_refs);
+
+        atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
+        chan->engctx[NVOBJ_ENGINE_GR] = grctx;
         return 0;
 }
 
-void
-nv50_graph_destroy_context(struct nouveau_channel *chan)
+static void
+nv50_graph_context_del(struct nouveau_channel *chan, int engine)
 {
+        struct nouveau_gpuobj *grctx = chan->engctx[engine];
         struct drm_device *dev = chan->dev;
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
         struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
         int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
         unsigned long flags;
@@ -297,76 +321,28 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
 
         spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
         pfifo->reassign(dev, false);
-        pgraph->fifo_access(dev, false);
+        nv50_graph_fifo_access(dev, false);
 
-        if (pgraph->channel(dev) == chan)
-                pgraph->unload_context(dev);
+        if (nv50_graph_channel(dev) == chan)
+                nv50_graph_unload_context(dev);
 
         for (i = hdr; i < hdr + 24; i += 4)
                 nv_wo32(chan->ramin, i, 0);
         dev_priv->engine.instmem.flush(dev);
 
-        pgraph->fifo_access(dev, true);
+        nv50_graph_fifo_access(dev, true);
         pfifo->reassign(dev, true);
         spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
-        nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+        nouveau_gpuobj_ref(NULL, &grctx);
 
-        atomic_dec(&chan->vm->pgraph_refs);
+        atomic_dec(&chan->vm->engref[engine]);
+        chan->engctx[engine] = NULL;
 }
 
 static int
-nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
-{
-        uint32_t fifo = nv_rd32(dev, 0x400500);
-
-        nv_wr32(dev, 0x400500, fifo & ~1);
-        nv_wr32(dev, 0x400784, inst);
-        nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
-        nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
-        nv_wr32(dev, 0x400040, 0xffffffff);
-        (void)nv_rd32(dev, 0x400040);
-        nv_wr32(dev, 0x400040, 0x00000000);
-        nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
-
-        if (nouveau_wait_for_idle(dev))
-                nv_wr32(dev, 0x40032c, inst | (1<<31));
-        nv_wr32(dev, 0x400500, fifo);
-
-        return 0;
-}
-
-int
-nv50_graph_load_context(struct nouveau_channel *chan)
-{
-        uint32_t inst = chan->ramin->vinst >> 12;
-
-        NV_DEBUG(chan->dev, "ch%d\n", chan->id);
-        return nv50_graph_do_load_context(chan->dev, inst);
-}
-
-int
-nv50_graph_unload_context(struct drm_device *dev)
-{
-        uint32_t inst;
-
-        inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
-        if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
-                return 0;
-        inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
-
-        nouveau_wait_for_idle(dev);
-        nv_wr32(dev, 0x400784, inst);
-        nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
-        nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
-        nouveau_wait_for_idle(dev);
-
-        nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
-        return 0;
-}
-
-int
-nv50_graph_object_new(struct nouveau_channel *chan, u32 handle, u16 class)
+nv50_graph_object_new(struct nouveau_channel *chan, int engine,
+                      u32 handle, u16 class)
 {
         struct drm_device *dev = chan->dev;
         struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -468,68 +444,15 @@ nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
         return 0;
 }
 
-static int
-nv50_graph_register(struct drm_device *dev)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-        if (dev_priv->engine.graph.registered)
-                return 0;
-
-        NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
-        NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
-        NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
-        NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
-        NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
-        NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
-
-        NVOBJ_CLASS(dev, 0x0030, GR); /* null */
-        NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
-        NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
-
-        /* tesla */
-        if (dev_priv->chipset == 0x50)
-                NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
-        else
-        if (dev_priv->chipset < 0xa0)
-                NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
-        else {
-                switch (dev_priv->chipset) {
-                case 0xa0:
-                case 0xaa:
-                case 0xac:
-                        NVOBJ_CLASS(dev, 0x8397, GR);
-                        break;
-                case 0xa3:
-                case 0xa5:
-                case 0xa8:
-                        NVOBJ_CLASS(dev, 0x8597, GR);
-                        break;
-                case 0xaf:
-                        NVOBJ_CLASS(dev, 0x8697, GR);
-                        break;
-                }
-        }
-
-        /* compute */
-        NVOBJ_CLASS(dev, 0x50c0, GR);
-        if (dev_priv->chipset > 0xa0 &&
-            dev_priv->chipset != 0xaa &&
-            dev_priv->chipset != 0xac)
-                NVOBJ_CLASS(dev, 0x85c0, GR);
-
-        dev_priv->engine.graph.registered = true;
-        return 0;
-}
 
-void
-nv50_graph_tlb_flush(struct drm_device *dev)
+static void
+nv50_graph_tlb_flush(struct drm_device *dev, int engine)
 {
         nv50_vm_flush_engine(dev, 0);
 }
 
-void
-nv84_graph_tlb_flush(struct drm_device *dev)
+static void
+nv84_graph_tlb_flush(struct drm_device *dev, int engine)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
@@ -1098,3 +1021,102 @@ nv50_graph_isr(struct drm_device *dev)
         if (nv_rd32(dev, 0x400824) & (1 << 31))
                 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
 }
+
+static void
+nv50_graph_destroy(struct drm_device *dev, int engine)
+{
+        struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
+
+        NVOBJ_ENGINE_DEL(dev, GR);
+
+        nouveau_irq_unregister(dev, 12);
+        kfree(pgraph);
+}
+
+int
+nv50_graph_create(struct drm_device *dev)
+{
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nv50_graph_engine *pgraph;
+        struct nouveau_grctx ctx = {};
+        int ret;
+
+        pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
+        if (!pgraph)
+                return -ENOMEM;
+
+        ctx.dev = dev;
+        ctx.mode = NOUVEAU_GRCTX_PROG;
+        ctx.data = pgraph->ctxprog;
+        ctx.ctxprog_max = ARRAY_SIZE(pgraph->ctxprog);
+
+        ret = nv50_grctx_init(&ctx);
+        if (ret) {
+                NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
+                dev_priv->engine.graph.accel_blocked = true;
+                kfree(pgraph);
+                return 0;
+        }
+
+        pgraph->grctx_size = ctx.ctxvals_pos * 4;
+        pgraph->ctxprog_size = ctx.ctxprog_len;
+
+        pgraph->base.destroy = nv50_graph_destroy;
+        pgraph->base.init = nv50_graph_init;
+        pgraph->base.fini = nv50_graph_fini;
+        pgraph->base.context_new = nv50_graph_context_new;
+        pgraph->base.context_del = nv50_graph_context_del;
+        pgraph->base.object_new = nv50_graph_object_new;
+        if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac)
+                pgraph->base.tlb_flush = nv50_graph_tlb_flush;
+        else
+                pgraph->base.tlb_flush = nv84_graph_tlb_flush;
+
+        nouveau_irq_register(dev, 12, nv50_graph_isr);
+
+        /* NVSW really doesn't live here... */
+        NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+        NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
+        NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
+        NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
+        NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
+        NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
+
+        NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
+        NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+        NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
+        NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
+
+        /* tesla */
+        if (dev_priv->chipset == 0x50)
+                NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
+        else
+        if (dev_priv->chipset < 0xa0)
+                NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
+        else {
+                switch (dev_priv->chipset) {
+                case 0xa0:
+                case 0xaa:
+                case 0xac:
+                        NVOBJ_CLASS(dev, 0x8397, GR);
+                        break;
+                case 0xa3:
+                case 0xa5:
+                case 0xa8:
+                        NVOBJ_CLASS(dev, 0x8597, GR);
+                        break;
+                case 0xaf:
+                        NVOBJ_CLASS(dev, 0x8697, GR);
+                        break;
+                }
+        }
+
+        /* compute */
+        NVOBJ_CLASS(dev, 0x50c0, GR);
+        if (dev_priv->chipset > 0xa0 &&
+            dev_priv->chipset != 0xaa &&
+            dev_priv->chipset != 0xac)
+                NVOBJ_CLASS(dev, 0x85c0, GR);
+
+        return 0;
+}
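With PGRAPH registered through NVOBJ_ENGINE_ADD(), core code no longer calls
nv50_graph_init()/nv50_graph_takedown() directly; it drives engines through the
vtable instead. A minimal illustration of the calling side, assuming the
dev_priv->eng[] array and NVOBJ_ENGINE_NR bound used by the nouveau tree of
this era (the function name here is hypothetical, not quoted from
nouveau_state.c):

/* Illustrative only: per-engine init driven through the exec engine
 * vtable rather than hard-coded nv50_graph_* calls.
 */
static int
nouveau_card_init_engines(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int e, ret;

        for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
                if (!dev_priv->eng[e])      /* slot unused on this chipset */
                        continue;
                ret = dev_priv->eng[e]->init(dev, e);
                if (ret)
                        return ret;
        }
        return 0;
}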