author    Joel Becker <jlbec@evilplan.org>  2011-05-26 00:51:55 -0400
committer Joel Becker <jlbec@evilplan.org>  2011-05-26 00:51:55 -0400
commit    ece928df16494becd43f999aff9bd530182e7e81 (patch)
tree      905042764ea5d8ab6eda63666406e19f607bcf4c /drivers/gpu/drm/nouveau/nv50_graph.c
parent    3d1c1829ebe7e8bb48a997b39b4865abc9197e5e (diff)
parent    dda54e76d7dba0532ebdd72e0b4f492a03f83225 (diff)
Merge branch 'move_extents' of git://oss.oracle.com/git/tye/linux-2.6 into ocfs2-merge-window
Conflicts:
	fs/ocfs2/ioctl.c
Diffstat (limited to 'drivers/gpu/drm/nouveau/nv50_graph.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_graph.c	442
1 file changed, 244 insertions(+), 198 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index b02a5b1e7d37..e25cbb46789a 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -31,10 +31,95 @@
 #include "nouveau_grctx.h"
 #include "nouveau_dma.h"
 #include "nouveau_vm.h"
+#include "nouveau_ramht.h"
 #include "nv50_evo.h"
 
-static int nv50_graph_register(struct drm_device *);
-static void nv50_graph_isr(struct drm_device *);
+struct nv50_graph_engine {
+	struct nouveau_exec_engine base;
+	u32 ctxprog[512];
+	u32 ctxprog_size;
+	u32 grctx_size;
+};
+
+static void
+nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
+{
+	const uint32_t mask = 0x00010001;
+
+	if (enabled)
+		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
+	else
+		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
+}
+
+static struct nouveau_channel *
+nv50_graph_channel(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t inst;
+	int i;
+
+	/* Be sure we're not in the middle of a context switch or bad things
+	 * will happen, such as unloading the wrong pgraph context.
+	 */
+	if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
+		NV_ERROR(dev, "Ctxprog is still running\n");
+
+	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
+	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
+		return NULL;
+	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
+
+	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+
+		if (chan && chan->ramin && chan->ramin->vinst == inst)
+			return chan;
+	}
+
+	return NULL;
+}
+
+static int
+nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
+{
+	uint32_t fifo = nv_rd32(dev, 0x400500);
+
+	nv_wr32(dev, 0x400500, fifo & ~1);
+	nv_wr32(dev, 0x400784, inst);
+	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
+	nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
+	nv_wr32(dev, 0x400040, 0xffffffff);
+	(void)nv_rd32(dev, 0x400040);
+	nv_wr32(dev, 0x400040, 0x00000000);
+	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
+
+	if (nouveau_wait_for_idle(dev))
+		nv_wr32(dev, 0x40032c, inst | (1<<31));
+	nv_wr32(dev, 0x400500, fifo);
+
+	return 0;
+}
+
+static int
+nv50_graph_unload_context(struct drm_device *dev)
+{
+	uint32_t inst;
+
+	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
+	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
+		return 0;
+	inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
+
+	nouveau_wait_for_idle(dev);
+	nv_wr32(dev, 0x400784, inst);
+	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
+	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
+	nouveau_wait_for_idle(dev);
+
+	nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
+	return 0;
+}
 
 static void
 nv50_graph_init_reset(struct drm_device *dev)
@@ -52,7 +137,6 @@ nv50_graph_init_intr(struct drm_device *dev)
 {
 	NV_DEBUG(dev, "\n");
 
-	nouveau_irq_register(dev, 12, nv50_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
 	nv_wr32(dev, 0x400138, 0xffffffff);
 	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
@@ -135,34 +219,14 @@ nv50_graph_init_zcull(struct drm_device *dev)
 static int
 nv50_graph_init_ctxctl(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_grctx ctx = {};
-	uint32_t *cp;
+	struct nv50_graph_engine *pgraph = nv_engine(dev, NVOBJ_ENGINE_GR);
 	int i;
 
 	NV_DEBUG(dev, "\n");
 
-	cp = kmalloc(512 * 4, GFP_KERNEL);
-	if (!cp) {
-		NV_ERROR(dev, "failed to allocate ctxprog\n");
-		dev_priv->engine.graph.accel_blocked = true;
-		return 0;
-	}
-
-	ctx.dev = dev;
-	ctx.mode = NOUVEAU_GRCTX_PROG;
-	ctx.data = cp;
-	ctx.ctxprog_max = 512;
-	if (!nv50_grctx_init(&ctx)) {
-		dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
-
-		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
-		for (i = 0; i < ctx.ctxprog_len; i++)
-			nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
-	} else {
-		dev_priv->engine.graph.accel_blocked = true;
-	}
-	kfree(cp);
+	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+	for (i = 0; i < pgraph->ctxprog_size; i++)
+		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, pgraph->ctxprog[i]);
 
 	nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
 	nv_wr32(dev, 0x400320, 4);
@@ -171,8 +235,8 @@ nv50_graph_init_ctxctl(struct drm_device *dev)
 	return 0;
 }
 
-int
-nv50_graph_init(struct drm_device *dev)
+static int
+nv50_graph_init(struct drm_device *dev, int engine)
 {
 	int ret;
 
@@ -186,105 +250,66 @@ nv50_graph_init(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	ret = nv50_graph_register(dev);
-	if (ret)
-		return ret;
 	nv50_graph_init_intr(dev);
 	return 0;
 }
 
-void
-nv50_graph_takedown(struct drm_device *dev)
+static int
+nv50_graph_fini(struct drm_device *dev, int engine)
 {
 	NV_DEBUG(dev, "\n");
+	nv50_graph_unload_context(dev);
 	nv_wr32(dev, 0x40013c, 0x00000000);
-	nouveau_irq_unregister(dev, 12);
-}
-
-void
-nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
-{
-	const uint32_t mask = 0x00010001;
-
-	if (enabled)
-		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
-	else
-		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
-}
-
-struct nouveau_channel *
-nv50_graph_channel(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t inst;
-	int i;
-
-	/* Be sure we're not in the middle of a context switch or bad things
-	 * will happen, such as unloading the wrong pgraph context.
-	 */
-	if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
-		NV_ERROR(dev, "Ctxprog is still running\n");
-
-	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
-	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
-		return NULL;
-	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
-
-	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
-
-		if (chan && chan->ramin && chan->ramin->vinst == inst)
-			return chan;
-	}
-
-	return NULL;
+	return 0;
 }
 
-int
-nv50_graph_create_context(struct nouveau_channel *chan)
+static int
+nv50_graph_context_new(struct nouveau_channel *chan, int engine)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *ramin = chan->ramin;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_gpuobj *grctx = NULL;
+	struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
 	struct nouveau_grctx ctx = {};
 	int hdr, ret;
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
-	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0,
+	ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
 				 NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
+				 NVOBJ_FLAG_ZERO_FREE, &grctx);
 	if (ret)
 		return ret;
 
 	hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
 	nv_wo32(ramin, hdr + 0x00, 0x00190002);
-	nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->vinst +
-				   pgraph->grctx_size - 1);
-	nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->vinst);
+	nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);
+	nv_wo32(ramin, hdr + 0x08, grctx->vinst);
 	nv_wo32(ramin, hdr + 0x0c, 0);
 	nv_wo32(ramin, hdr + 0x10, 0);
 	nv_wo32(ramin, hdr + 0x14, 0x00010000);
 
 	ctx.dev = chan->dev;
 	ctx.mode = NOUVEAU_GRCTX_VALS;
-	ctx.data = chan->ramin_grctx;
+	ctx.data = grctx;
 	nv50_grctx_init(&ctx);
 
-	nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);
+	nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);
 
 	dev_priv->engine.instmem.flush(dev);
-	atomic_inc(&chan->vm->pgraph_refs);
+
+	atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
+	chan->engctx[NVOBJ_ENGINE_GR] = grctx;
 	return 0;
 }
 
-void
-nv50_graph_destroy_context(struct nouveau_channel *chan)
+static void
+nv50_graph_context_del(struct nouveau_channel *chan, int engine)
 {
+	struct nouveau_gpuobj *grctx = chan->engctx[engine];
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
 	unsigned long flags;
@@ -296,72 +321,49 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 	pfifo->reassign(dev, false);
-	pgraph->fifo_access(dev, false);
+	nv50_graph_fifo_access(dev, false);
 
-	if (pgraph->channel(dev) == chan)
-		pgraph->unload_context(dev);
+	if (nv50_graph_channel(dev) == chan)
+		nv50_graph_unload_context(dev);
 
 	for (i = hdr; i < hdr + 24; i += 4)
 		nv_wo32(chan->ramin, i, 0);
 	dev_priv->engine.instmem.flush(dev);
 
-	pgraph->fifo_access(dev, true);
+	nv50_graph_fifo_access(dev, true);
 	pfifo->reassign(dev, true);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
-	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+	nouveau_gpuobj_ref(NULL, &grctx);
 
-	atomic_dec(&chan->vm->pgraph_refs);
+	atomic_dec(&chan->vm->engref[engine]);
+	chan->engctx[engine] = NULL;
 }
 
 static int
-nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
-{
-	uint32_t fifo = nv_rd32(dev, 0x400500);
-
-	nv_wr32(dev, 0x400500, fifo & ~1);
-	nv_wr32(dev, 0x400784, inst);
-	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
-	nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
-	nv_wr32(dev, 0x400040, 0xffffffff);
-	(void)nv_rd32(dev, 0x400040);
-	nv_wr32(dev, 0x400040, 0x00000000);
-	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
-
-	if (nouveau_wait_for_idle(dev))
-		nv_wr32(dev, 0x40032c, inst | (1<<31));
-	nv_wr32(dev, 0x400500, fifo);
-
-	return 0;
-}
-
-int
-nv50_graph_load_context(struct nouveau_channel *chan)
-{
-	uint32_t inst = chan->ramin->vinst >> 12;
-
-	NV_DEBUG(chan->dev, "ch%d\n", chan->id);
-	return nv50_graph_do_load_context(chan->dev, inst);
-}
-
-int
-nv50_graph_unload_context(struct drm_device *dev)
+nv50_graph_object_new(struct nouveau_channel *chan, int engine,
+		      u32 handle, u16 class)
 {
-	uint32_t inst;
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *obj = NULL;
+	int ret;
 
-	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
-	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
-		return 0;
-	inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
+	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+	if (ret)
+		return ret;
+	obj->engine = 1;
+	obj->class = class;
 
-	nouveau_wait_for_idle(dev);
-	nv_wr32(dev, 0x400784, inst);
-	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
-	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
-	nouveau_wait_for_idle(dev);
+	nv_wo32(obj, 0x00, class);
+	nv_wo32(obj, 0x04, 0x00000000);
+	nv_wo32(obj, 0x08, 0x00000000);
+	nv_wo32(obj, 0x0c, 0x00000000);
+	dev_priv->engine.instmem.flush(dev);
 
-	nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
-	return 0;
+	ret = nouveau_ramht_insert(chan, handle, obj);
+	nouveau_gpuobj_ref(NULL, &obj);
+	return ret;
 }
 
 static void
@@ -442,68 +444,15 @@ nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
 	return 0;
 }
 
-static int
-nv50_graph_register(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->engine.graph.registered)
-		return 0;
-
-	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
-	NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
-	NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
-	NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
-	NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
-	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
-
-	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
-	NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
-	NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
-
-	/* tesla */
-	if (dev_priv->chipset == 0x50)
-		NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
-	else
-	if (dev_priv->chipset < 0xa0)
-		NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
-	else {
-		switch (dev_priv->chipset) {
-		case 0xa0:
-		case 0xaa:
-		case 0xac:
-			NVOBJ_CLASS(dev, 0x8397, GR);
-			break;
-		case 0xa3:
-		case 0xa5:
-		case 0xa8:
-			NVOBJ_CLASS(dev, 0x8597, GR);
-			break;
-		case 0xaf:
-			NVOBJ_CLASS(dev, 0x8697, GR);
-			break;
-		}
-	}
-
-	/* compute */
-	NVOBJ_CLASS(dev, 0x50c0, GR);
-	if (dev_priv->chipset > 0xa0 &&
-	    dev_priv->chipset != 0xaa &&
-	    dev_priv->chipset != 0xac)
-		NVOBJ_CLASS(dev, 0x85c0, GR);
-
-	dev_priv->engine.graph.registered = true;
-	return 0;
-}
 
-void
-nv50_graph_tlb_flush(struct drm_device *dev)
+static void
+nv50_graph_tlb_flush(struct drm_device *dev, int engine)
 {
 	nv50_vm_flush_engine(dev, 0);
 }
 
-void
-nv84_graph_tlb_flush(struct drm_device *dev)
+static void
+nv84_graph_tlb_flush(struct drm_device *dev, int engine)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
@@ -548,8 +497,7 @@ nv84_graph_tlb_flush(struct drm_device *dev)
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }
 
-static struct nouveau_enum nv50_mp_exec_error_names[] =
-{
+static struct nouveau_enum nv50_mp_exec_error_names[] = {
 	{ 3, "STACK_UNDERFLOW", NULL },
 	{ 4, "QUADON_ACTIVE", NULL },
 	{ 8, "TIMEOUT", NULL },
@@ -663,7 +611,7 @@ nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
 			nv_rd32(dev, addr + 0x20);
 			pc = nv_rd32(dev, addr + 0x24);
 			oplow = nv_rd32(dev, addr + 0x70);
-			ophigh= nv_rd32(dev, addr + 0x74);
+			ophigh = nv_rd32(dev, addr + 0x74);
 			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
 					"TP %d MP %d: ", tpid, i);
 			nouveau_enum_print(nv50_mp_exec_error_names, status);
@@ -991,7 +939,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
 	return 1;
 }
 
-static int
+int
 nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -1073,3 +1021,101 @@ nv50_graph_isr(struct drm_device *dev)
 	if (nv_rd32(dev, 0x400824) & (1 << 31))
 		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
 }
+
+static void
+nv50_graph_destroy(struct drm_device *dev, int engine)
+{
+	struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
+
+	NVOBJ_ENGINE_DEL(dev, GR);
+
+	nouveau_irq_unregister(dev, 12);
+	kfree(pgraph);
+}
+
+int
+nv50_graph_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_graph_engine *pgraph;
+	struct nouveau_grctx ctx = {};
+	int ret;
+
+	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
+	if (!pgraph)
+		return -ENOMEM;
+
+	ctx.dev = dev;
+	ctx.mode = NOUVEAU_GRCTX_PROG;
+	ctx.data = pgraph->ctxprog;
+	ctx.ctxprog_max = ARRAY_SIZE(pgraph->ctxprog);
+
+	ret = nv50_grctx_init(&ctx);
+	if (ret) {
+		NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
+		kfree(pgraph);
+		return 0;
+	}
+
+	pgraph->grctx_size = ctx.ctxvals_pos * 4;
+	pgraph->ctxprog_size = ctx.ctxprog_len;
+
+	pgraph->base.destroy = nv50_graph_destroy;
+	pgraph->base.init = nv50_graph_init;
+	pgraph->base.fini = nv50_graph_fini;
+	pgraph->base.context_new = nv50_graph_context_new;
+	pgraph->base.context_del = nv50_graph_context_del;
+	pgraph->base.object_new = nv50_graph_object_new;
+	if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac)
+		pgraph->base.tlb_flush = nv50_graph_tlb_flush;
+	else
+		pgraph->base.tlb_flush = nv84_graph_tlb_flush;
+
+	nouveau_irq_register(dev, 12, nv50_graph_isr);
+
+	/* NVSW really doesn't live here... */
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
+	NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
+	NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
+	NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
+
+	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
+
+	/* tesla */
+	if (dev_priv->chipset == 0x50)
+		NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
+	else
+	if (dev_priv->chipset < 0xa0)
+		NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
+	else {
+		switch (dev_priv->chipset) {
+		case 0xa0:
+		case 0xaa:
+		case 0xac:
+			NVOBJ_CLASS(dev, 0x8397, GR);
+			break;
+		case 0xa3:
+		case 0xa5:
+		case 0xa8:
+			NVOBJ_CLASS(dev, 0x8597, GR);
+			break;
+		case 0xaf:
+			NVOBJ_CLASS(dev, 0x8697, GR);
+			break;
+		}
+	}
+
+	/* compute */
+	NVOBJ_CLASS(dev, 0x50c0, GR);
+	if (dev_priv->chipset > 0xa0 &&
+	    dev_priv->chipset != 0xaa &&
+	    dev_priv->chipset != 0xac)
+		NVOBJ_CLASS(dev, 0x85c0, GR);
+
+	return 0;
+}
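
Note: the nouveau side of this diff converts PGRAPH from the old dev_priv->engine.graph function pointers to the shared nouveau_exec_engine vtable (struct nv50_graph_engine embeds nouveau_exec_engine as .base). The sketch below is illustrative only, not part of the patch: it assumes nv_engine() returns the vtable registered via NVOBJ_ENGINE_ADD, and nv50_card_init_sketch() is a hypothetical caller (the real call sites live elsewhere in the driver).

	/* Illustrative sketch -- how code would drive the exec-engine
	 * vtable this commit introduces for PGRAPH.
	 */
	static int
	nv50_card_init_sketch(struct drm_device *dev)
	{
		struct nouveau_exec_engine *eng;
		int ret;

		/* nv50_graph_create() builds the ctxprog once at load time,
		 * fills in pgraph->base.{init,fini,context_new,context_del,
		 * object_new,tlb_flush} and registers the engine with
		 * NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base).
		 */
		ret = nv50_graph_create(dev);
		if (ret)
			return ret;

		/* Per-engine entry points are now reached through the
		 * vtable rather than the removed nv50_graph_* exports.
		 */
		eng = nv_engine(dev, NVOBJ_ENGINE_GR);
		return eng->init(dev, NVOBJ_ENGINE_GR);
	}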