Diffstat (limited to 'drivers/gpu/drm/nouveau/nv40_graph.c')

-rw-r--r--	drivers/gpu/drm/nouveau/nv40_graph.c	| 323
1 file changed, 173 insertions(+), 150 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index fceb44c0ec74..5beb01b8ace1 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -28,14 +28,18 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_grctx.h"
+#include "nouveau_ramht.h"
 
-static int nv40_graph_register(struct drm_device *);
-static void nv40_graph_isr(struct drm_device *);
+struct nv40_graph_engine {
+	struct nouveau_exec_engine base;
+	u32 grctx_size;
+};
 
-struct nouveau_channel *
+static struct nouveau_channel *
 nv40_graph_channel(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *grctx;
 	uint32_t inst;
 	int i;
 
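What the first hunk establishes: per-engine state (here grctx_size) moves out of the global dev_priv->engine.graph and into a wrapper object that embeds the generic exec-engine base. Below is a minimal, self-contained sketch of that embed-the-base idiom in plain C; exec_engine, gr_engine and the local container_of are simplified stand-ins for illustration, not the real nouveau definitions (the patch's nv_engine() helper performs the equivalent downcast).

#include <stddef.h>
#include <stdio.h>

/* hypothetical, simplified stand-ins for the nouveau types */
struct exec_engine {
	int (*init)(struct exec_engine *);
};

struct gr_engine {
	struct exec_engine base;	/* embedded "base class" */
	unsigned grctx_size;		/* per-engine state, no longer global */
};

/* recover the wrapper from a pointer to the embedded base member */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static int gr_init(struct exec_engine *eng)
{
	struct gr_engine *gr = container_of(eng, struct gr_engine, base);
	printf("grctx_size = %u\n", gr->grctx_size);
	return 0;
}

int main(void)
{
	struct gr_engine gr = { .base.init = gr_init, .grctx_size = 175 * 1024 };
	return gr.base.init(&gr.base);
}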
@@ -45,74 +49,17 @@ nv40_graph_channel(struct drm_device *dev)
 	inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+		if (!dev_priv->channels.ptr[i])
+			continue;
 
-		if (chan && chan->ramin_grctx &&
-		    chan->ramin_grctx->pinst == inst)
-			return chan;
+		grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
+		if (grctx && grctx->pinst == inst)
+			return dev_priv->channels.ptr[i];
 	}
 
 	return NULL;
 }
 
-int
-nv40_graph_create_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_grctx ctx = {};
-	unsigned long flags;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
-				 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
-	if (ret)
-		return ret;
-
-	/* Initialise default context values */
-	ctx.dev = chan->dev;
-	ctx.mode = NOUVEAU_GRCTX_VALS;
-	ctx.data = chan->ramin_grctx;
-	nv40_grctx_init(&ctx);
-
-	nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
-
-	/* init grctx pointer in ramfc, and on PFIFO if channel is
-	 * already active there
-	 */
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_wo32(chan->ramfc, 0x38, chan->ramin_grctx->pinst >> 4);
-	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
-	if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
-		nv_wr32(dev, 0x0032e0, chan->ramin_grctx->pinst >> 4);
-	nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-	return 0;
-}
-
-void
-nv40_graph_destroy_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	pgraph->fifo_access(dev, false);
-
-	/* Unload the context if it's the currently active one */
-	if (pgraph->channel(dev) == chan)
-		pgraph->unload_context(dev);
-
-	pgraph->fifo_access(dev, true);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	/* Free the context resources */
-	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
-}
-
 static int
 nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
 {
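The hunk above also shows the other half of the conversion: the graphics context no longer lives in a dedicated chan->ramin_grctx field but in the channel's per-engine slot, chan->engctx[NVOBJ_ENGINE_GR]. A rough, self-contained sketch of that lookup pattern follows; the types and names are invented minimal stand-ins (the real code compares each gpuobj's pinst against the instance address read from NV40_PGRAPH_CTXCTL_CUR).

#include <stddef.h>
#include <stdint.h>

enum { ENGINE_GR = 0, NR_ENGINES = 4 };	/* stand-in for NVOBJ_ENGINE_* */

struct gpuobj { uint32_t pinst; };	/* instance-memory offset */
struct channel { void *engctx[NR_ENGINES]; };

/* find the channel whose GR context instance matches 'inst' */
struct channel *
channel_for_inst(struct channel **chans, int nr, uint32_t inst)
{
	for (int i = 0; i < nr; i++) {
		struct gpuobj *grctx;

		if (!chans[i])
			continue;
		grctx = chans[i]->engctx[ENGINE_GR];
		if (grctx && grctx->pinst == inst)
			return chans[i];
	}
	return NULL;
}

int main(void)
{
	struct gpuobj ctx = { .pinst = 0x1234 };
	struct channel ch = { .engctx = { [ENGINE_GR] = &ctx } };
	struct channel *chans[] = { NULL, &ch };

	return channel_for_inst(chans, 2, 0x1234) == &ch ? 0 : 1;
}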
@@ -154,57 +101,115 @@ nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
 	return 0;
 }
 
-/* Restore the context for a specific channel into PGRAPH */
-int
-nv40_graph_load_context(struct nouveau_channel *chan)
+static int
+nv40_graph_unload_context(struct drm_device *dev)
 {
-	struct drm_device *dev = chan->dev;
 	uint32_t inst;
 	int ret;
 
-	if (!chan->ramin_grctx)
-		return -EINVAL;
-	inst = chan->ramin_grctx->pinst >> 4;
+	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
+	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
+		return 0;
+	inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
+
+	ret = nv40_graph_transfer_context(dev, inst, 1);
+
+	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
+	return ret;
+}
+
+static int
+nv40_graph_context_new(struct nouveau_channel *chan, int engine)
+{
+	struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine);
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *grctx = NULL;
+	struct nouveau_grctx ctx = {};
+	unsigned long flags;
+	int ret;
 
-	ret = nv40_graph_transfer_context(dev, inst, 0);
+	ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
+				 NVOBJ_FLAG_ZERO_ALLOC, &grctx);
 	if (ret)
 		return ret;
 
-	/* 0x40032C, no idea of it's exact function. Could simply be a
-	 * record of the currently active PGRAPH context. It's currently
-	 * unknown as to what bit 24 does. The nv ddx has it set, so we will
-	 * set it here too.
-	 */
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
-	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR,
-		(inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) |
-		NV40_PGRAPH_CTXCTL_CUR_LOADED);
-	/* 0x32E0 records the instance address of the active FIFO's PGRAPH
-	 * context. If at any time this doesn't match 0x40032C, you will
-	 * receive PGRAPH_INTR_CONTEXT_SWITCH
+	/* Initialise default context values */
+	ctx.dev = chan->dev;
+	ctx.mode = NOUVEAU_GRCTX_VALS;
+	ctx.data = grctx;
+	nv40_grctx_init(&ctx);
+
+	nv_wo32(grctx, 0, grctx->vinst);
+
+	/* init grctx pointer in ramfc, and on PFIFO if channel is
+	 * already active there
 	 */
-	nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst);
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4);
+	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
+	if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
+		nv_wr32(dev, 0x0032e0, grctx->vinst >> 4);
+	nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	chan->engctx[engine] = grctx;
 	return 0;
 }
 
-int
-nv40_graph_unload_context(struct drm_device *dev)
+static void
+nv40_graph_context_del(struct nouveau_channel *chan, int engine)
 {
-	uint32_t inst;
-	int ret;
+	struct nouveau_gpuobj *grctx = chan->engctx[engine];
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
-	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
-		return 0;
-	inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	nv04_graph_fifo_access(dev, false);
 
-	ret = nv40_graph_transfer_context(dev, inst, 1);
+	/* Unload the context if it's the currently active one */
+	if (nv40_graph_channel(dev) == chan)
+		nv40_graph_unload_context(dev);
 
-	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
+	nv04_graph_fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	/* Free the context resources */
+	nouveau_gpuobj_ref(NULL, &grctx);
+	chan->engctx[engine] = NULL;
+}
+
+int
+nv40_graph_object_new(struct nouveau_channel *chan, int engine,
+		      u32 handle, u16 class)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_gpuobj *obj = NULL;
+	int ret;
+
+	ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+	if (ret)
+		return ret;
+	obj->engine = 1;
+	obj->class = class;
+
+	nv_wo32(obj, 0x00, class);
+	nv_wo32(obj, 0x04, 0x00000000);
+#ifndef __BIG_ENDIAN
+	nv_wo32(obj, 0x08, 0x00000000);
+#else
+	nv_wo32(obj, 0x08, 0x01000000);
+#endif
+	nv_wo32(obj, 0x0c, 0x00000000);
+	nv_wo32(obj, 0x10, 0x00000000);
+
+	ret = nouveau_ramht_insert(chan, handle, obj);
+	nouveau_gpuobj_ref(NULL, &obj);
 	return ret;
 }
 
-void
+static void
 nv40_graph_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
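One detail worth calling out from nv40_graph_object_new() above: it writes a five-word object header into instance memory, and the word at offset 0x08 carries a flag (0x01000000) only on big-endian builds. A hedged, self-contained sketch of filling such a template on the CPU side; it uses the compiler's byte-order macro rather than the kernel's __BIG_ENDIAN, and the 0x01000000 value and offsets are taken from the patch itself.

#include <stdint.h>
#include <string.h>

/* build the 20-byte (five u32) header that the patch writes with
 * nv_wo32(); indices match the nv_wo32(obj, 0x00..0x10, ...) offsets */
static void fill_object_header(uint32_t hdr[5], uint16_t class)
{
	memset(hdr, 0, 5 * sizeof(*hdr));
	hdr[0] = class;			/* 0x00: object class */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	hdr[2] = 0x01000000;		/* 0x08: big-endian flag */
#endif
	/* 0x04, 0x0c and 0x10 stay zero */
}

int main(void)
{
	uint32_t hdr[5];
	fill_object_header(hdr, 0x4097);	/* curie class from the patch */
	return hdr[0] == 0x4097 ? 0 : 1;
}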
@@ -257,14 +262,14 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
  * C51         0x4e
  */
 int
-nv40_graph_init(struct drm_device *dev)
+nv40_graph_init(struct drm_device *dev, int engine)
 {
-	struct drm_nouveau_private *dev_priv =
-		(struct drm_nouveau_private *)dev->dev_private;
+	struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nouveau_grctx ctx = {};
 	uint32_t vramsz, *cp;
-	int ret, i, j;
+	int i, j;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
@@ -280,7 +285,7 @@ nv40_graph_init(struct drm_device *dev)
 	ctx.data = cp;
 	ctx.ctxprog_max = 256;
 	nv40_grctx_init(&ctx);
-	dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
+	pgraph->grctx_size = ctx.ctxvals_pos * 4;
 
 	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
 	for (i = 0; i < ctx.ctxprog_len; i++)
@@ -288,14 +293,9 @@ nv40_graph_init(struct drm_device *dev)
 
 	kfree(cp);
 
-	ret = nv40_graph_register(dev);
-	if (ret)
-		return ret;
-
 	/* No context present currently */
 	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
 
-	nouveau_irq_register(dev, 12, nv40_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
 	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
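Note what just left nv40_graph_init(): class registration and the IRQ handler hookup. With one-time setup confined to the constructor (nv40_graph_create(), last hunk), init becomes safely re-runnable across suspend/resume and the old 'registered' guard flag disappears. A schematic, self-contained sketch of that split, with invented stand-in functions (the real calls are nouveau_irq_register(), NVOBJ_CLASS(), and the PGRAPH setup sequence):

#include <stdio.h>

/* hypothetical stand-ins for illustration only */
static void register_irq_handler(void)    { puts("irq registered"); }
static void register_object_classes(void) { puts("classes registered"); }
static void reset_and_init_hw(void)       { puts("hw initialised"); }

/* one-time construction: registration happens exactly once, so no
 * 'registered' guard flag is needed any more */
static int engine_create(void)
{
	register_irq_handler();
	register_object_classes();
	return 0;
}

/* re-runnable (re)initialisation, e.g. after resume: hardware only */
static int engine_init(void)
{
	reset_and_init_hw();
	return 0;
}

int main(void)
{
	engine_create();
	engine_init();		/* may run again later, no re-registration */
	return engine_init();
}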
@@ -428,47 +428,10 @@ nv40_graph_init(struct drm_device *dev)
 	return 0;
 }
 
-void nv40_graph_takedown(struct drm_device *dev)
-{
-	nouveau_irq_unregister(dev, 12);
-}
-
 static int
-nv40_graph_register(struct drm_device *dev)
+nv40_graph_fini(struct drm_device *dev, int engine)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->engine.graph.registered)
-		return 0;
-
-	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
-	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
-	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
-	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
-	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
-	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
-	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
-	NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
-	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
-	NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
-	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
-	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
-	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
-	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
-	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
-	NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
-
-	/* curie */
-	if (nv44_graph_class(dev))
-		NVOBJ_CLASS(dev, 0x4497, GR);
-	else
-		NVOBJ_CLASS(dev, 0x4097, GR);
-
-	/* nvsw */
-	NVOBJ_CLASS(dev, 0x506e, SW);
-	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
-	dev_priv->engine.graph.registered = true;
+	nv40_graph_unload_context(dev);
 	return 0;
 }
 
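nv40_graph_fini() replaces nv40_graph_takedown(): rather than unregistering the IRQ (now done in destroy), it saves any resident context out of PGRAPH via nv40_graph_unload_context() so the engine can be stopped cleanly. A minimal sketch of that save-out-if-loaded logic, mirroring nv40_graph_unload_context() with stand-in register accessors; the mask values are illustrative placeholders, not the actual NV40_PGRAPH_CTXCTL_CUR bit definitions.

#include <stdint.h>

#define CTXCTL_CUR_LOADED   0x01000000u	/* illustrative bit layout */
#define CTXCTL_CUR_INSTANCE 0x000fffffu

/* fake MMIO backing store so the sketch runs standalone */
static uint32_t fake_ctxctl_cur = CTXCTL_CUR_LOADED | 0x1234;
static uint32_t rd32(void)           { return fake_ctxctl_cur; }
static void     wr32(uint32_t v)     { fake_ctxctl_cur = v; }
static int save_context(uint32_t in) { (void)in; return 0; }

/* if a context is resident, save it to instance memory and clear
 * the LOADED bit so PGRAPH is context-free */
static int unload_context(void)
{
	uint32_t inst = rd32();
	int ret;

	if (!(inst & CTXCTL_CUR_LOADED))
		return 0;
	inst &= CTXCTL_CUR_INSTANCE;

	ret = save_context(inst);	/* transfer_context(dev, inst, 1) */
	wr32(inst);			/* LOADED bit now clear */
	return ret;
}

int main(void) { return unload_context(); }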
@@ -476,17 +439,17 @@ static int
 nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan;
+	struct nouveau_gpuobj *grctx;
 	unsigned long flags;
 	int i;
 
 	spin_lock_irqsave(&dev_priv->channels.lock, flags);
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		chan = dev_priv->channels.ptr[i];
-		if (!chan || !chan->ramin_grctx)
+		if (!dev_priv->channels.ptr[i])
 			continue;
+		grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
 
-		if (inst == chan->ramin_grctx->pinst)
+		if (grctx && grctx->pinst == inst)
 			break;
 	}
 	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
@@ -537,3 +500,63 @@ nv40_graph_isr(struct drm_device *dev)
 		}
 	}
 }
+
+static void
+nv40_graph_destroy(struct drm_device *dev, int engine)
+{
+	struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
+
+	nouveau_irq_unregister(dev, 12);
+
+	NVOBJ_ENGINE_DEL(dev, GR);
+	kfree(pgraph);
+}
+
+int
+nv40_graph_create(struct drm_device *dev)
+{
+	struct nv40_graph_engine *pgraph;
+
+	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
+	if (!pgraph)
+		return -ENOMEM;
+
+	pgraph->base.destroy = nv40_graph_destroy;
+	pgraph->base.init = nv40_graph_init;
+	pgraph->base.fini = nv40_graph_fini;
+	pgraph->base.context_new = nv40_graph_context_new;
+	pgraph->base.context_del = nv40_graph_context_del;
+	pgraph->base.object_new = nv40_graph_object_new;
+	pgraph->base.set_tile_region = nv40_graph_set_tile_region;
+
+	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
+	nouveau_irq_register(dev, 12, nv40_graph_isr);
+
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
+
+	/* curie */
+	if (nv44_graph_class(dev))
+		NVOBJ_CLASS(dev, 0x4497, GR);
+	else
+		NVOBJ_CLASS(dev, 0x4097, GR);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+	return 0;
+}
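The final hunk makes nv40_graph_create() the single constructor: allocate the engine object, fill the function-pointer table on the embedded base, register the engine and its IRQ, and declare the object classes once. A compact, self-contained sketch of this constructor-plus-vtable idiom; engine_ops and gr_engine are simplified stand-ins for nouveau_exec_engine and the NVOBJ_ENGINE_ADD machinery.

#include <stdlib.h>

struct engine_ops {
	int  (*init)(void *);
	void (*destroy)(void *);
};

struct gr_engine {
	struct engine_ops ops;	/* embedded "base class" vtable */
	unsigned grctx_size;
};

static int  gr_init(void *eng)    { (void)eng; return 0; }
static void gr_destroy(void *eng) { free(eng); }

/* constructor: allocate, wire up the vtable, then register (sketch) */
static struct gr_engine *gr_create(void)
{
	struct gr_engine *gr = calloc(1, sizeof(*gr));
	if (!gr)
		return NULL;
	gr->ops.init = gr_init;
	gr->ops.destroy = gr_destroy;
	/* real code: NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
	 *            nouveau_irq_register(dev, 12, nv40_graph_isr); */
	return gr;
}

int main(void)
{
	struct gr_engine *gr = gr_create();
	if (!gr)
		return 1;
	gr->ops.init(gr);
	gr->ops.destroy(gr);
	return 0;
}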